def Browse_ItemAol(html, metamethod='', content='tvshows', view='515'):
    """List shows from an AOL-style title table, with metahandler artwork.

    html -- page markup to parse; no-op on empty input.
    content/view -- listing hints (content feeds set_view).
    Side effects: adds one folder row per show via _addon.add_directory.

    BUGFIX: artwork URLs were run through str(...).replace('u', ''), which
    deleted EVERY 'u' character in the URL (e.g. '.../uploads/x.jpg' became
    '.../ploads/x.jpg'). The intent was only to shed a unicode wrapper, so
    coerce cleanly instead.
    """
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, 'Tytu', '</table>', False)[1]
    data = re.findall('<a href="/(.+?)">(.+?)</a>', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite4 + item[0] + '?page=0'
        name = item[1].encode("utf-8")
        ### scraper
        meta = metaget.get_meta('tvshow', name)
        fanart = meta['backdrop_url']
        fanart = fanart.encode('utf-8') if isinstance(fanart, unicode) else str(fanart)
        img = meta['cover_url']
        img = img.encode('utf-8') if isinstance(img, unicode) else str(img)
        plot = meta['plot']
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
        ###
        pars = {'mode': 'EpisodesAnime', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'movie':
            contextMenuItems = ContextMenu_Movies(contextLabs)
        elif section == 'animeonline':
            contextMenuItems = ContextMenu_Series(contextLabs)
        elif section == 'animedrama':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_Itemdragon(html, url, metamethod='', content='movies', view='515'):
    """Episode list for the dragonball section: one playable row per
    <li>number</li><li><a>title</a></li> pair found in the episode table."""
    if not len(html):
        return
    html = GetDataBeetwenMarkers(html, '<ul class="lista-odcinkow">', '<div class="kontener">')[1]
    rows = re.findall('<li>(.+?)</li>\n<li><a href="(.+?)">(.+?)</a>', html)
    total = len(rows)
    for number, href, title in rows:
        strona = mainSite + href
        # Label is "<episode number> - <episode title>".
        name = (number + " - " + title).encode("utf-8")
        img = ''
        plot = ''
        fanart = fanartAol
        labs = {'plot': plot}
        pars = {'mode': 'Playdragon', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        contextMenuItems = ContextMenu_Series(contextLabs) if section == 'dragonball' else []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=False, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=total)
    set_view(content, view_mode=addst('tvshows-view'))
def SectionMenu():
    # Root menu for this site: an About row, a debug-only "Add Url" row, and
    # one playable row per URL stored in the working file (workingFileWP).
    cNumber = '8'; cNumber2 = '2'; cNumber3 = '0'  # indices into the colors table
    contextMenuItems = []
    contextMenuItems.append(('Add URL', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'site': site, 'mode': 'AddUrlToList'})))
    _addon.add_directory({'mode': 'About', 'site': site}, {'title': cFL_('About', colors['9'])}, is_folder=False, contextmenu_items=contextMenuItems, fanart=fanartSite, img='http://i.imgur.com/0h78x5V.png')  # iconSite
    if (_debugging == True):
        _addon.add_directory({'mode': 'AddUrlToList', 'site': site}, {'title': cFL_('Add Url', colors[cNumber3])}, is_folder=True, contextmenu_items=contextMenuItems, fanart=fanartSite, img='http://i.imgur.com/0h78x5V.png')  # iconSite
    # Seed the working file on first run; otherwise load the stored URL list.
    if isFile(workingFileWP) == False:
        _SaveFile(workingFileWP, '\n')
    else:
        dd = _OpenFile(workingFileWP); deb('length of workingfile', str(len(dd)))  #debob(dd)
    s = "'([A-Za-z0-9]+://.+)'"
    # NOTE(review): when the file was just created, dd is undefined here; the
    # resulting NameError is swallowed below leaving matches == '' — this
    # appears to be the intended "empty list" path, but confirm.
    try:
        matches = re.compile(s).findall(dd); debob(matches)  #,re.DOTALL
    except:
        matches = ''
    ItemCount = len(matches); deb('# of matches', str(ItemCount)); i = 1
    if ItemCount > 0:
        for match in matches:
            # Each saved URL gets its own context menu (resolver test / remove / add).
            contextMenuItems = []; img = iconSite; fimg = fanartSite
            pars = {'mode': 'PlayURL', 'url': match, 'site': site, 'section': section}
            _title = str(i) + '.) ' + cFL(match, colors[cNumber])
            contextMenuItems.append(('Try URL With UrlResolver', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'site': site, 'mode': 'PlayURLs', 'url': match})))
            contextMenuItems.append(('Remove URL', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'site': site, 'mode': 'RemoveUrlToList', 'url': match})))
            contextMenuItems.append(('Add URL', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'site': site, 'mode': 'AddUrlToList'})))
            try:
                _addon.add_directory(pars, {'title': _title}, is_folder=False, contextmenu_items=contextMenuItems, total_items=ItemCount, fanart=fimg, img=img); i = i + 1
            except:
                pass
    eod()
def Fav_List(site='', section='', subfav=''):
    """Render the favorites list for a site/section/sub-favorites group.

    Fetches entries via fav__COMMON__list_fetcher, sorts by (name, year) and
    adds one folder row per favorite, falling back to the site icon/fanart
    when a favorite carries no artwork.

    BUGFIX: the old guards `if _img > 0:` / `if _fanart > 0:` compared a
    string against an int, which is ALWAYS True under Python 2, so the
    iconSite/fanartSite fallbacks never fired; test the length instead.
    """
    debob(['test1', site, section, subfav])
    favs = fav__COMMON__list_fetcher(site=site, section=section, subfav=subfav)
    ItemCount = len(favs)
    debob('test2 - ' + str(ItemCount))
    if len(favs) == 0:
        myNote('Favorites', 'None Found'); eod(); return
    debob(favs)
    favs = sorted(favs, key=lambda item: (item[0], item[1]), reverse=False)
    for (_name, _year, _img, _fanart, _Country, _Url, _plot, _Genres, _site, _subfav, _section, _ToDoParams, _commonID, _commonID2) in favs:
        if len(_img) > 0:
            img = _img
        else:
            img = iconSite
        if len(_fanart) > 0:
            fimg = _fanart
        else:
            fimg = fanartSite
        debob('_ToDoParams'); debob(_ToDoParams)
        pars = _addon.parse_query(_ToDoParams)
        pars[u'fimg'] = _fanart; pars[u'img'] = _img
        #if len(_commonID) > 0: pars['imdb_id']=_commonID
        debob('pars'); debob(pars)
        _title = AFColoring(_name)
        if (len(_year) > 0) and (not _year == '0000'):
            _title += cFL(' (' + cFL(_year, 'mediumpurple') + ')', colorA)
        if len(_Country) > 0:
            _title += cFL(' [' + cFL(_Country, 'mediumpurple') + ']', colorA)
        # ww selects the watched-marker style; visited_check2 failures default to 6.
        wwT = _name + " ~~ "
        try:
            if visited_check2(wwT) == True:
                ww = 7
            else:
                ww = 6
        except:
            ww = 6
        if ww > 1:
            contextLabs = {'title': _name, 'year': _year, 'img': _img, 'fanart': _fanart, 'country': _Country, 'url': _Url, 'plot': _plot, 'genres': _Genres, 'site': _site, 'subfav': _subfav, 'section': _section, 'todoparams': _ToDoParams, 'commonid': _commonID, 'commonid2': _commonID2}
            contextMenuItems = ContextMenu_Favorites(contextLabs)
            contextMenuItems.append(('Empty List', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'mode': 'cFavoritesEmpty', 'site': site, 'section': section, 'subfav': subfav})))
            _title = wwA(_title, ww)
            _addon.add_directory(pars, {'title': _title, 'plot': _plot}, is_folder=True, fanart=fimg, img=img, total_items=ItemCount, contextmenu_items=contextMenuItems)
    if 'movie' in section.lower():
        content = 'movies'
    else:
        content = 'tvshows'
    set_view(content, view_mode=int(addst('tvshows-view')))
    eod()
def Browse_Itemscen(html, name, metamethod='', content='movies', view='515'):
    # anime4fun browser: parses the content_episode grid and adds one folder
    # per entry, then a "Next page" row.
    # NOTE(review): `url` and `fanart` are NOT parameters here — both are read
    # from module-level globals (url drives the next-page arithmetic, fanart
    # feeds every row). Confirm they are set before this function is called.
    # NOTE(review): the `name` parameter is never used in the body.
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, '<div class="content_episode">', '<div class="clr"></div>', False)[1]
    # Flatten the markup so the single regex below can match across lines.
    html = html.replace('\'', '')
    html = html.replace('\n', '')
    html = html.replace('\r', '')
    html = html.replace('\t', '')
    html.encode("utf-8")  # no-op: the encoded result is discarded
    data = re.compile('<a href="(.+?)" title="(.+?)">(.+?)url\((.+?)\);">').findall(html)
    ItemCount = len(data)
    for item in data:
        img = item[3].replace(' ', '%20')  # URL-escape spaces in the thumb path
        strona = item[0]
        name2 = item[1].encode("utf-8")
        plot = ''
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
        ##
        pars = {'mode': 'Episodes4fun', 'site': site, 'section': section, 'title': name2, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name2, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'anime4fun':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name2
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    # next page: bump the trailing digit of the (global) url
    npage = url[:-1] + str(int(url[-1:]) + 1)
    #
    if -1 != html.find("do strony "):
        _addon.add_directory({'mode': 'Page4fun', 'site': site, 'section': section, 'url': npage, 'page': npage}, {'title': "Next page"}, is_folder=True, fanart=fanart, img=nexticon)
    set_view(content, view_mode=addst('links-view'))
    eod()
def Browse_ItemAol(html, page, metamethod='', content='movies', view='515'):
    """List shows for one letter-page of a-o.ninja, optionally scraping
    thumbnails/plots into the local scraper cache.

    html -- letter-index markup; no-op on empty input.
    page -- letter/page key, lowercased and spliced into URLs.

    BUGFIX: the inner scraping section used to reuse `data`, `item`, `html`
    and `ItemCount`, clobbering the outer loop's state — in particular the
    total_items passed to add_directory became the inner match count. The
    inner results now use their own names.
    """
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, 'Tytu', '</table>', False)[1]
    page = page.lower()
    data = re.findall('<a href="https://a-o.ninja/anime/' + page + '(.+?)">(.+?)</a>', html)
    ItemCount = len(data)
    for item in data:
        strona = 'https://a-o.ninja/anime/' + page + item[0]
        name = item[1].encode("utf-8")
        name = ParseDescription(name)
        ### scraper
        if (tfalse(addst("aodc-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    # Fetch the show page (without pagination) and cache cover + plot.
                    if '?page=0' in strona:
                        strona2 = strona.replace('?page=0', '')
                    else:
                        strona2 = strona
                    subhtml = nURL(strona2)
                    subhtml = GetDataBeetwenMarkers(subhtml, 'field-name-field-okladka field-type-image field-label-above', '<p> </p>', False)[1]
                    imgs = re.findall('<img src="(.+?)"', subhtml)
                    img = imgs[-1] if len(imgs) > 0 else ''  # last match, as before
                    plots = re.findall('<p><p>(.+?)</p>', subhtml)
                    plot = ParseDescription(plots[-1]) if len(plots) > 0 else ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
        ###
        pars = {'mode': 'EpisodesAnime', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'animeonline':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_ItemAon(html, metamethod='', content='tvshows', view='515'):
    """animeon.pl series browser; appends a "Next page" row when pagination
    markup is present.

    NOTE(review): `url` is not a parameter — the next-page arithmetic reads a
    module-level `url`; confirm it is set before this is called.
    """
    if not len(html):
        return
    shows = re.findall("<img src='http://animeon.pl/images/anime_min/mins/(.+?)'(.+?)<a href='(.+?)'>(.+?)</a></strong>(.+?)<div>(.+?)</div>", html)
    ItemCount = len(shows)
    for thumb, _gap1, _url, name, _gap2, plot in shows:
        img = 'http://animeon.pl/images/anime_min/' + thumb
        fanart = fanartAol
        labs = {'plot': plot}
        pars = {'mode': 'EpisodesAnimeon', 'site': site, 'section': section, 'title': name, 'url': _url, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': _url, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'movie':
            contextMenuItems = ContextMenu_Movies(contextLabs)
        elif section == 'animeon':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    # look for the next page (offsets advance in steps of 20)
    npage = url[:-2] + str(int(url[-2:]) + 20)
    if -1 != html.find("div class='pagenav"):
        _addon.add_directory({'mode': 'Pageanimeon', 'site': site, 'section': section, 'url': npage, 'page': npage}, {'title': "Next page"}, is_folder=True, fanart=fanartAol, img=nexticon)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_Itemscen(html, metamethod='', content='tvshows', view='515'):
    """anime-centrum.net browser: follow each tip link and scrape the target
    page's og:title / og:image / description to build one folder per show.

    Behavior-preserving restyle: ItemCount is still re-assigned by every
    inner scrape (so total_items reflects the last inner match count), and
    stale _name2/plot values still carry over when a page yields no match,
    exactly as in the original.
    """
    if len(html) == 0:
        return
    tips = re.compile('<a href="http://anime-centrum.net/(.+?)" class="tip-(.+?) tip-style-2"').findall(html)
    ItemCount = len(tips)
    if len(tips) > 0:
        for path, _tipclass in tips:
            strona = 'http://anime-centrum.net/' + path
            html2 = nURL(strona)
            # title
            titles = re.compile('<meta property="og:title" content="(.+?)" />').findall(html2)
            ItemCount = len(titles)
            if len(titles) > 0:
                for t in titles:
                    _name2 = t.encode('utf-8')
            # cover image
            covers = re.compile('<meta property="og:image" content="(.+?)" />').findall(html2)
            ItemCount = len(covers)
            if len(covers) > 0:
                for c in covers:
                    img = c
            else:
                img = ""
            # fanart: last http:...jpg match on the page
            backs = re.compile('http:(.+?).jpg').findall(html2)
            ItemCount = len(backs)
            if len(backs) > 0:
                for b in backs:
                    fanart = "http:" + b + ".jpg"
            else:
                fanart = img
            # description
            descs = re.compile('<strong>Opis:</strong>(.+)').findall(html2)
            ItemCount = len(descs)
            if len(descs) > 0:
                for d in descs:
                    plot = d
            labs = {}
            try:
                labs['plot'] = plot  # NameError on first pass w/o match -> ''
            except:
                labs['plot'] = ''
            ###
            pars = {'mode': 'EpisodesCentrum', 'site': site, 'section': section, 'title': _name2, 'url': strona, 'img': img, 'fanart': fanart}
            contextLabs = {'title': _name2, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            if section == 'movie':
                contextMenuItems = ContextMenu_Movies(contextLabs)
            elif section == 'animecentrum':
                contextMenuItems = ContextMenu_Series(contextLabs)
            else:
                contextMenuItems = []
            labs['title'] = _name2
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_ItemsDramaQueen_drama(html, metamethod='', content='tvshows', view='515'):
    # dramaqueen.pl drama browser: scrapes each show's page for fanart and a
    # description, then lists it as a folder.
    if (len(html) == 0):
        return
    idx = html.find("<div id='main' data-scroll-offset='88'>")
    if idx == -1:
        return
    idx2 = html.find('<span class="seperator extralight-border">', idx)
    if idx2 == -1:
        return
    html = html[idx:idx2]
    # Normalize double quotes to single so the regex below matches.
    html = html.replace("\"", "\'")
    r = re.compile("uploads(.+?)'(.+?)href='(.+?)' title='(.+?)'>").findall(html)
    ItemCount = len(r)
    if len(r) > 0:
        for img, xxx, _url, _name in r:
            strona = _url
            _name2 = _name
            html2 = nURL(strona)
            image = re.compile("<img src='http://www.dramaqueen.pl/wp-content/uploads/(.+?)' width").findall(html2)
            ItemCount = len(image)  # NOTE(review): clobbers the outer count used as total_items
            if len(image) > 0:
                for foto in image:
                    fanart = "http://www.dramaqueen.pl/wp-content/uploads/" + foto
            else:
                fanart = ""
            img = "http://www.dramaqueen.pl/wp-content/uploads/" + img
            # find the description
            idx = html2.find('<footer class="entry-footer"></footer></article>')
            if idx == -1:
                return  # NOTE(review): aborts the WHOLE listing, not just this item
            idx2 = html2.find('</div> </div>', idx)
            if idx2 == -1:
                return
            desc = html2[idx:idx2]
            desc = desc.replace("\n", "")
            opis = re.compile('itemprop="text" ><p><em>(.+?)</em></p>').findall(desc)
            ItemCount = len(opis)
            if len(opis) > 0:
                for desc in opis:
                    plot = clean_html(desc)
            labs = {}
            try:
                labs['plot'] = plot  # NameError if no description matched yet
            except:
                labs['plot'] = ''
            pars = {'mode': 'EpisodesDramaQueen_drama', 'site': site, 'section': section, 'title': _name2, 'url': strona, 'img': img, 'fanart': fanart}
            contextLabs = {'title': _name2, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            if section == 'movie':
                contextMenuItems = ContextMenu_Movies(contextLabs)
            elif section == 'Dramadrama':
                contextMenuItems = ContextMenu_Series(contextLabs)
            else:
                contextMenuItems = []
            labs['title'] = _name2
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_List(url):
    """cartoon-world.tv category list: one folder per show, title-colored by
    the category present in the URL (/anime, /cartoon, /movie)."""
    html = messupText(nURL(url), True, True)
    html = spAfterSplit(html, '<div class="ani-post">')
    html = spAfterSplit(html, '<div id="ddmcc_container">')
    html = spAfterSplit(html, '<div class="ddmcc">')
    html = spBeforeSplit(html, '</div>')
    pattern = '<li.*?><a.*?href="(http://www.cartoon-world.tv/watch/(.+?)/)".*?><i.*?></i>\s*(.+?)\s*</a></li>'
    matches = re.compile(pattern).findall(html)
    ItemCount = len(matches)
    if ItemCount == 0:
        return
    lowered = url.lower()
    if '/anime' in lowered:
        color_z = colors['8']
    elif '/cartoon' in lowered:
        color_z = colors['5']
    elif '/movie' in lowered:
        color_z = colors['1']
    else:
        color_z = colors['0']
    for _url, _folder, _name in matches:
        fimg = '%simages/%s.jpg' % (mainSite, _folder)
        img = '%simages/%s.jpg' % (mainSite, _folder)
        _title = cFL_('' + _name + '', color_z)
        pars = {'mode': 'Episodes', 'site': site, 'section': section, 'title': _name, 'url': _url, 'fanart': fimg, 'img': img}
        contextLabs = {'title': _name, 'year': '0000', 'url': _url, 'img': img, 'fanart': fimg, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': ''}
        if 'movie' in lowered:
            contextMenuItems = ContextMenu_Movies(contextLabs)
        else:
            contextMenuItems = ContextMenu_Series(contextLabs)
        try:
            _addon.add_directory(pars, {'title': _title}, is_folder=True, fanart=fimg, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
        except:
            pass
    set_view('tvshows', view_mode=addst('tvshows-view'))
    eod()
def Browse_Items(html, metamethod='', content='tvshows', view='515'):
    # Show browser: finds each poster link, then enriches the plot label with
    # year / type / status / rating text scraped from the surrounding markup.
    if (len(html) == 0):
        return
    s = '<a href="(.+?)"><img src="(.+?)" width="120" height="168" alt="Watch (.+?) online"'
    html = messupText(html, True, True)
    matches = re.compile(s).findall(html)  #,re.DOTALL
    ItemCount = len(matches)
    if ItemCount > 0:
        #debob(matches)
        for _url, _img, _name in matches:
            labs = {}
            # The alt-text anchors the per-show region for the split() chains below.
            _NameTag = 'alt="Watch ' + _name + ' online"'
            SN1S = '<span class="small">'; SN1B = '<span class="bold">'; SN2 = '</span>'
            img = '' + _img; fimg = '' + _img
            ##img=_artIcon; fimg=_artFanart
            try:
                labs['plot'] = html.split(_NameTag)[1].split('<div class="descr">')[1].split('</div>')[0].strip()
                if ('[<a href' in labs['plot']):
                    labs['plot'] = labs['plot'].split('[<a href')[0].strip()
                labs['plot'] += '...'
                labs['plot'] = cFL(labs['plot'], colors['12'])
            except:
                labs['plot'] = ''
            try:
                labs['year'] = html.split(_NameTag)[1].split(SN1S + 'Released:' + SN2)[1].split(SN1B)[1].split(SN2)[0].strip()
            except:
                labs['year'] = ''
            if len(labs['year']) > 0:
                labs['plot'] += CR + cFL('Year: ', colors['11']) + cFL(labs['year'], colors['10'])
            try:
                labs['type'] = html.split(_NameTag)[1].split('<span class="type_indic">')[1].split(SN2)[0].strip()
            except:
                labs['type'] = ''
            if len(labs['type']) > 0:
                labs['plot'] += CR + cFL('Video Type: ', colors['11']) + cFL(labs['type'], colors['10'])
            try:
                labs['status'] = html.split(_NameTag)[1].split(SN1S + 'Status:' + SN2)[1].split(SN1B)[1].split(SN2)[0].strip()
            except:
                labs['status'] = ''
            if len(labs['status']) > 0:
                labs['plot'] += CR + cFL('Status: ', colors['11']) + cFL(labs['status'], colors['10'])
            try:
                labs['rating'] = html.split(_NameTag)[1].split(SN1S + 'Rating:' + SN2)[1].split(SN1B)[1].split(SN2)[0].strip()
            except:
                labs['rating'] = ''
            if len(labs['rating']) > 0:
                labs['plot'] += CR + cFL('Rating: ', colors['11']) + cFL(labs['rating'], colors['10'])
            # Re-derive section from the scraped type. NOTE(review): this makes
            # `section` function-LOCAL throughout (Python scoping), shadowing
            # any module-level `section`.
            if 'movie' in labs['type'].lower():
                section = 'movie'
            elif 'show' in labs['type'].lower():
                section = 'series'
            else:
                section = 'series'
            #
            pars = {'mode': 'Episodes', 'site': site, 'section': section, 'title': _name, 'url': _url, 'img': img, 'fanart': fimg, 'year': labs['year']}
            contextLabs = {'title': _name, 'year': labs['year'], 'url': _url, 'img': img, 'fanart': fimg, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            if section == 'movie':
                contextMenuItems = ContextMenu_Movies(contextLabs)
            elif section == 'series':
                contextMenuItems = ContextMenu_Series(contextLabs)
            else:
                contextMenuItems = []
            labs['title'] = cFL_(_name, 'white')
            _addon.add_directory(pars, labs, is_folder=True, fanart=fimg, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))  #set_view(content,int(view));
def Browse_Itemscen(html, metamethod='', content='tvshows', view='515'):
    """anime-centrum.net browser (duplicate of the earlier tip-link variant):
    visits each linked show page and scrapes title, cover, fanart and
    description before adding a folder row.

    Restyle only — inner scrapes still overwrite ItemCount, and unmatched
    pages still leave _name2/plot carrying their previous values.
    """
    if len(html) == 0:
        return
    entries = re.compile('<a href="http://anime-centrum.net/(.+?)" class="tip-(.+?) tip-style-2"').findall(html)
    ItemCount = len(entries)
    if len(entries) > 0:
        for rel_path, _style in entries:
            strona = 'http://anime-centrum.net/' + rel_path
            html2 = nURL(strona)
            # og:title -> display name
            found_titles = re.compile('<meta property="og:title" content="(.+?)" />').findall(html2)
            ItemCount = len(found_titles)
            if len(found_titles) > 0:
                for title_text in found_titles:
                    _name2 = title_text.encode('utf-8')
            # og:image -> cover
            found_covers = re.compile('<meta property="og:image" content="(.+?)" />').findall(html2)
            ItemCount = len(found_covers)
            if len(found_covers) > 0:
                for cover_url in found_covers:
                    img = cover_url
            else:
                img = ""
            # any http:...jpg -> fanart (last match wins)
            found_jpgs = re.compile('http:(.+?).jpg').findall(html2)
            ItemCount = len(found_jpgs)
            if len(found_jpgs) > 0:
                for jpg_part in found_jpgs:
                    fanart = "http:" + jpg_part + ".jpg"
            else:
                fanart = img
            # description after the Opis label
            found_plots = re.compile('<strong>Opis:</strong>(.+)').findall(html2)
            ItemCount = len(found_plots)
            if len(found_plots) > 0:
                for plot_text in found_plots:
                    plot = plot_text
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''
            ###
            pars = {'mode': 'EpisodesCentrum', 'site': site, 'section': section, 'title': _name2, 'url': strona, 'img': img, 'fanart': fanart}
            contextLabs = {'title': _name2, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            if section == 'movie':
                contextMenuItems = ContextMenu_Movies(contextLabs)
            elif section == 'animecentrum':
                contextMenuItems = ContextMenu_Series(contextLabs)
            else:
                contextMenuItems = []
            labs['title'] = _name2
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_ItemShniden(html, metamethod='', content='tvshows', view='515'):
    """shinden sub-nav browser with metahandler artwork lookup.

    BUGFIX: artwork URLs were passed through str(...).replace('u', ''),
    which removed EVERY 'u' character from the URL (not just a unicode
    wrapper), corrupting any URL containing 'u'; coerce cleanly instead.
    """
    if (len(html) == 0):
        return
    idx = html.find('<dl class="sub-nav">')
    if idx == -1:
        return
    idx2 = html.find('</body>', idx)
    if idx2 == -1:
        return
    html = html[idx:idx2]
    r = re.compile('<a href="(.+?.html)">(.+?) </a>').findall(html)
    ItemCount = len(r)
    if len(r) > 0:
        for _url, _tytul in r:
            _name2 = html_entity_decode(_tytul)
            strona = _url
            ### scraper
            meta = metaget.get_meta('tvshow', _name2)
            fanart = meta['backdrop_url']
            fanart = fanart.encode('utf-8') if isinstance(fanart, unicode) else str(fanart)
            img = meta['cover_url']
            img = img.encode('utf-8') if isinstance(img, unicode) else str(img)
            plot = meta['plot']
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''
            ###
            pars = {'mode': 'EpisodesShniden', 'site': site, 'section': section, 'title': _name2, 'url': strona, 'img': img, 'fanart': fanart}
            contextLabs = {'title': _name2, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            if section == 'movie':
                contextMenuItems = ContextMenu_Movies(contextLabs)
            elif section == 'shnidenodc':
                contextMenuItems = ContextMenu_Series(contextLabs)
            else:
                contextMenuItems = []
            labs['title'] = _name2
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_Itemscen(html, name, metamethod='', content='movies', view='515'):
    """anime-joy.tv listing for one series prefix, with optional thumbnail
    and plot scraping cached via the scraper module.

    BUGFIXES vs the old version:
    - the plot branch tested len(data) (the <img> match list) instead of the
      extracted summary text, so a present summary could be dropped when the
      image list was empty (and vice versa);
    - the inner scrape reused `data`, `ItemCount` and `html`, so total_items
      passed to add_directory could end up as the plot's character count.
      Inner results now use their own names.
    """
    if (len(html) == 0):
        return
    data = re.compile('<div class="anim"><a href="(.+?)"> ' + name + '(.+?)</a>').findall(html)
    ItemCount = len(data)
    for item in data:
        strona = 'http://anime-joy.tv/' + item[0]
        name2 = name + item[1]
        ### scraper
        if (tfalse(addst("ajoy-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name2)
            try:
                if (name2 not in scrap):
                    subhtml = nURL(strona)
                    htmlimg = GetDataBeetwenMarkers(subhtml, '<div class="animedetay">', '<div class="addcolumn centerbutton">', False)[1]
                    imgs = re.findall('<img src="(.+?)"/>', htmlimg)
                    img = imgs[-1] if len(imgs) > 0 else ''  # last match, as before
                    htmlplot = GetDataBeetwenMarkers(subhtml, '<strong>Summary:</strong> </br>', '</div>', False)[1]
                    plot = clean_html(htmlplot) if len(htmlplot) > 0 else ''
                    scraper.scraper_add(host, name2, img, plot, '')
                    scrap = scraper.scraper_check(host, name2)
            except:
                scrap = ''
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
        ##
        pars = {'mode': 'Episodesjoy', 'site': site, 'section': section, 'title': name2, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name2, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'animejoy':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name2
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('links-view'))
    eod()
def Pageshniden(url, nmr, metamethod='', content='tvshows', view='515'): if 'search' in url: from common import getUserInput url = url + getUserInput('Szukaj','') html = nURL(url.replace(' ', '+')) else: html = nURL(url + nmr) html = GetDataBeetwenMarkers(html, 'data-view-table-cover', '<nav class="pagination">', False)[1] html = html.replace('\r\n', '') html = html.replace(' ', '') data = re.findall('src="(.+?)"/></td><tdclass="desc-col"><h3><ahref="(.+?)">(.+?)</a></h3>', html) ItemCount = len(data) for item in data: strona = mainSite5 + item[1] name = item[2].encode("utf-8") img = (mainSite5 + item[0]).replace('/resources/images/100x100/','/resources/images/genuine/') img = img.replace('100x100', '225x350') plot = '' fanart = fanartAol labs = {} try: labs['plot'] = plot except: labs['plot'] = '' ### pars = {'mode': 'EpisodesShniden', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart} contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']} if section == 'movie': contextMenuItems = ContextMenu_Movies(contextLabs) elif section == 'shnidenodc': contextMenuItems = ContextMenu_Series(contextLabs) else: contextMenuItems = [] labs['title'] = name _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount) if not 'search' in url: npage = str(int(nmr) + 1) print npage # if -1 != html.find("do strony "): _addon.add_directory({'mode': 'Pageshniden', 'site': site, 'section': section, 'url': url, 'page': url, 'nmr': npage}, {'title': "Next page"}, is_folder=True, fanart=fanartAol, img=nexticon) set_view(content, view_mode=addst('links-view')) eod()
def Browse_List(url, page='', content='tvshows', view='515'):
    """Generic list browser: one folder per <li> link; movies route to the
    Hosts mode, everything else to Episodes."""
    if url == '':
        return
    html = messupText(nURL(url), True, True)
    html = spAfterSplit(html, '<div id="videos_link">')
    html = spAfterSplit(html, '<div id="videos">')
    html = spBeforeSplit(html, '<ul class="pagination">')
    pattern = '<li>\s*\n*\s*<a href="(.+?)">\s*\n*\s*(.+?)\s*\n*\s*</a>\s*\n*\s*</li>'
    found = re.compile(pattern).findall(html)
    ItemCount = len(found)
    if ItemCount > 0:
        for _url, _name in found:
            labs = {'plot': ''}
            img = '' + thumbnail
            fimg = '' + fanart
            _title = '' + cFL_(_name, 'white')
            mode = 'Hosts' if section == 'movies' else 'Episodes'
            pars = {'mode': mode, 'site': site, 'section': section, 'title': _name, 'url': _url, 'img': img, 'fanart': fimg}
            contextLabs = {'title': _name, 'year': '0000', 'url': _url, 'img': img, 'fanart': fimg, 'DateAdded': '', 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section}
            if section == 'movies':
                contextMenuItems = ContextMenu_Movies(contextLabs)
            elif section == 'series':
                contextMenuItems = ContextMenu_Series(contextLabs)
            else:
                contextMenuItems = []
            labs['title'] = _title
            _addon.add_directory(pars, labs, is_folder=True, fanart=fimg, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, int(addst('episode-view')))
    eod()
def Browse_Search(html, metamethod=''):
    """Search-results browser for cartoon-world.tv list-group markup.

    Restyle note: the original assigned three successive patterns to `s`;
    only the final (list-group) pattern was live, so the two dead stores
    are dropped here. With this layout _type is always empty, so titles get
    no [type] suffix and color falls through to colors['0'].
    """
    html = messupText(html, True, True)
    s = '<li class="list-group-item"><a href="(http://www.cartoon-world.tv/watch/(.+?)/)">\s*([^<]+)</a'
    matches = re.compile(s).findall(html)
    ItemCount = len(matches)
    deb('# of items found', str(ItemCount))
    if ItemCount == 0:
        return
    for _url, _folder, _name in matches:
        _img = '%simages/%s.jpg' % (mainSite, _folder)
        _type = ''
        _status = ''
        if 'anime' in _type.lower():
            color_z = colors['8']
        elif 'cartoon' in _type.lower():
            color_z = colors['5']
        elif 'movie' in _type.lower():
            color_z = colors['1']
        else:
            color_z = colors['0']
        fimg = _img
        img = _img
        if len(_type) > 0:
            _title = cFL_('' + _name + ' ' + cFL('[' + _type + ']', color_z), color_z)
        else:
            _title = cFL_('' + _name + '', color_z)
        pars = {'mode': 'Episodes', 'site': site, 'section': section, 'title': _name, 'url': _url, 'fanart': fimg, 'img': img}
        contextLabs = {'title': _name, 'year': '0000', 'url': _url, 'img': img, 'fanart': fimg, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': ''}
        if ('movie' in _type.lower()) or ('movie' == _type.lower()) or (color_z == colors['0']):
            contextMenuItems = ContextMenu_Movies(contextLabs)
        else:
            contextMenuItems = ContextMenu_Series(contextLabs)
        try:
            _addon.add_directory(pars, {'title': _title}, is_folder=True, fanart=fimg, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
        except:
            pass
def iLiveBrowseLIVE(channel='', iLive_Sort='', iLive_Language='', iLive_category='', search=''):
    """Browse live channels from the streamlive/ilive XML API.

    channel -- used as the category filter (copied into iLive_category).
    iLive_Sort -- sort key; defaults to "0" when empty (legacy HTML path).
    iLive_Language / search -- optional filters applied to the XML list.

    The remote api/live.xml is cached to a local live.xml fallback.  When
    the URL is not the XML API, a legacy HTML scrape path crawls the
    pagination and parses channel rows instead.

    Fix: `headers` was only assigned in commented-out code, so the
    nURL(tUrl, headers=headers) call raised NameError; it is now defined
    with the site root as Referer, matching the removed lines' intent.
    Large blocks of dead commented-out code were also deleted.
    """
    iLive_category = channel
    if len(iLive_Sort) == 0:
        iLive_Sort = "0"
    tUrl = doMain + "api/live.xml"
    headers = {'Referer': doMain}
    deb("url", tUrl)
    html = nURL(tUrl, headers=headers)
    deb("length of remote xml", str(len(html)))
    LocalXML = os.path.join(_addonPath, 'live.xml')
    if len(html) > 20:
        # Remote fetch succeeded: refresh the local cache (best effort).
        try:
            _SaveFile(LocalXML, html)
        except:
            pass
    elif isFile(LocalXML):
        deb("Faild to load Remote File", "Attempting to load local file.")
        myNote("Faild to load Remote File", "Attempting to load local file.")
        try:
            html = _OpenFile(LocalXML)
        except:
            html = ''
        deb("length of local xml", str(len(html)))
        if len(html) < 20:
            deb("Faild to load Remote File", "Unable to locate local file.")
            myNote("Faild to load Remote File", "Unable to locate local file.")
    else:
        deb("Faild to load Remote File", "Unable to locate local file.")
        myNote("Faild to load Remote File", "Unable to locate local file.")
    if ('/api/' in tUrl) and ('.xml' in tUrl):
        # XML API path.
        iLive_Language = LanguageNoToNa(iLive_Language)
        debob({'iLive_Language': iLive_Language, 'iLive_category': iLive_category, 'search': search})
        if '</channels>' in html:
            html = nolines(html).split('</channels>')[0]
        if '<channels>' in html:
            html = html.split('<channels>')[1]
        # Break the one-line XML into per-channel lines for the regex.
        html = html.replace('</channel><channel>', '</channel>\n\r<channel>').replace('</channel>', '</channel\n\r>')
        deb('Length of HTML', str(len(html)))
        s = "<channel><name>(.+?)</name><url>(http://(?:www.)?(?:ilive.to|streamlive.to)?/(?:view|\D+-channel|%s)?/(\d+))</url><image>(http://snapshots.\D+.\D+/snapshots/(.+?)_snapshot.jpg)</image><category>(.+?)</category><language>(.*?)</language><views>(.*?)</views></channel" % UrlTAG
        try:
            match = re.compile(s).findall(html)
        except:
            match = []
        ItemCount = len(match)
        deb('number of matches', str(ItemCount))
        if ItemCount > 0:
            # Sort by category, then name, then language, then views.
            match = sorted(match, key=lambda i: (i[5], i[0], i[6], i[7]), reverse=False)
            for (ChName, ChUrl, ChId, ChImg, ChImgId, ChCat, ChLang, ChViews) in match:
                if (len(search) > 0) and (not search.lower() in ChName.lower()):
                    pass
                elif (len(iLive_Language) > 0) and (not iLive_Language.lower() == ChLang.lower()):
                    pass
                elif (len(iLive_category) > 0) and (not iLive_category.lower() == ChCat.lower()):
                    pass
                else:
                    debob({'ChName': ChName, 'ChUrl': ChUrl, 'ChId': ChId, 'ChImg': ChImg, 'ChImgId': ChImgId, 'ChCat': ChCat, 'ChLang': ChLang, 'ChViews': ChViews})
                    contextMenuItems = []
                    unCacheAnImage(ChImg)
                    # link '99' = auto; 0/1/2 pick HLS/RTMP/RTSP explicitly.
                    pars = {'mode': 'iLivePlay', 'site': site, 'section': section, 'title': ChName, 'url': ChUrl, 'fanart': ChImg, 'img': ChImg, 'link': '99'}
                    pars0 = {'mode': 'iLivePlay', 'site': site, 'section': section, 'title': ChName, 'url': ChUrl, 'fanart': ChImg, 'img': ChImg, 'link': '0'}
                    pars1 = {'mode': 'iLivePlay', 'site': site, 'section': section, 'title': ChName, 'url': ChUrl, 'fanart': ChImg, 'img': ChImg, 'link': '1'}
                    pars2 = {'mode': 'iLivePlay', 'site': site, 'section': section, 'title': ChName, 'url': ChUrl, 'fanart': ChImg, 'img': ChImg, 'link': '2'}
                    PlotD = cFL("[CR]Language: " + ChLang + "[CR]Category: " + ChCat + "[CR]Views: " + ChViews, "tan")
                    contextMenuItems.append(('Channel Information', 'XBMC.Action(Info)'))
                    contextMenuItems.append(('Play [HLS]', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url(pars0)))
                    contextMenuItems.append(('Play [RTMP]', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url(pars1)))
                    contextMenuItems.append(('Play [RTSP]', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url(pars2)))
                    try:
                        _addon.add_directory(pars, {'title': ChName + ' [' + cFL(ChLang, colors['6']) + ']', 'plot': PlotD}, is_folder=False, fanart=ChImg, img=ChImg, total_items=ItemCount, contextmenu_items=contextMenuItems, context_replace=False)
                    except:
                        pass
    else:
        # Legacy HTML path: crawl the remaining pages first.
        if '<p align="center" class="pages"><strong>Page: </strong>' in html:
            phtml = html.split('<p align="center" class="pages"><strong>Page: </strong>')[1].split('</span></p>')[0]
            deb("length of phtml", str(len(phtml)))
            try:
                ppages = re.compile('<a href="(http://www.(?:ilive.to|streamlive.to)?/channels/.+?)">\s*(\d+)\s*</a>').findall(phtml)
            except:
                ppages = []
            deb("number of pages", str(len(ppages) + 1))
            debob(ppages)
            dialogWait = xbmcgui.DialogProgress()
            loaded = 1
            ptotal = len(ppages) + 1
            ret = dialogWait.create('Please wait...')
            percent = (loaded * 100) / ptotal
            remaining_display = '[B]Page ' + str(loaded) + ' of ' + str(ptotal) + '[/B].'
            dialogWait.update(percent, '[B]Loading Pages...[/B]', remaining_display)
            for (ppage, pname) in ppages:
                time.sleep(1)  # be gentle with the server between pages
                html += nURL(ppage.replace(" ", "%20"))
                loaded = loaded + 1
                percent = (loaded * 100) / ptotal
                remaining_display = '[B]Page ' + str(loaded) + ' of ' + str(ptotal) + '[/B].'
                dialogWait.update(percent, '[B]Loading Pages...[/B]', remaining_display)
            dialogWait.close()
            del dialogWait
        # Then scrape channel rows out of the combined pages.
        html = nolines(messupText(html.replace(" ", ""), True, True))
        deb("length of all pages", str(len(html)))
        s = '<noscript><img width="\d+" height="\d+" src="(http://snapshots.(?:ilive.to|streamlive.to)?/snapshots/[0-9a-zA-Z]+_snapshot.jpg)" alt=".+?"\s*/></noscript>\s*</a>\s*\n*\s*'
        s += '<a href="(http://www.(?:ilive.to|streamlive.to)?/(?:view|\D+-channel|%s)?/\d+/.+?)"><strong>\s*(.+?)\s*</strong></a><br/>\s*' % UrlTAG
        s += '<span class="viewers">([0-9\,]+)</span>\s*'
        s += '<span class="totalviews">([0-9\,]+)</span><br/>\s*'
        s += '<a href="http://www.(?:ilive.to|streamlive.to)?/channels/.+?">([A-Za-z0-9\s]*)</a>\s*'
        s += '<a href="http://www.(?:ilive.to|streamlive.to)?/channels\?lang=\d*">([A-Za-z0-9\s]*)</a>\s*</li>'
        match = re.compile(s).findall(html)
        ItemCount = len(match)
        debob(match)
        for thumb, url, name, iViewers, iTotalViews, Category, lang in match:
            unCacheAnImage(thumb)
            pars = {'mode': 'iLivePlay', 'site': site, 'section': section, 'title': name, 'url': url, 'fanart': thumb, 'img': thumb}
            PlotD = cFL("[CR]Language: " + lang + "[CR]Category: " + Category + "[CR]Viewers: " + iViewers + "[CR]TotalViews: " + iTotalViews, "tan")
            try:
                _addon.add_directory(pars, {'title': name + ' [' + cFL(lang, colors['6']) + ']', 'plot': PlotD}, is_folder=False, fanart=thumb, img=thumb, total_items=ItemCount)
            except:
                pass
    set_view('movies', view_mode=addst('movies-view'))
    eod()
def Browse_Itemscen(html, name2, metamethod='', content='movies', view='515'): if (len(html) == 0): return html = html.encode('utf-8', '') html = GetDataBeetwenMarkers(html, 'Menu Główne</div>', 'pod_naglowek">Wiadomości</p>', False)[1] data = re.findall('">(.+?)</div>', html) ItemCount = len(data) if len(data) > 0: for item in data: strona = item name = strona img = '' plot = '' fanart = fanartAol labs = {} try: labs['plot'] = plot except: labs['plot'] = '' ## pars = {'mode': 'Browse_Itemslist', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart} contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']} contextMenuItems = ContextMenu_Series(contextLabs) labs['title'] = name _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount) eod()
def Browse_Items(html, metamethod='', content='tvshows', view='515'):
    """List series from diff-anime.pl: for every link found in *html*,
    fetch the series page and scrape cover, fanart and plot, then add a
    folder entry (mode 'EpisodesDiff').

    NOTE(review): this function also reads a module-level `url` (for the
    next-page computation at the bottom) — it is not a parameter; confirm
    it is set by the caller before this runs.
    """
    if (len(html) == 0): return
    r = re.compile("</a><div class='con'><a href='/(.+?)'>(.+?)</a><p>").findall(html)
    ItemCount = len(r)
    if len(r) > 0:
        for _url, _name in r:
            strona = 'http://diff-anime.pl/' + _url
            html2 = nURL(strona)  # one extra request per series for art/plot
            _name2 = _name
            # cover image ("grafika"); the loop keeps only the LAST match
            image = re.compile("</div><div class='content'><div class='con'><a href='(.+?)' class='fbox'>").findall(html2)
            ItemCount = len(image)
            if len(image) > 0:
                for foto in image:
                    img = "http://diff-anime.pl" + foto
            else:
                img = ""
            # fanart ("kadry" = stills); falls back to the cover image
            if "Nie dodano kadrów do tej serii." in html2:
                fanart = fanartSite
            else:
                image2 = re.compile("<h2>Kadry</h2></div><div class='content'><a href='(.+?)' class='fbox'>").findall(html2)
                # NOTE(review): guards on len(image), not len(image2) —
                # if image is non-empty but image2 is empty, `fanart`
                # stays unbound here; looks like a typo to confirm.
                ItemCount = len(image)
                if len(image) > 0:
                    for _fanart in image2:
                        fanart = "http://diff-anime.pl" + _fanart
                else:
                    fanart = img
            # plot ("opis"): three progressively looser regex fallbacks
            opis = re.compile("<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)</div>").findall(html2)
            ItemCount = len(opis)
            if len(opis) > 0:
                for desc in opis:
                    plot = unicode(desc, "utf-8")
            else:
                opis = re.compile("<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)<").findall(html2)
                ItemCount = len(opis)
                if len(opis) > 0:
                    for desc in opis:
                        plot = unicode(desc, "utf-8")
                else:
                    opis = re.compile("<div id='pDesc' class='panel'><div class='head'><h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)<br />").findall(html2)
                    ItemCount = len(opis)
                    if len(opis) > 0:
                        for desc in opis:
                            plot = unicode(desc, "utf-8")
                    else:
                        plot = "Nie dodano jeszcze opisu do tej serii."
            labs = {}
            try: labs['plot'] = plot
            except: labs['plot'] = ''
            ###
            pars = {'mode': 'EpisodesDiff', 'site': site, 'section': section, 'title': _name2, 'url': strona, 'img': img, 'fanart': fanart}
            contextLabs = {'title': _name2, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            if section == 'movie': contextMenuItems = ContextMenu_Movies(contextLabs)
            elif section == 'diffanime': contextMenuItems = ContextMenu_Series(contextLabs)
            else: contextMenuItems = []
            labs['title'] = _name2
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    # look for the next page: bump the trailing 2-digit offset by 10
    npage = url[:-2] + str(int(url[-2:]) + 10)
    if -1 != html.find("div class='pagenav") and -1 != html.find("class='img"):
        _addon.add_directory({'mode': 'Page', 'site': site, 'section': section, 'url': npage, 'page': npage}, {'title': "Next page"}, is_folder=True, fanart=fanartSite, img=nexticon)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_ItemSenpai(html, page, metamethod='', content='movies', view='515'): if (len(html) == 0): return html = GetDataBeetwenMarkers(html, '<h4>Lista anime</h4>', '<footer class="page-footer indigo">', False)[1] data = re.findall( 'href="\/anime\/' + page + '(.+?)">\n(\s+)<img src="/Resources/anime/covers/(.+?)"', html) ItemCount = len(data) for item in data: strona = mainSite + page + item[0] name = urllib2.unquote(page + item[0].encode("utf-8")) name = ParseDescription(name) ### scraper if (tfalse(addst("senpai-thumbs")) == True): import scraper scrap = scraper.scraper_check(host, name) try: if (name not in scrap): img = 'http://senpai.com.pl/Resources/anime/covers/' + urllib2.quote( item[2]) plot = '' scraper.scraper_add(host, name, img, plot, '') scrap = scraper.scraper_check(host, name) except: scrap = '' try: img = scrap[1] except: img = '' try: plot = scrap[2] except: plot = '' else: img = '' plot = '' fanart = fanartAol labs = {} try: labs['plot'] = plot except: labs['plot'] = '' fanart = fanartAol labs = {} try: labs['plot'] = plot except: labs['plot'] = '' ### pars = { 'mode': 'EpisodesSenpai', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart } contextLabs = { 'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot'] } if section == 'senpai': contextMenuItems = ContextMenu_Series(contextLabs) else: contextMenuItems = [] labs['title'] = name _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount) set_view(content, view_mode=addst('tvshows-view'))
def Browse_ItemAol(html, metamethod='', content='tvshows', view='515'): if (len(html) == 0): return html = GetDataBeetwenMarkers(html, 'Tytu', '</table>', False)[1] data = re.findall('<a href="/(.+?)">(.+?)</a>', html) ItemCount = len(data) for item in data: strona = mainSite4 + item[0] + '?page=0' name = item[1].encode("utf-8") ### scraper meta = metaget.get_meta('tvshow', name) fanart = str(meta['backdrop_url']).replace('u','') img = str(meta['cover_url']).replace('u','') plot = meta['plot'] labs = {} try: labs['plot'] = plot except: labs['plot'] = '' ### pars = {'mode': 'EpisodesAnime', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart} contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']} if section == 'movie': contextMenuItems = ContextMenu_Movies(contextLabs) elif section == 'animeonline': contextMenuItems = ContextMenu_Series(contextLabs) elif section == 'animedrama': contextMenuItems = ContextMenu_Series(contextLabs) else: contextMenuItems = [] labs['title'] = name _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount) set_view(content, view_mode=addst('tvshows-view'))
def Browse_Ongoing(): html=nURL(mainSite); html=messupText(html,True,True); html=spAfterSplit(html,'>Ongoing Anime and Cartoons</h'); html=spAfterSplit(html,'<div class="ani-popbox">'); html=spAfterSplit(html,'<ul class="nav nav-list">'); html=spBeforeSplit(html,'</div>'); html=spBeforeSplit(html,'</ul>') html=nolines(html).replace('</li>','</li\n\r>'); deb('Length of html',str(len(html))); #print html; #s='<li><a href="(http://www.cartoon-world.tv/watch/(.+?)/)">\s*<i class="icon-chevron-right"></i>\s*(.+?)\s*<span class="text-\D+">\s*(\D+)\s*</span></a></li' s='<li><a href="(http://www.cartoon-world.tv/watch/(.+?)/)">\s*<i class="icon-chevron-right"></i>\s*(.+?)\s*<span class="text-(?:Anime|Cartoon)?">((?:Anime|Cartoon)?)<' matches=re.compile(s).findall(html); ItemCount=len(matches); deb('Number of Matches Found',str(ItemCount)); #print matches; color_c=colors['5']; color_a=colors['8'] if ItemCount==0: return for _url,_folder,_name,_type in matches: fimg='http://www.dubbednetwork.net/static/'+_folder+'.jpg' img='http://www.dubbednetwork.net/static/'+_folder+'.jpg' fimg='%simages/%s.jpg'%(mainSite,_folder) img='%simages/%s.jpg'%(mainSite,_folder) if 'anime' in _type.lower(): color_z=color_a else: color_z=color_c _title=cFL_(''+_name+' '+cFL('['+_type+']',color_z),color_z) pars={'mode':'Episodes','site':site,'section':section,'title':_name,'url':_url,'fanart':fimg,'img':img} contextLabs={'title':_name,'year':'0000','url':_url,'img':img,'fanart':fimg,'todoparams':_addon.build_plugin_url(pars),'site':site,'section':section,'plot':''}; contextMenuItems=ContextMenu_Series(contextLabs) try: _addon.add_directory(pars,{'title':_title},is_folder=True,fanart=fimg,img=img,contextmenu_items=contextMenuItems,total_items=ItemCount) except: t='' set_view('tvshows',view_mode=addst('tvshows-view')); eod()
def _wijam_add_episode_dirs(url, rows, ItemCount):
    """Add one folder entry (mode 'Browse_Episodeswijaminne2') per
    (href, label) row scraped from a wbijam episode list."""
    for item in rows:
        strona = url + item[0]
        name = item[1]
        labs = {'plot': ''}
        pars = {'mode': 'Browse_Episodeswijaminne2', 'site': site, 'section': section, 'title': name, 'url': strona, 'page': url, 'img': '', 'fanart': fanartAol}
        contextLabs = {'title': name, 'url': strona, 'img': '', 'fanart': fanartAol, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanartAol, img='', contextmenu_items=contextMenuItems, total_items=ItemCount)

def Browse_Episodeswijam(url, page, content='episodes', view='515'):
    """List the episodes (or watch-order entries) of a wbijam series.

    Pages that link a 'kolejnosc_ogladania.html' (watch order) use one
    marker pair, plain episode pages another; both yield the same
    (href, label) rows, so the listing itself is shared in the
    _wijam_add_episode_dirs helper.

    Cleanup: the original `if 'blackclover' in url:` branch fetched the
    exact same URL in both arms — the dead conditional was removed.
    """
    if url == '':
        return
    html = nURL(url)
    html = html.encode('utf-8', '')
    if ('kolejnosc_ogladania.html' in html):
        chunk = GetDataBeetwenMarkers(html, 'html">Kolejność oglądania</a></li>', '</ul>', False)[1]
    else:
        chunk = GetDataBeetwenMarkers(html, '<div class="pmenu_naglowek_red">Odcinki anime online</div>', '</ul>', False)[1]
    data = re.findall('<a href="(.+?)">(.+?)</a>', chunk)
    _wijam_add_episode_dirs(url, data, len(data))
    eod()
def MenuListChannels(Url, Page='', TyPE='js', idList='[]', csrfToken='', MeTHoD='re.compile'):
    """List live channels scraped from a vaughnlive-style page.

    Url -- topbar or /browse/ endpoint to scrape; prefixed with mainSite
        when it does not already point at one of the known sites.
    Page / TyPE / idList / csrfToken -- accepted for call-site
        compatibility; not used by this scraper.
    MeTHoD -- 're.compile' (regex scrape) or 'split' (legacy '</MVN>'
        comma-separated payload).

    Fix: the 'split' branch did `matches=matches.sort()`, rebinding
    matches to None (list.sort() sorts in place and returns None), which
    made the following loop raise TypeError.  It now sorts in place.
    Dead commented-out database/pagination code was removed.
    """
    debob(['Url', Url, 'TyPE', TyPE])
    if len(Url) == 0:
        debob("No url found.")
        eod()
        return
    if (not mainSite in Url) and (not mainSite2 in Url) and (not mainSite3 in Url) and (not mainSite4 in Url):
        Url = mainSite + Url
    deb('Url', Url)
    html = messupText(nolines(nURL(Url, headers={'Referer': mainSite}, cookie_file=CookFile, load_cookie=True)), True, True)
    deb('length of html', str(len(html)))
    if len(html) == 0:
        debob("No html found.")
        eod()
        return
    # Pick the regex matching the endpoint being scraped.
    if (mainSite + "/app/topbar.php?s=") in Url:
        s = '<div\s+class="topbar_img">\s*<a\s+href="(\D+://(?:www.)?(?:/|vapers.tv/|breakers.tv/|vaughnlive.tv/|instagib.tv/)?)(.*?)"\s*>(())\s*<img\s+name="mvnPicTopBar_.*?"\s+width="\d*"\s+height="\d*" border="\d*"\s+onerror="mvnImages.profileError\(\'mvnPicTopBar_[0-9A-Za-z_\-]+\',\'[0-9A-Za-z_\-]+\'\);"\s+class="[0-9A-Za-z_\-]*"\s+alt="[0-9A-Za-z_\-]+(?: - \D+.)?"\s+title="[0-9A-Za-z_\-]+(?: - \D+.)?"\s*/>\s*</a>\s*</div'
    elif (mainSite + "/browse/") in Url:
        s = '<a href="((?:http://)?(?:/|vapers.tv/|breakers.tv/|vaughnlive.tv/|instagib.tv/)?)(.+?)" target="_top"><img src="//(thumbnails.vaughnsoft.com/(\d+)/fetch/\D+/.+?.png)" class"browseThumb" width="\d*" height="\d*"\s*/></a>'
    else:
        return
    html = html.replace('</div>', '</div\n\r\a>')
    if (MeTHoD == 'split') and ('</MVN>' in html):
        debob(['MeTHoD', MeTHoD, '"</MVN>" is in HTML.'])
        matches = html.split('</MVN>')[-1].split(',')
    elif (MeTHoD == 're.compile') or (not '</MVN>' in html):
        debob(['MeTHoD', MeTHoD, '"</MVN>" is not in HTML.'])
        try:
            matches = re.compile(s).findall(html)
            deb('# of matches found', str(len(matches)))
        except:
            matches = []
            debob('No matches were found.')
    else:
        matches = []
        debob('No matching method was found.')
    if len(matches) > 0:
        iC = len(matches)
        if MeTHoD == 're.compile':
            if tfalse(addst('sort-by-name')) == True:
                matches = sorted(matches, key=lambda i: i[1], reverse=False)
            try:
                # Drop previously downloaded thumbnail art before refetching.
                if isPath(_thumbArtPath) == True:
                    shutil.rmtree(_thumbArtPath)
            except:
                pass
            for (PrefixD, match, img, iTS) in matches:
                labs = {}
                cMI = []
                is_folder = False
                plot = ''
                name = match.replace('_', ' ')
                labs[u'plot'] = plot
                LocImgName = ''
                img = getThumb(match, FetchLoc='live', TimeStamp=iTS)
                fimg = getBg(match)
                if tfalse(addst('thumbnail-type')) == True:
                    # Download the thumb locally and point the item at it.
                    ThumbFile = match + '.png'
                    DownloadThisSilently(img, ThumbFile, _thumbArtPath, useResolver=False)
                    ThumbFileWithPath = thumbart(ThumbFile)
                    if os.path.isfile(ThumbFileWithPath) == True:
                        img = '' + ThumbFileWithPath
                        unCacheAnImage(img)
                if '://' in PrefixD:
                    url = PrefixD + "%s" % match
                    urlPage = PrefixD + "%s" % match
                    urlEmbedVideo = PrefixD + "embed/video/%s" % match
                    urlEmbedChat = PrefixD + "embed/chat/%s" % match
                else:
                    url = mainSite + "/%s" % match
                    urlPage = mainSite + "/%s" % match
                    urlEmbedVideo = mainSite + "/embed/video/%s" % match
                    urlEmbedChat = mainSite + "/embed/chat/%s" % match
                labs[u'title'] = cFL(name, colorA)
                pars = {'url': url, 'title': name, 'fimg': fimg, 'img': img, 'mode': 'PlayLiveStream', 'channel': match, 'site': site, 'section': section, 'sourcetype': 'auto'}
                Clabs = {'title': name, 'year': '', 'url': url, 'commonid': '', 'img': img, 'fanart': fimg, 'plot': labs[u'plot'], 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section}
                try:
                    cMI = ContextMenu_LiveStreams(Clabs)
                except:
                    pass
                try:
                    debob(['pars', pars, 'labs', labs])
                except:
                    pass
                cMI.append(('Visit Page', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'mode': 'BrowseUrl', 'url': urlPage})))
                cMI.append(('Visit Video', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'mode': 'BrowseUrl', 'url': urlEmbedVideo})))
                cMI.append(('Visit Chat', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'mode': 'BrowseUrl', 'url': urlEmbedChat})))
                try:
                    _addon.add_directory(pars, labs, is_folder=is_folder, fanart=fimg, img=img, contextmenu_items=cMI, total_items=iC, context_replace=False)
                except:
                    pass
        elif MeTHoD == 'split':
            if tfalse(addst('sort-by-name')) == True:
                matches.sort()  # was `matches=matches.sort()` -> rebinds to None
            for (match) in matches:
                if len(match.strip()) > 0:
                    PrefixD = ''
                    img = ''
                    iTS = ''
                    labs = {}
                    cMI = []
                    is_folder = False
                    plot = ''
                    name = match.replace('_', ' ')
                    labs[u'plot'] = plot
                    LocImgName = ''
                    img = getThumb(match, FetchLoc='live', TimeStamp='0')
                    fimg = getBg(match)
                    if '://' in PrefixD:
                        url = PrefixD + "%s" % match
                        urlPage = PrefixD + "%s" % match
                        urlEmbedVideo = PrefixD + "embed/video/%s" % match
                        urlEmbedChat = PrefixD + "embed/chat/%s" % match
                    else:
                        url = mainSite + "/%s" % match
                        urlPage = mainSite + "/%s" % match
                        urlEmbedVideo = mainSite + "/embed/video/%s" % match
                        urlEmbedChat = mainSite + "/embed/chat/%s" % match
                    labs[u'title'] = cFL(name, colorA)
                    pars = {'url': url, 'title': name, 'fimg': fimg, 'img': img, 'mode': 'PlayLiveStream', 'channel': match, 'site': site, 'section': section, 'sourcetype': 'auto'}
                    Clabs = {'title': name, 'year': '', 'url': url, 'commonid': '', 'img': img, 'fanart': fimg, 'plot': labs[u'plot'], 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section}
                    try:
                        cMI = ContextMenu_LiveStreams(Clabs)
                    except:
                        pass
                    try:
                        debob(['pars', pars, 'labs', labs])
                    except:
                        pass
                    cMI.append(('Visit Page', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'mode': 'BrowseUrl', 'url': urlPage})))
                    cMI.append(('Visit Video', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'mode': 'BrowseUrl', 'url': urlEmbedVideo})))
                    cMI.append(('Visit Chat', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'mode': 'BrowseUrl', 'url': urlEmbedChat})))
                    try:
                        _addon.add_directory(pars, labs, is_folder=is_folder, fanart=fimg, img=img, contextmenu_items=cMI, total_items=iC, context_replace=False)
                    except:
                        pass
    set_view('tvshows', view_mode=addst('tvshows-view'))
    eod()
def Browse_Itemslist(url, page='', content='episodes', view='515'):
    """List wbijam series for the category named in *url* ('Polecane
    serie anime', 'Lżejsze klimaty' or 'Akcja'), optionally scraping a
    thumbnail per series when the 'wbij-thumbs' setting is enabled.

    NOTE(review): if *url* matches none of the three categories below,
    data1/data2/link/mode are unbound and the GetDataBeetwenMarkers call
    raises NameError — confirm callers only pass the three known values.
    """
    # `data` is reused for several purposes: first the index URL, then
    # the marker-delimited HTML, then the (href, label) rows.
    data = 'http://www.inne.wbijam.pl/'
    html = nURL(data)
    html = html.encode('utf-8', '')
    if 'Polecane serie anime' in url:
        data1 = '<div class="pmenu_naglowek_blue">Polecane serie anime</div>'
        data2 = '<a href="http://www.inne.wb'
        link = ''
        mode = 'Browse_Episodeswijam'
    elif 'Lżejsze klimaty' in url:
        data1 = '<div class="pmenu_naglowek_red">Lżejsze klimaty</div>'
        data2 = '<div class="pmenu_naglowek_blue">Polecane serie anime</div>'
        link = 'http://www.inne.wbijam.pl/'
        mode = 'Browse_Episodeswijaminne'
    elif 'Akcja' in url:
        data1 = '<div class="pmenu_naglowek_red">Akcja</div>'
        data2 = '<div class="pmenu_naglowek_red">Lżejsze klimaty</div>'
        link = 'http://www.inne.wbijam.pl/'
        mode = 'Browse_Episodeswijaminne'
    data = GetDataBeetwenMarkers(html, data1, data2, False)[1]
    data = re.findall('<a href="(.+?)">(.+?)</a></li>', data)
    data.sort()
    ItemCount = len(data)
    if len(data) > 0:
        for item in data:
            strona = link + item[0]
            name = item[1].encode('utf-8', '')
            # scraper: optional per-series thumbnail lookup/caching.
            if (tfalse(addst("wbij-thumbs")) == True):
                import scraper
                scrap = scraper.scraper_check(host, name)
                try:
                    if (name not in scrap):
                        # NOTE(review): the inner loop rebinds `html`,
                        # `data`, `item` and `ItemCount`, shadowing the
                        # outer loop's names; iteration still works (the
                        # outer iterator is already bound) but ItemCount
                        # passed to add_directory below is the inner one.
                        html = nURL(strona)
                        data = re.findall('<img src="grafika/(.+?)">', html)
                        ItemCount = len(data)
                        if len(data) > 0:
                            for item in data:
                                img = url + '/grafika/' + item
                        else:
                            img = ''
                        plot = ''
                        scraper.scraper_add(host, name, img, plot, '')
                        scrap = scraper.scraper_check(host, name)
                except:
                    scrap = ''
                try: img = scrap[1]
                except: img = ''
                try: plot = scrap[2]
                except: plot = ''
            else:
                img = ''
                plot = ''
            fanart = fanartAol
            labs = {}
            try: labs['plot'] = plot
            except: labs['plot'] = ''
            ##
            pars = {'mode': mode, 'site': site, 'section': section, 'title': name, 'url': strona, 'page': url, 'img': img, 'fanart': fanart}
            contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            contextMenuItems = ContextMenu_Series(contextLabs)
            labs['title'] = name
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
    eod()
def Browse_ItemAol(html, url, metamethod='', content='movies', view='515'): if (len(html) == 0): return html = html.replace('\'', '') html = html.replace('\n', '') html = html.replace('\r', '') html = html.replace('\t', '') html = GetDataBeetwenMarkers(html, 'zaczynaj', '<header>Kresk', False)[1] # print html.encode('ascii','ignore') data = re.findall( 'href="/kreskowka/(.+?)"><div(.+?)<b class="larger white">(.+?)</b>', html) ItemCount = len(data) for item in data: strona = mainSite + '/kreskowka/' + item[0] name = item[2].encode("utf-8") name = ParseDescription(name) ### scraper if (tfalse(addst("kresk-thumbs")) == True): import scraper scrap = scraper.scraper_check(host, name) try: if (name not in scrap): html = nURL(strona) html = GetDataBeetwenMarkers(html, '>Serie</a>', '<div class="info-basic">', False)[1] #print html.encode('ascii','ignore') data = re.findall("/upload/cats/(.+?).jpg", html) ItemCount = len(data) if len(data) > 0: for item in data: img = mainSite + '/upload/cats/' + item + '.jpg' else: img = '' plot = '' scraper.scraper_add(host, name, img, plot, '') scrap = scraper.scraper_check(host, name) except: scrap = '' try: img = scrap[1] except: img = '' try: plot = scrap[2] except: plot = '' else: img = '' plot = '' fanart = fanartAol labs = {} try: labs['plot'] = plot except: labs['plot'] = '' ### pars = { 'mode': 'Episodeskresk', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart } contextLabs = { 'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot'] } if section == 'kreskoweczki': contextMenuItems = ContextMenu_Series(contextLabs) else: contextMenuItems = [] labs['title'] = name _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount) set_view(content, view_mode=addst('tvshows-view')) eod()
def ListShows(Url, Page='', TyPE='js', idList='[]', csrfToken=''):
    """List streamup.com rooms/users as addon directory entries.

    Url       -- listing endpoint (prefixed with mainSite2 when relative).
    Page      -- page number as a string; '' means the first page (GET).
    TyPE      -- parser to use: 'xml', 'js', 'js|featured', 'html' or 'html|user'.
    idList    -- str() of the room-id list already shown (POSTed for paging).
    csrfToken -- CSRF token; scraped from the homepage when empty.
    """
    import ast  # stdlib; safe replacement for eval() on the id list
    if len(csrfToken) == 0:
        # Scrape a fresh CSRF token from the homepage.
        maipageHtml = nURL('https://streamup.com/', cookie_file=CookFile, load_cookie=False, save_cookie=True)
        csrfToken = re.compile('content="(.*?)" name="csrf-token"').findall(maipageHtml)[0]
    debob(['Url', Url, 'TyPE', TyPE])
    if len(idList) == 0:
        idList = '[]'
    if len(Page) == 0:
        page = 1
    else:
        page = int(Page)
    deb('page', str(page))
    if len(Url) == 0:
        return
    if (not mainSite in Url) and (not mainSite2 in Url) and (not mainSite3 in Url) and (not mainSite4 in Url):
        Url = mainSite2 + Url
    deb('Url', Url)
    if (page == 1) or (len(Page) == 0):
        # First page: plain GET.
        IdsList = []
        html = nURL(Url, cookie_file=CookFile, load_cookie=False, save_cookie=True,
                    headers={'X-CSRF-Token': csrfToken, 'X-Requested-With': 'XMLHttpRequest'})
    else:
        # Later pages: POST the ids already listed so the site skips them.
        # idList is produced by str(...) on a list of id strings, so
        # ast.literal_eval is a safe drop-in for the previous eval().
        IdsList = ast.literal_eval(idList)
        post_data = [('already_loaded_rooms[]', i) for i in IdsList]
        debob(['post_data', post_data])
        UrlBB = Url + "?page=" + str(page)
        debob(['UrlBB', UrlBB]); debob(['idList', idList])
        html = nURL(UrlBB, method='post', form_data=post_data,
                    headers={'Referer': Url, 'X-CSRF-Token': csrfToken,
                             'X-Requested-With': 'XMLHttpRequest',
                             'Origin': 'https://streamup.com',
                             'Accept': 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q='},
                    cookie_file=CookFile, load_cookie=True, save_cookie=True)
    html = messupText(nolines(html), True, True)
    deb('length of html', str(len(html)))
    if len(html) == 0:
        eod(); return
    if "<title>Offline for Maintenance</title>" in html:
        debob("Offline for Maintenance"); eod(); return
    html = html.replace('\\n', '').replace('\n', '').replace('\r', '').replace('\a', '').replace('\t', '').replace("\\'", "'").replace('\\"', '"').replace('\\/', '/')
    # The 'xml', 'js' and 'html' listings all share one channel-tile pattern;
    # user listings use the person-tile pattern.  (Patterns kept byte-identical
    # to the originals.)
    sChannel = "<a class='js-already-loaded-channel' data-room-id='(.*?)' data-room-slug='(.*?)' href='(.+?)'>" + "<div class='.*?'>(?:<div class='(homeChannel(?:Live|Title|Text)?)'>(.*?)</div>)?" + "<div class='(homeChannel(?:Live|Title|Text)?)'>(.*?)</div><div class='(homeChannel(?:Live|Title|Text)?)'>(.*?)</div>" + '<img alt="(.*?)(?: (\d+))?" onerror=".*?" src="(.*?)" /></div></a'
    sUser = "<a href='(.+?)'><div class='homePerson(?: homePerson\d+)?'><div class='(homePerson(?:Username|Text)?)'>(.*?)</div><div class='(homePerson(?:Username|Text)?)'>(.*?)</div>" + '<img alt="(.*?)" class="js-user-avatar-image" onerror=".*?" src="(.*?)" /></div></a'
    # NOTE(review): the original source was line-mangled here; this nesting is
    # the only reading that keeps both the 'js' and plain 'html' paths alive --
    # confirm against upstream history.
    if TyPE == 'xml':
        s = sChannel
    elif TyPE == 'js':
        s = sChannel
    elif TyPE == 'js|featured':
        s = "<div class='homeChannelsFeatured-\d+' data-roomSlug='(.*?)'><div id='(homeChannelsFeaturedTextLabel)'>(Featured)</div><h2 id='(homeChannelsFeaturedTextChannelName)'>" + '<a href="(.+?)">(.+?)</a'
    elif TyPE == 'html':
        s = sChannel
        if 'users.js' in Url:
            # A users.js listing is parsed with the person pattern instead.
            TyPE = 'html|user'
            s = sUser
    elif TyPE == 'html|user':
        s = sUser
    else:
        return
    html = html.replace('</a>', '</a\n>')
    ListOfIds = IdsList  # ids shown so far; extended below, handed to next page
    debob(html)
    try:
        matches = re.compile(s).findall(html)
        deb('# of matches found', str(len(matches)))
        debob(matches)
    except Exception:
        matches = []
    if len(matches) > 0:
        iC = len(matches)
        for match in matches:
            labs = {}; cMI = []; is_folder = False
            if TyPE == 'html|user':
                # Person tile: (class-name, value) pairs sit at indexes 1..4.
                (name, liVe, plot) = ('', '', '')
                is_folder = True
                (roomId, roomSlug) = ('', '')
                (url, img) = (match[0], match[6].replace('https://', 'http://'))
                for (nb, nc) in [[1, 2], [3, 4]]:
                    if (len(name) == 0) and (match[nb] == 'homePersonUsername') and (len(match[nc]) > 0): name = match[nc]
                    if (len(plot) == 0) and (match[nb] == 'homePersonText') and (len(match[nc]) > 0): plot = match[nc]
            elif TyPE == 'js|featured':
                (name, liVe, plot) = (match[5], match[2], '')
                is_folder = True
                (roomId, roomSlug) = ('', match[0])
                (url, img) = (match[4], artp('default_channel'))
            else:
                # Channel tile: (class-name, value) pairs sit at indexes 3..8.
                (name, liVe, plot) = ('', '', '')
                (roomId, roomSlug, url, img) = (match[0], match[1], match[2], match[11].replace('https://', 'http://'))
                for (nb, nc) in [[3, 4], [5, 6], [7, 8]]:
                    if (len(name) == 0) and (match[nb] == 'homeChannelTitle') and (len(match[nc]) > 0): name = match[nc]
                    if (len(liVe) == 0) and (match[nb] == 'homeChannelLive') and (len(match[nc]) > 0): liVe = match[nc]
                    if (len(plot) == 0) and (match[nb] == 'homeChannelText') and (len(match[nc]) > 0): plot = match[nc]
            ListOfIds.append(roomId)
            fimg = fanartSite
            deb('img', img)
            # Fetch the thumbnail once purely to detect a dead image URL
            # (one HTTP request per item -- inherited behavior).
            imgHtml = nURL(img, method='get', headers={'Referer': Url})
            if len(imgHtml) == 0:
                img = artp('default_channel')
            labs[u'plot'] = plot
            if len(liVe) > 0:
                labs[u'title'] = cFL(name + cFL(" [" + cFL(liVe, colorC) + "]", colorB), colorA)
            else:
                labs[u'title'] = cFL(name, colorA)
            pars = {'roomid': roomId, 'roomslug': roomSlug, 'url': url, 'title': name, 'fimg': fimg, 'type': TyPE, 'live': liVe, 'imdb_id': '', 'img': img, 'mode': 'PlayStreamUP', 'site': site, 'section': section}
            if TyPE == 'html|user':
                # Drilling into a user re-enters this lister in 'html' mode.
                pars['mode'] = 'ListShows'; pars['page'] = ''; pars['type'] = 'html'
            Clabs = {'title': name, 'year': '', 'url': url, 'commonid': '', 'img': img, 'fanart': fimg, 'plot': labs[u'plot'], 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section}
            try:
                cMI = ContextMenu_Series(Clabs)
            except Exception:
                pass
            debob(['pars', pars, 'labs', labs])
            try:
                _addon.add_directory(pars, labs, is_folder=is_folder, fanart=fimg, img=img, contextmenu_items=cMI, total_items=iC, context_replace=False)
            except Exception:
                pass
    NextPage = str(int(page) + 1)
    if (("page=" + NextPage) in html) and (not TyPE == 'js|featured'):
        _addon.add_directory({'mode': 'ListShows', 'site': site, 'url': Url, 'page': NextPage, 'type': str(TyPE), 'idlist': str(ListOfIds), 'csrfToken': csrfToken}, {'title': cFL('>> Next %s' % cFL(NextPage, colorA), colorB)}, is_folder=True, fanart=fanartSite, img=psgn('next'))
    set_view('tvshows', view_mode=addst('tvshows-view'))
    eod()
def MenuListChannels(Url, Page='', TyPE='js', idList='[]', csrfToken='', MeTHoD='re.compile'):
    """List live channels from a vaughnlive-style topbar or browse page.

    Url    -- listing url (prefixed with mainSite when relative).
    MeTHoD -- 're.compile' parses HTML tiles with a regex; 'split' parses a
              '</MVN>'-terminated comma-separated channel dump.
    Page/TyPE/idList/csrfToken are accepted for interface compatibility only.

    Fixes: 'matches = matches.sort()' rebound matches to None (list.sort()
    sorts in place and returns None), crashing the split path whenever the
    sort-by-name setting was on.  The duplicated per-item body is now the
    shared _AddChannelItem helper.
    """
    def _AddChannelItem(PrefixD, match, iTS, iC):
        # Build and add one channel entry (shared by both parse paths).
        labs = {}; cMI = []; is_folder = False; plot = ''
        name = match.replace('_', ' ')
        labs[u'plot'] = plot
        img = getThumb(match, FetchLoc='live', TimeStamp=iTS)
        fimg = getBg(match)
        if '://' in PrefixD:
            url = PrefixD + "%s" % match; urlPage = PrefixD + "%s" % match
            urlEmbedVideo = PrefixD + "embed/video/%s" % match
            urlEmbedChat = PrefixD + "embed/chat/%s" % match
        else:
            url = mainSite + "/%s" % match; urlPage = mainSite + "/%s" % match
            urlEmbedVideo = mainSite + "/embed/video/%s" % match
            urlEmbedChat = mainSite + "/embed/chat/%s" % match
        labs[u'title'] = cFL(name, colorA)
        pars = {'url': url, 'title': name, 'fimg': fimg, 'img': img, 'mode': 'PlayLiveStream', 'channel': match, 'site': site, 'section': section, 'sourcetype': 'auto'}
        Clabs = {'title': name, 'year': '', 'url': url, 'commonid': '', 'img': img, 'fanart': fimg, 'plot': labs[u'plot'], 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section}
        try:
            cMI = ContextMenu_LiveStreams(Clabs)
        except Exception:
            pass
        try:
            debob(['pars', pars, 'labs', labs])
        except Exception:
            pass
        cMI.append(('Visit Page', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'mode': 'BrowseUrl', 'url': urlPage})))
        cMI.append(('Visit Video', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'mode': 'BrowseUrl', 'url': urlEmbedVideo})))
        cMI.append(('Visit Chat', 'XBMC.RunPlugin(%s)' % _addon.build_plugin_url({'mode': 'BrowseUrl', 'url': urlEmbedChat})))
        try:
            _addon.add_directory(pars, labs, is_folder=is_folder, fanart=fimg, img=img, contextmenu_items=cMI, total_items=iC, context_replace=False)
        except Exception:
            pass

    debob(['Url', Url, 'TyPE', TyPE])
    if len(Url) == 0:
        debob("No url found."); eod(); return
    if (not mainSite in Url) and (not mainSite2 in Url) and (not mainSite3 in Url) and (not mainSite4 in Url):
        Url = mainSite + Url
    deb('Url', Url)
    html = messupText(nolines(nURL(Url, headers={'Referer': mainSite}, cookie_file=CookFile, load_cookie=True)), True, True)
    deb('length of html', str(len(html)))
    if len(html) == 0:
        debob("No html found."); eod(); return
    # Pick the tile regex for the page type (patterns kept byte-identical).
    if (mainSite + "/app/topbar.php?s=") in Url:
        s = '<div\s+class="topbar_img">\s*<a\s+href="(\D+://(?:www.)?(?:/|vapers.tv/|breakers.tv/|vaughnlive.tv/|instagib.tv/)?)(.*?)"\s*>(())\s*<img\s+name="mvnPicTopBar_.*?"\s+width="\d*"\s+height="\d*" border="\d*"\s+onerror="mvnImages.profileError\(\'mvnPicTopBar_[0-9A-Za-z_\-]+\',\'[0-9A-Za-z_\-]+\'\);"\s+class="[0-9A-Za-z_\-]*"\s+alt="[0-9A-Za-z_\-]+(?: - \D+.)?"\s+title="[0-9A-Za-z_\-]+(?: - \D+.)?"\s*/>\s*</a>\s*</div'
    elif (mainSite + "/browse/") in Url:
        s = '<a href="((?:http://)?(?:/|vapers.tv/|breakers.tv/|vaughnlive.tv/|instagib.tv/)?)(.+?)" target="_top"><img src="//(thumbnails.vaughnsoft.com/(\d+)/fetch/\D+/.+?.png)" class"browseThumb" width="\d*" height="\d*"\s*/></a>'
    else:
        return
    html = html.replace('</div>', '</div\n\r\a>')
    if (MeTHoD == 'split') and ('</MVN>' in html):
        debob(['MeTHoD', MeTHoD, '"</MVN>" is in HTML.'])
        matches = html.split('</MVN>')[-1].split(',')
    elif (MeTHoD == 're.compile') or (not '</MVN>' in html):
        debob(['MeTHoD', MeTHoD, '"</MVN>" is not in HTML.'])
        try:
            matches = re.compile(s).findall(html)
            deb('# of matches found', str(len(matches)))
        except Exception:
            matches = []
            debob('No matches were found.')
    else:
        matches = []
        debob('No matching method was found.')
    if len(matches) > 0:
        iC = len(matches)
        if MeTHoD == 're.compile':
            if tfalse(addst('sort-by-name')) == True:
                matches = sorted(matches, key=lambda i: i[1], reverse=False)
            for (PrefixD, match, img, iTS) in matches:
                _AddChannelItem(PrefixD, match, iTS, iC)
        elif MeTHoD == 'split':
            if tfalse(addst('sort-by-name')) == True:
                # BUGFIX: was "matches = matches.sort()" which left matches=None.
                matches.sort()
            for match in matches:
                if len(match.strip()) > 0:
                    _AddChannelItem('', match, '0', iC)
    set_view('tvshows', view_mode=addst('tvshows-view'))
    eod()
def Browse_ItemAnimecentrum(html, url, metamethod='', content='movies', view='515'): if (len(html) == 0): return data = re.findall('tb-cell"><a href="(.+?)"><img src="(.+?)" alt="(.+?)"><\/a>', html) ItemCount = len(data) for item in data: strona = mainSite + item[0] strona = strona + '?page=1' name = item[2].encode("utf-8") name = ParseDescription(name) ### scraper if (tfalse(addst("acentr-thumbs")) == True): import scraper scrap = scraper.scraper_check(host, name) try: if (name not in scrap): html = nURL(strona) html = GetDataBeetwenMarkers(html, '<article class="content-1">', '<section class="gap-2">', False)[1] data = re.findall('<img src="(.+?)" alt=', html) ItemCount = len(data) if len(data) > 0: for item in data: img = item+ "|Referer=http://anime-centrum.pl/" else: img = '' data = re.findall('<p>(.+?)</p>', html) ItemCount = len(data) if len(data) > 0: for item in data: plot = item plot = ParseDescription(item) else: plot = '' scraper.scraper_add(host, name, img, plot, '') scrap = scraper.scraper_check(host, name) except: scrap = '' try: img = scrap[1] except: img = '' try: plot = scrap[2] except: plot = '' else: img = '' plot = '' fanart = fanartAol labs = {} try: labs['plot'] = plot except: labs['plot'] = '' ### pars = {'mode': 'EpisodesAnimecentrum', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart} contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']} if section == 'animecentrum': contextMenuItems = ContextMenu_Series(contextLabs) else: contextMenuItems = [] labs['title'] = name _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount) set_view(content, view_mode=addst('tvshows-view'))
def Browse_ItemsDramaQueen_drama(html, metamethod='', content='tvshows', view='515'): if (len(html) == 0): return idx = html.find("<div id='main' data-scroll-offset='88'>") if idx == -1: return idx2 = html.find('<span class="seperator extralight-border">', idx) if idx2 == -1: return html = html[idx:idx2] html = html.replace("\"", "\'") r = re.compile("uploads(.+?)'(.+?)href='(.+?)' title='(.+?)'>").findall( html) ItemCount = len(r) if len(r) > 0: for img, xxx, _url, _name in r: strona = _url _name2 = _name html2 = nURL(strona) image = re.compile( "<img src='http://www.dramaqueen.pl/wp-content/uploads/(.+?)' width" ).findall(html2) ItemCount = len(image) if len(image) > 0: for foto in image: fanart = "http://www.dramaqueen.pl/wp-content/uploads/" + foto else: fanart = "" img = "http://www.dramaqueen.pl/wp-content/uploads/" + img #szukanie opisu idx = html2.find( '<footer class="entry-footer"></footer></article>') if idx == -1: return idx2 = html2.find('</div> </div>', idx) if idx2 == -1: return desc = html2[idx:idx2] desc = desc.replace("\n", "") opis = re.compile( 'itemprop="text" ><p><em>(.+?)</em></p>').findall(desc) ItemCount = len(opis) if len(opis) > 0: for desc in opis: plot = clean_html(desc) labs = {} try: labs['plot'] = plot except: labs['plot'] = '' pars = { 'mode': 'EpisodesDramaQueen_drama', 'site': site, 'section': section, 'title': _name2, 'url': strona, 'img': img, 'fanart': fanart } contextLabs = { 'title': _name2, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot'] } if section == 'movie': contextMenuItems = ContextMenu_Movies(contextLabs) elif section == 'Dramadrama': contextMenuItems = ContextMenu_Series(contextLabs) else: contextMenuItems = [] labs['title'] = _name2 _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount) set_view(content, view_mode=addst('tvshows-view'))
def Browse_ItemShniden(html, metamethod='', content='tvshows', view='515'): if (len(html) == 0): return idx = html.find('<dl class="sub-nav">') if idx == -1: return idx2 = html.find('</body>', idx) if idx2 == -1: return html = html[idx:idx2] r = re.compile('<a href="(.+?.html)">(.+?) </a>').findall(html) ItemCount = len(r) if len(r) > 0: for _url, _tytul in r: _name2 = html_entity_decode(_tytul) strona = _url ### scraper meta = metaget.get_meta('tvshow', _name2) fanart = str(meta['backdrop_url']).replace('u', '') img = str(meta['cover_url']).replace('u', '') plot = meta['plot'] labs = {} try: labs['plot'] = plot except: labs['plot'] = '' ### pars = { 'mode': 'EpisodesShniden', 'site': site, 'section': section, 'title': _name2, 'url': strona, 'img': img, 'fanart': fanart } contextLabs = { 'title': _name2, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot'] } if section == 'movie': contextMenuItems = ContextMenu_Movies(contextLabs) elif section == 'shnidenodc': contextMenuItems = ContextMenu_Series(contextLabs) else: contextMenuItems = [] labs['title'] = _name2 _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount) set_view(content, view_mode=addst('tvshows-view'))
# NOTE(review): this redefines Browse_ItemAol (an earlier definition with a
# different signature exists above in this file); the later definition wins at
# import time.  Renaming would break mode-dispatch callers, so only flagged.
def Browse_ItemAol(html, url, metamethod='', content='movies', view='515'):
    """List animezone series from a letter-index page, plus a Next-page entry.

    html -- listing page markup; url -- current listing url, whose trailing
    digit is incremented to build the next-page url.

    Fixes: the inner scraping block used to rebind html/data/ItemCount, so
    total_items was the inner match count instead of the number of series.
    """
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, 'Anime na liter', '<ul class="pagination">', False)[1]
    data = re.findall('<a href="(.+?)">(.+?)</a>', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite7 + item[0]
        name = ParseDescription(item[1].encode("utf-8"))
        if (tfalse(addst("zone-thumbs")) == True):
            # Optional cover/plot scraping, cached via the scraper module.
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    page_html = nURL(strona)
                    page_html = GetDataBeetwenMarkers(page_html, 'og:image', '<h5>Odcinki</h5>', False)[1]
                    page_html = page_html.replace('\'', '').replace('\n', '').replace('\r', '').replace('\t', '')
                    print(page_html.encode('ascii', 'ignore'))
                    imgs = re.findall('content="(.+?)"></head>', page_html)
                    if len(imgs) > 0:
                        img = imgs[-1]
                    else:
                        img = ''
                    plots = re.findall('summary">(.+?)<div class', page_html)
                    if len(plots) > 0:
                        plot = ParseDescription(plots[-1])
                    else:
                        plot = ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except Exception:
                scrap = ''
            try:
                img = scrap[1]
            except Exception:
                img = ''
            try:
                plot = scrap[2]
            except Exception:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except Exception:
            labs['plot'] = ''
        pars = {'mode': 'Episodeszone', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'animezone':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    # Next page: bump the trailing digit of the current url.
    # NOTE(review): reads only the LAST character, so this breaks past page 9,
    # and the original 'do strony' guard was commented out, so the entry is
    # always added -- behavior preserved, flagged for confirmation.
    npage = url[:-1] + str(int(url[-1:]) + 1)
    _addon.add_directory({'mode': 'Pagezone', 'site': site, 'section': section, 'url': npage, 'page': npage}, {'title': "Next page"}, is_folder=True, fanart=fanartAol, img=nexticon)
    set_view(content, view_mode=addst('tvshows-view'))