def mode_subcheck(mode='', site='', section='', url=''):
    ## Dispatch sub-modes within a site module. Note: 'page' and 'thumbnail' are
    ## expected to exist as module-level globals when the matching modes fire.
    deb('mode', mode)
    if (mode == 'SectionMenu'): SectionMenu()
    elif (mode == '') or (mode == 'main') or (mode == 'MainMenu'): SectionMenu()
    elif (mode == 'SubMenu'): SubMenu()
    elif (mode == 'VidPlay'): VidPlay(addpr('title', ''), url, thumbnail)
    elif (mode == 'Hosts'): Browse_Hosts(url, addpr('title', ''), thumbnail)
    # elif (mode == 'BrowseLatestEpisodes'): Browse_LatestEpisodes(addpr('title', ''))
    elif (mode == 'BrowseOngoing'): Browse_Ongoing()
    elif (mode == 'Listings'): Browse_List(url)
    # elif (mode == 'Page'): Browse_Page(url=url, page=page, metamethod=addpr('metamethod', ''))  # (site, section)
    elif (mode == 'Episodes'): Browse_Episodes(url, page)
    elif (mode == 'AZ'): Browse_AZ()
    elif (mode == 'Genres'): Browse_Genres()
    elif (mode == 'PlayFromHost'): PlayFromHost(url)
    elif (mode == 'Search'): Search_Site(title=addpr('title', ''), url=url, page=page, metamethod=addpr('metamethod', ''))  # (site, section)
    elif (mode == 'SearchLast'): Search_Site(title=addst('LastSearchTitle' + SiteTag), url=url, page=page, metamethod=addpr('metamethod', ''), endit=tfalse(addpr('endit', 'true')))  # (site, section)
    elif (mode == 'About'): About()
    elif (mode == 'FavoritesList'): Fav_List(site=site, section=section, subfav=addpr('subfav', ''))
    ##
    elif (mode == 'PlayURL'): PlayURL(url)
    elif (mode == 'PlayURLs'): PlayURLs(url)
    elif (mode == 'PlayURLstrm'): PlayURLstrm(url)
    elif (mode == 'PlayVideo'): PlayVideo(url)
    elif (mode == 'PlayItCustom'): PlayItCustom(url, addpr('streamurl', ''), addpr('img', ''), addpr('title', ''))
    elif (mode == 'PlayItCustomL2A'): PlayItCustomL2A(url, addpr('streamurl', ''), addpr('img', ''), addpr('title', ''))
    elif (mode == 'Settings'): _addon.addon.openSettings()  # Another method: _plugin.openSettings()  ## Settings for this addon.
    elif (mode == 'ResolverSettings'): import urlresolver; urlresolver.display_settings()  ## Settings for the urlresolver script.module.
    elif (mode == 'ResolverUpdateHostFiles'): import urlresolver; urlresolver.display_settings()  ## Settings for the urlresolver script.module.
    elif (mode == 'TextBoxFile'): TextBox2().load_file(url, addpr('title', ''))  # eod()
    elif (mode == 'TextBoxUrl'): TextBox2().load_url(url, addpr('title', ''))  # eod()
    elif (mode == 'Download'):
        try: _addon.resolve_url(url)
        except: pass
        debob([url, addpr('destfile', ''), addpr('destpath', ''), str(tfalse(addpr('useResolver', 'true')))])
        DownloadThis(url, addpr('destfile', ''), addpr('destpath', ''), tfalse(addpr('useResolver', 'true')))
    elif (mode == 'toJDownloader'): SendTo_JDownloader(url, tfalse(addpr('useResolver', 'true')))
    elif (mode == 'cFavoritesEmpty'):
        fav__COMMON__empty(site=site, section=section, subfav=addpr('subfav', ''))
        xbmc.executebuiltin("XBMC.Container.Refresh")
    elif (mode == 'cFavoritesRemove'): fav__COMMON__remove(site=site, section=section, subfav=addpr('subfav', ''), name=addpr('title', ''), year=addpr('year', ''))
    elif (mode == 'cFavoritesAdd'): fav__COMMON__add(site=site, section=section, subfav=addpr('subfav', ''), name=addpr('title', ''), year=addpr('year', ''), img=addpr('img', ''), fanart=addpr('fanart', ''), plot=addpr('plot', ''), commonID=addpr('commonID', ''), commonID2=addpr('commonID2', ''), ToDoParams=addpr('todoparams', ''), Country=addpr('country', ''), Genres=addpr('genres', ''), Url=url)
    elif (mode == 'AddVisit'):
        try:
            visited_add(addpr('title'))
            RefreshList()
        except: pass
    elif (mode == 'RemoveVisit'):
        try:
            visited_remove(addpr('title'))
            RefreshList()
        except: pass
    elif (mode == 'EmptyVisit'):
        try:
            visited_empty()
            RefreshList()
        except: pass
    elif (mode == 'refresh_meta'): refresh_meta(addpr('video_type', ''), TagAnimeName(addpr('title', '')), addpr('imdb_id', ''), addpr('alt_id', ''), addpr('year', ''))
    else:
        myNote(header='Site: "' + site + '"', msg=mode + ' (mode) not found.')
        import mMain
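## The param/setting helpers used throughout these dispatchers live elsewhere in
## the addon. A minimal sketch of the behaviour they are assumed to have --
## hypothetical re-implementations, not the addon's real ones: addpr() reads a
## plugin:// query-string parameter with a default, and tfalse() maps the
## setting strings 'true'/'false' onto booleans.
#
# import sys
# import urlparse
#
# def addpr(key, default=''):
#     # Assumption: sys.argv[2] carries the '?mode=...&site=...' query string.
#     params = urlparse.parse_qs(sys.argv[2][1:] if len(sys.argv) > 2 else '')
#     return params.get(key, [default])[0]
#
# def tfalse(value, default=False):
#     # Assumption: settings arrive as the strings 'true'/'false'.
#     v = str(value).strip().lower()
#     if v in ('true', '1', 'yes'): return True
#     if v in ('false', '0', 'no'): return False
#     return default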
def check_mode(mode='', site='', section='', url=''):
    deb('Mode', mode); deb('param >> site', site); deb('param >> section', section); deb('param >> url', url); deb('param >> title', addpr('title', ''))
    if (mode == '') or (mode == 'main') or (mode == 'MainMenu'):
        if (site == ''):
            import mMain
            return
    if (mode == 'PlayURL'): PlayURL(url)
    elif (mode == 'PlayURLs'): PlayURLs(url)
    elif (mode == 'PlayURLstrm'): PlayURLstrm(url)
    elif (mode == 'PlayFromHost'): PlayFromHost(url)
    elif (mode == 'PlayVideo'): PlayVideo(url)
    elif (mode == 'PlayItCustom'): PlayItCustom(url, addpr('streamurl', ''), addpr('img', ''), addpr('title', ''))
    elif (mode == 'PlayItCustomL2A'): PlayItCustomL2A(url, addpr('streamurl', ''), addpr('img', ''), addpr('title', ''))
    elif (mode == 'Settings'): _addon.addon.openSettings()  # Another method: _plugin.openSettings()  ## Settings for this addon.
    elif (mode == 'ResolverSettings'): import urlresolver; urlresolver.display_settings()  ## Settings for the urlresolver script.module.
    elif (mode == 'ResolverUpdateHostFiles'): import urlresolver; urlresolver.display_settings()  ## Settings for the urlresolver script.module.
    elif (mode == 'TextBoxFile'): TextBox2().load_file(url, addpr('title', ''))  # eod()
    elif (mode == 'TextBoxUrl'): TextBox2().load_url(url, addpr('title', ''))  # eod()
    elif (mode == 'Download'):
        try: _addon.resolve_url(url)
        except: pass
        debob([url, addpr('destfile', ''), addpr('destpath', ''), str(tfalse(addpr('useResolver', 'true')))])
        DownloadThis(url, addpr('destfile', ''), addpr('destpath', ''), tfalse(addpr('useResolver', 'true')))
    elif (mode == 'toJDownloader'): SendTo_JDownloader(url, tfalse(addpr('useResolver', 'true')))
    elif (mode == 'cFavoritesEmpty'): fav__COMMON__empty(site=site, section=section, subfav=addpr('subfav', ''))
    elif (mode == 'cFavoritesRemove'): fav__COMMON__remove(site=site, section=section, subfav=addpr('subfav', ''), name=addpr('title', ''), year=addpr('year', ''))
    elif (mode == 'cFavoritesAdd'): fav__COMMON__add(site=site, section=section, subfav=addpr('subfav', ''), name=addpr('title', ''), year=addpr('year', ''), img=addpr('img', ''), fanart=addpr('fanart', ''), plot=addpr('plot', ''), commonID=addpr('commonid', ''), commonID2=addpr('commonid2', ''), ToDoParams=addpr('todoparams', ''), Country=addpr('country', ''), Genres=addpr('genres', ''), Url=url)
    else:
        ### Import the .py module for the requested site.
        ### Note: siteL is lowercased, so every comparison below must use the
        ### lowercased name (the old mixed-case comparisons could never match).
        siteL = site.lower()
        debob(xbmc.translatePath(os.path.join(_addonPath, site + '.py')))
        if (siteL == 'others'): import mOthers
        elif (siteL == 'misc'): import mMisc
        elif (siteL == 'mmovies_and_tv'): import mMovies_and_TV
        elif (siteL == 'manime'): import mAnime
        elif (siteL == 'mlivestreams'): import mLiveStreams
        elif (siteL == 'resolverplugins'): import mResolverPlugins
        # elif isFile(xbmc.translatePath(os.path.join(_addonPath, site + '.py'))): __import__(site)
        else:
            myNote(header='Mode: "' + mode + '"', msg=site + ' (site) not found.')
            import mMain
            return
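## How these dispatchers are assumed to be driven from the plugin entry point
## (a hypothetical default.py shown for orientation only -- the real wiring is
## part of the addon, not this sketch):
#
# check_mode(mode=addpr('mode', ''), site=addpr('site', ''),
#            section=addpr('section', ''), url=addpr('url', ''))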
def Browse_ItemAnimecentrum(html, url, metamethod='', content='movies', view='515'):
    if (len(html) == 0): return
    data = re.findall('tb-cell"><a href="(.+?)"><img src="(.+?)" alt="(.+?)"><\/a>', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite + item[0] + '?page=1'
        name = ParseDescription(item[2].encode("utf-8"))
        ### scraper: fetch (and cache) thumbnail + plot for each series.
        if (tfalse(addst("acentr-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    # Local names so the outer loop's html/data/item/ItemCount are not clobbered.
                    shtml = nURL(strona)
                    shtml = GetDataBeetwenMarkers(shtml, '<article class="content-1">', '<section class="gap-2">', False)[1]
                    imgs = re.findall('<img src="(.+?)" alt=', shtml)
                    img = (imgs[-1] + '|Referer=http://anime-centrum.pl/') if len(imgs) > 0 else ''
                    plots = re.findall('<p>(.+?)</p>', shtml)
                    plot = ParseDescription(plots[-1]) if len(plots) > 0 else ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try: img = scrap[1]
            except: img = ''
            try: plot = scrap[2]
            except: plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try: labs['plot'] = plot
        except: labs['plot'] = ''
        ###
        pars = {'mode': 'EpisodesAnimecentrum', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'animecentrum':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_ItemSenpai(html, page, metamethod='', content='movies', view='515'):
    if (len(html) == 0): return
    html = GetDataBeetwenMarkers(html, '<h4>Lista anime</h4>', '<footer class="page-footer indigo">', False)[1]
    data = re.findall('href="\/anime\/' + page + '(.+?)">\n(\s+)<img src="/Resources/anime/covers/(.+?)"', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite + page + item[0]
        name = urllib2.unquote(page + item[0].encode("utf-8"))
        name = ParseDescription(name)
        ### scraper: cover images can be derived from the listing itself.
        if (tfalse(addst("senpai-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    img = 'http://senpai.com.pl/Resources/anime/covers/' + urllib2.quote(item[2])
                    plot = ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try: img = scrap[1]
            except: img = ''
            try: plot = scrap[2]
            except: plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try: labs['plot'] = plot
        except: labs['plot'] = ''
        ###
        pars = {'mode': 'EpisodesSenpai', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'senpai':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_Itemslist(url, page='', content='episodes', view='515'):
    html = nURL('http://www.inne.wbijam.pl/')
    html = html.encode('utf-8', '')
    if 'Polecane serie anime' in url:
        data1 = '<div class="pmenu_naglowek_blue">Polecane serie anime</div>'
        data2 = '<a href="http://www.inne.wb'
        link = ''
        mode = 'Browse_Episodeswijam'
    elif 'Lżejsze klimaty' in url:
        data1 = '<div class="pmenu_naglowek_red">Lżejsze klimaty</div>'
        data2 = '<div class="pmenu_naglowek_blue">Polecane serie anime</div>'
        link = 'http://www.inne.wbijam.pl/'
        mode = 'Browse_Episodeswijaminne'
    elif 'Akcja' in url:
        data1 = '<div class="pmenu_naglowek_red">Akcja</div>'
        data2 = '<div class="pmenu_naglowek_red">Lżejsze klimaty</div>'
        link = 'http://www.inne.wbijam.pl/'
        mode = 'Browse_Episodeswijaminne'
    else:
        return  # unknown category: data1/data2 would be undefined below
    data = GetDataBeetwenMarkers(html, data1, data2, False)[1]
    data = re.findall('<a href="(.+?)">(.+?)</a></li>', data)
    data.sort()
    ItemCount = len(data)
    if len(data) > 0:
        for item in data:
            strona = link + item[0]
            name = item[1].encode('utf-8', '')
            ### scraper
            if (tfalse(addst("wbij-thumbs")) == True):
                import scraper
                scrap = scraper.scraper_check(host, name)
                try:
                    if (name not in scrap):
                        # Local names so the outer loop's data/item are not clobbered.
                        shtml = nURL(strona)
                        imgs = re.findall('<img src="grafika/(.+?)">', shtml)
                        img = (url + '/grafika/' + imgs[-1]) if len(imgs) > 0 else ''
                        plot = ''
                        scraper.scraper_add(host, name, img, plot, '')
                        scrap = scraper.scraper_check(host, name)
                except:
                    scrap = ''
                try: img = scrap[1]
                except: img = ''
                try: plot = scrap[2]
                except: plot = ''
            else:
                img = ''
                plot = ''
            fanart = fanartAol
            labs = {}
            try: labs['plot'] = plot
            except: labs['plot'] = ''
            ##
            pars = {'mode': mode, 'site': site, 'section': section, 'title': name, 'url': strona, 'page': url, 'img': img, 'fanart': fanart}
            contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            contextMenuItems = ContextMenu_Series(contextLabs)
            labs['title'] = name
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
    eod()
def Browse_ItemAol(html, page, metamethod='', content='movies', view='515'):
    if (len(html) == 0): return
    html = GetDataBeetwenMarkers(html, 'Tytu', '</table>', False)[1]
    page = page.lower()
    data = re.findall('<a href="https://a-o.ninja/anime/' + page + '(.+?)">(.+?)</a>', html)
    ItemCount = len(data)
    for item in data:
        strona = 'https://a-o.ninja/anime/' + page + item[0]
        name = ParseDescription(item[1].encode("utf-8"))
        ### scraper
        if (tfalse(addst("aodc-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    strona2 = strona.replace('?page=0', '') if '?page=0' in strona else strona
                    # Local names so the outer loop's html/data/item are not clobbered.
                    shtml = nURL(strona2)
                    shtml = GetDataBeetwenMarkers(shtml, 'field-name-field-okladka field-type-image field-label-above', '<p> </p>', False)[1]
                    imgs = re.findall('<img src="(.+?)"', shtml)
                    img = imgs[-1] if len(imgs) > 0 else ''
                    plots = re.findall('<p><p>(.+?)</p>', shtml)
                    plot = ParseDescription(plots[-1]) if len(plots) > 0 else ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try: img = scrap[1]
            except: img = ''
            try: plot = scrap[2]
            except: plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try: labs['plot'] = plot
        except: labs['plot'] = ''
        ###
        pars = {'mode': 'EpisodesAnime', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'animeonline':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_ItemAol(html, url, metamethod='', content='movies', view='515'):
    if (len(html) == 0): return
    for ch in ('\'', '\n', '\r', '\t'):
        html = html.replace(ch, '')
    html = GetDataBeetwenMarkers(html, 'zaczynaj', '<header>Kresk', False)[1]
    # print html.encode('ascii', 'ignore')
    data = re.findall('href="/kreskowka/(.+?)"><div(.+?)<b class="larger white">(.+?)</b>', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite + '/kreskowka/' + item[0]
        name = ParseDescription(item[2].encode("utf-8"))
        ### scraper
        if (tfalse(addst("kresk-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    # Local names so the outer loop's html/data/item are not clobbered.
                    shtml = nURL(strona)
                    shtml = GetDataBeetwenMarkers(shtml, '>Serie</a>', '<div class="info-basic">', False)[1]
                    imgs = re.findall("/upload/cats/(.+?).jpg", shtml)
                    img = (mainSite + '/upload/cats/' + imgs[-1] + '.jpg') if len(imgs) > 0 else ''
                    plot = ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try: img = scrap[1]
            except: img = ''
            try: plot = scrap[2]
            except: plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try: labs['plot'] = plot
        except: labs['plot'] = ''
        ###
        pars = {'mode': 'Episodeskresk', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'kreskoweczki':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
    eod()
def Browse_Itemscen(html, name, metamethod='', content='movies', view='515'):
    if (len(html) == 0): return
    data = re.compile('<div class="anim"><a href="(.+?)"> ' + name + '(.+?)</a>').findall(html)
    ItemCount = len(data)
    for item in data:
        strona = 'http://anime-joy.tv/' + item[0]
        name2 = name + item[1]
        ### scraper
        if (tfalse(addst("ajoy-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name2)
            try:
                if (name2 not in scrap):
                    # Local names so the outer loop's html/data/item are not clobbered.
                    shtml = nURL(strona)
                    htmlimg = GetDataBeetwenMarkers(shtml, '<div class="animedetay">', '<div class="addcolumn centerbutton">', False)[1]
                    imgs = re.findall('<img src="(.+?)"/>', htmlimg)
                    img = imgs[-1] if len(imgs) > 0 else ''
                    htmlplot = GetDataBeetwenMarkers(shtml, '<strong>Summary:</strong> </br>', '</div>', False)[1]
                    # Check the plot markup itself (the old code tested the image list here).
                    plot = clean_html(htmlplot) if len(htmlplot) > 0 else ''
                    scraper.scraper_add(host, name2, img, plot, '')
                    scrap = scraper.scraper_check(host, name2)
            except:
                scrap = ''
            try: img = scrap[1]
            except: img = ''
            try: plot = scrap[2]
            except: plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try: labs['plot'] = plot
        except: labs['plot'] = ''
        ##
        pars = {'mode': 'Episodesjoy', 'site': site, 'section': section, 'title': name2, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name2, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'animejoy':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name2
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('links-view'))
    eod()
def Browse_ItemAol(html, url, metamethod='', content='movies', view='515'):
    if (len(html) == 0): return
    html = GetDataBeetwenMarkers(html, 'Anime na liter', '<ul class="pagination">', False)[1]
    data = re.findall('<a href="(.+?)">(.+?)</a>', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite7 + item[0]
        name = ParseDescription(item[1].encode("utf-8"))
        ### scraper
        if (tfalse(addst("zone-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    # Local names so the outer loop's html/data/item are not clobbered.
                    shtml = nURL(strona)
                    shtml = GetDataBeetwenMarkers(shtml, 'og:image', '<h5>Odcinki</h5>', False)[1]
                    for ch in ('\'', '\n', '\r', '\t'):
                        shtml = shtml.replace(ch, '')
                    # print shtml.encode('ascii', 'ignore')  # leftover debug output
                    imgs = re.findall('content="(.+?)"></head>', shtml)
                    img = imgs[-1] if len(imgs) > 0 else ''
                    plots = re.findall('summary">(.+?)<div class', shtml)
                    plot = ParseDescription(plots[-1]) if len(plots) > 0 else ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try: img = scrap[1]
            except: img = ''
            try: plot = scrap[2]
            except: plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try: labs['plot'] = plot
        except: labs['plot'] = ''
        ###
        pars = {'mode': 'Episodeszone', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'animezone':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    # Next page: increment the trailing page number, which also handles
    # multi-digit pages (a plain last-character increment breaks on e.g. 19).
    npage = re.sub(r'(\d+)$', lambda m: str(int(m.group(1)) + 1), url)
    # if -1 != html.find("do strony "):
    _addon.add_directory({'mode': 'Pagezone', 'site': site, 'section': section, 'url': npage, 'page': npage}, {'title': "Next page"}, is_folder=True, fanart=fanartAol, img=nexticon)
    set_view(content, view_mode=addst('tvshows-view'))
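## All of the Browse_Item* functions above lean on a small 'scraper' module that
## caches one (img, plot) pair per series title, so listing pages do not have to
## re-fetch every detail page on every visit. A minimal sketch of the interface
## they assume -- scraper_check() returning '' on a miss and a
## [name, img, plot, extra] row on a hit, scraper_add() storing a row. The JSON
## file name and layout below are assumptions, not the real module:
#
# import json
# import os
#
# _CACHE = xbmc.translatePath(os.path.join(_addonPath, 'scraper_cache.json'))  # hypothetical path
#
# def scraper_check(host, name):
#     # A miss returns '' so callers can test both `name not in scrap`
#     # and index a hit as scrap[1] (img) / scrap[2] (plot).
#     try:
#         with open(_CACHE, 'rb') as f:
#             return json.load(f)[host][name]
#     except (IOError, ValueError, KeyError):
#         return ''
#
# def scraper_add(host, name, img, plot, extra):
#     try:
#         with open(_CACHE, 'rb') as f:
#             cache = json.load(f)
#     except (IOError, ValueError):
#         cache = {}
#     cache.setdefault(host, {})[name] = [name, img, plot, extra]
#     with open(_CACHE, 'wb') as f:
#         json.dump(cache, f)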