def SubMenu():
###Anime-Centrum###
    if section == 'animecentrum':
        tUrl = mainSite2 + 'anime-online/'
        _addon.add_directory({'mode': 'PageCentrum', 'site': site, 'section': section, 'url': tUrl + 'others'}, {'title': '#'}, is_folder=True, fanart=fanartSiteCentrum, img=addonPath + '/art/znak.png')
        for az in MyAlphabet:
            _addon.add_directory({'mode': 'PageCentrum', 'site': site, 'section': section, 'url': tUrl + az + '.html'}, {'title': az}, is_folder=True, fanart=fanartSiteCentrum, img=addonPath + '/art/'+ az +'.png')
###Diff-Anime###
    if section == 'diffanime':
        _addon.add_directory({'mode': 'AZ', 'site': site, 'section': section}, {'title': "Lista anime A-Z."}, is_folder=True, fanart=fanartSite, img=iconDiff)
        _addon.add_directory({'mode': 'aktualnosci', 'site': site, 'section': section, 'url': 'http://diff-anime.pl/newsy'}, {'title': "Aktualności"}, is_folder=True, fanart=fanartSiteCentrum, img=iconDiff)
###DramaQueen###
    if section == 'DramaQueen':
        _addon.add_directory({'mode': 'SubSubMenu', 'site': site, 'section': 'Dramadrama', 'url': mainSite3 + 'drama/'}, {'title': "Drama"}, is_folder=True, fanart=fanartDrama, img=iconSite)
        _addon.add_directory({'mode': 'SubSubMenu', 'site': site, 'section': 'Dramamovie'}, {'title': 'Film'}, is_folder=True, fanart=fanartDrama, img=iconSite)
###Anime-Online###
    if section == 'animeonline':
        _addon.add_directory({'mode': 'SubSubMenu', 'site': site, 'section': 'animeonline'}, {'title': "Odcinki Anime"}, is_folder=True, fanart=fanartAol, img=iconOdcinki)
        _addon.add_directory({'mode': 'SubSubMenu', 'site': site, 'section': 'animedrama'}, {'title': "Drama Anime"}, is_folder=True, fanart=fanartAol, img=iconOdcinki)
        _addon.add_directory({'mode': 'recenzje', 'site': site, 'section': section, 'url': 'https://www.youtube.com/user/Spychu91/videos?sort=dd&shelf_id=0&view=0'}, {'title': "Recenzje Spycha"}, is_folder=True, fanart=fanartAol, img=iconspychu)
###Anime-Shinden###
    if section == 'animeshinden':
        _addon.add_directory({'mode': 'SubSubMenu', 'site': site, 'section': 'shnidenodc'}, {'title': "Alfabetycznie."}, is_folder=True, fanart=fanartAol, img=iconShniden)
        url = mainSite5 + 'animelist/index.php'
        _addon.add_directory({'mode': 'Browse_GenreShniden', 'site': site, 'section': 'shnidengat', 'url': url }, {'title': "Lista wg gatunku."}, is_folder=True, fanart=fanartAol, img=iconShniden)
###Anime-On###
    if section == 'animeon':
        tUrl = mainSite6 + 'anime.php?letter='
        for az in AnimeonAlphabet:
            _addon.add_directory({'mode': 'Pageanimeon', 'site': site, 'section': section, 'url': tUrl + az + '&rowstart=00'}, {'title': az}, is_folder=True, fanart=fanartSiteCentrum, img=addonPath + '/art/'+ az +'.png')
    set_view('list', view_mode=addst('default-view'))
    eod()
def SubSubMenu():
###DramaQueen###
    if section == 'Dramadrama':
        _addon.add_directory({'mode': 'dramaqueen_drama', 'site': site, 'section': section, 'url': mainSite3 + 'drama/japonskie/'}, {'title': "Japońskie"}, is_folder=True, fanart=fanartDrama, img=iconSite)
        _addon.add_directory({'mode': 'dramaqueen_drama', 'site': site, 'section': section, 'url': mainSite3 + 'drama/koreanska/'}, {'title': "Koreańskie"}, is_folder=True, fanart=fanartDrama, img=iconSite)
        _addon.add_directory({'mode': 'dramaqueen_drama', 'site': site, 'section': section, 'url': mainSite3 + 'drama/tajwanska/'}, {'title': "Tajwańskie"}, is_folder=True, fanart=fanartDrama, img=iconSite)
    if section == 'Dramamovie':
        _addon.add_directory({'mode': 'dramaqueen_drama_movie', 'site': site, 'section': section, 'url': mainSite3 + 'drama/film/japonski/'}, {'title': "Film - japoński"}, is_folder=True, fanart=fanartDrama, img=iconSite)
        _addon.add_directory({'mode': 'dramaqueen_drama_movie', 'site': site, 'section': section, 'url': mainSite3 + 'drama/film/koreanski/'}, {'title': "Film - koreański"}, is_folder=True, fanart=fanartDrama, img=iconSite)
###Anime-Online###
    if section == 'animeonline':
        tUrl = mainSite4 + 'lista-anime/'
        for az in AonlineAlphabet:
            _addon.add_directory({'mode': 'Pageanimeonline', 'site': site, 'section': section, 'url': tUrl + az}, {'title': az}, is_folder=True, fanart=fanartAol, img=addonPath + '/art/'+ az +'.png')
    if section == 'animedrama':
        tUrl = mainSite4 + 'Drama/viewpage.php?page_id='
        for az, xy in zip(MyAlphabet, AonlineDrama):
            _addon.add_directory({'mode': 'Pageanimeonline', 'site': site, 'section': section, 'url': tUrl + xy}, {'title': az}, is_folder=True, fanart=fanartAol, img=addonPath + '/art/'+ az +'.png')
###Anime-Shniden###
    if section == 'shnidenodc':
        tUrl = mainSite5 + 'animelist/index.php?&first=&year_from=&year_to=&eps_from=&eps_to=&eps_other=&letter='
        _addon.add_directory({'mode': 'Pageshniden', 'site': site, 'section': section, 'url': tUrl + '0'}, {'title': '#'}, is_folder=True, fanart=fanartSite, img=addonPath + '/art/znak.png')
        for az in MyAlphabet:
            _addon.add_directory({'mode': 'Pageshniden', 'site': site, 'section': section, 'url': tUrl + az}, {'title': az}, is_folder=True, fanart=fanartSite, img=addonPath + '/art/'+ az +'.png')
    set_view('list', view_mode=addst('default-view'))
    eod()
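# Note: the A-Z menus above iterate over alphabet lists (MyAlphabet, AnimeonAlphabet,
# AonlineAlphabet, AonlineDrama) that are defined elsewhere in the addon and are not part
# of this snippet. A minimal sketch of such a list, assuming plain A-Z entries (the
# site-specific lists may differ):
MyAlphabet = [chr(c) for c in range(ord('A'), ord('Z') + 1)]  # ['A', 'B', ..., 'Z']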
Example #3
def seasons(collection_ids = common.args.url):
	for collection_id in collection_ids.split(','):
		if ',' not in collection_ids:
			season_url = SEASONSEPISODES
		else:
			season_url = SEASONSEPISODESEXTRA
		season_data = connection.getURL(season_url % collection_id)
		season_tree = BeautifulSoup(season_data, 'html.parser')
		episode_count = int(season_tree.episodes['totalitems'])
		if episode_count > 0:
			if ',' not in collection_ids:
				display = 'Episodes'
			else:
				display = 'Episodes - %s' % season_tree.episode['collectiontitle']
			common.add_directory(display,  SITE, 'episodes', FULLEPISODES % collection_id)
	for collection_id in collection_ids.split(','):
		if ',' not in collection_ids:
			seasonclips_url = SEASONSCLIPS
		else:
			seasonclips_url = SEASONSCLIPSEXTRA
		season_data2 = connection.getURL(seasonclips_url % collection_id)
		season_tree2 = BeautifulSoup(season_data2, 'html.parser')
		episode_count = int(season_tree2.episodes['totalitems'])
		if episode_count > 0:
			if ',' not in collection_ids:
				display = 'Clips'
			else:
				display = 'Clips - %s' % season_tree2.episode['collectiontitle']
			common.add_directory(display,  SITE, 'episodes', CLIPS % collection_id)
	common.set_view('seasons')
Example #4
def episodes(episode_url = common.args.url):
	episode_data = connection.getURL(episode_url)
	episode_menu = simplejson.loads(episode_data)
	for episode_item in episode_menu['items']:
		if episode_item['videos']:
			url = episode_item['guid']
			episode_name = episode_item['title']
			episode_plot = episode_item['description']
			episode_airdate = common.format_date(episode_item['airdate'], '%Y-%m-%d %H:%M:%S', '%d.%m.%Y')
			episode_duration = int(episode_item['videos'].itervalues().next()['length']) / 1000
			try:
				episode_thumb = episode_item['images']['kids-mezzannine-16x9']['url']
			except:
				try:
					episode_thumb = episode_item['images']['kids-mezzannine-4x3']['url']
				except:
					episode_thumb = episode_item['images']['mezzanine']['url']
			HD = False
			for video in episode_item['videos']['flash'].itervalues():
				try:
					if video['bitrate'] > 2000:
						HD = True
				except:
					pass
			u = sys.argv[0]
			u += '?url="' + urllib.quote_plus(url) + '"'
			u += '&mode="' + SITE + '"'
			u += '&sitemode="play_video"'
			infoLabels={	'title' : episode_name,
							'durationinseconds' : episode_duration,
							'plot' : episode_plot,
							'premiered' : episode_airdate }
			common.add_video(u, episode_name, episode_thumb, infoLabels = infoLabels, HD = HD, quality_mode = 'select_quailty')
	common.set_view('episodes')
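# Note: the plugin callback URL assembled in episodes() above (sys.argv[0] plus quoted
# parameters) ends up looking roughly like this, assuming sys.argv[0] is the plugin base
# URL; the addon name and values are illustrative only:
#   plugin://plugin.video.example/?url="some%2Fguid"&mode="SITE_NAME"&sitemode="play_video"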
def Browse_EpisodesAnime(url, page='', content='episodes', view='515'):
    if url == '':
        return
    html = GetDataBeetwenMarkers(nURL(url), '<div class="views-row views-row-1 views-row-odd views-row-first">', '</section> <!-- /.block -->', False)[1]
    data = re.findall('<div class="field-content lista_odc_tytul_pozycja"><a href="/(.+?)">(.+?)</a>', html)
    ItemCount = len(data)
    print data
    for item in data:
        url2 = mainSite4 + item[0]
        name = item[1].encode("utf-8")
        img = ""
        fanart = fanartAol
        plot = ""
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
###
        contextLabs = {'title': name, 'year': '0000', 'url': url2, 'img': img, 'fanart': fanart, 'DateAdded': '', 'plot': labs['plot']}
        contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
        pars = {'mode': 'PlayAnime', 'site': site, 'section': section, 'title': name, 'url': url2, 'img': img, 'fanart': fanart}
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
# next page
    npage = url[:-1] + str(int(url[-1:]) + 1)
    print 'bbb' , npage
    if -1 != html.find("do strony "):
        _addon.add_directory({'mode': 'EpisodesAnime', 'site': site, 'section': section, 'url': npage, 'page': npage}, {'title': "Next page"}, is_folder=True, fanart=fanartAol, img=nexticon)
    set_view(content, view_mode=addst('links-view'))
    eod()
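# Note: Browse_EpisodesAnime above builds the next-page URL as url[:-1] + str(int(url[-1:]) + 1),
# which only works while the trailing page number is a single digit. A hedged sketch of a more
# general increment (next_page_url is a hypothetical helper, not part of the addon):
import re

def next_page_url(url):
    # Bump the trailing number in "...page=N"; return the URL unchanged if none is found.
    match = re.search(r'(\d+)$', url)
    if not match:
        return url
    return url[:match.start()] + str(int(match.group(1)) + 1)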
Example #6
def webepisodes():
	episode_url = common.args.url
	episode_data = connection.getURL(episode_url)
	web_tree = BeautifulSoup(episode_data, 'html.parser')
	episode_menu = web_tree.find_all('div', class_ = 'view-mode-vid_teaser_show_episode')
	for i, episode_item in enumerate(episode_menu):
		if 'tve-video-auth' not in episode_item['class']:
			episode_name = episode_item['omniture-title']
			try:
				season_number = int(re.compile('Season (\d+)').findall(episode_item.find(class_ = 'caption').text)[0])
			except:
				season_number = -1
			try:
				episode_number = int(re.compile('Episode (\d+)').findall(episode_item.find(class_ = 'caption').text)[0])
			except:
				episode_number = -1
			try:
				episode_thumb = episode_item.img['src']
			except:
				episode_thumb = None
			url = BASE + episode_item['omniture-id']
			u = sys.argv[0]
			u += '?url="' + urllib.quote_plus(url) + '"'
			u += '&mode="' + SITE + '"'
			u += '&sitemode="play_video"'
			infoLabels={	'title' : episode_name,
						 'season' : season_number,
						 'episode' : episode_number,
						}
			common.add_video(u, episode_name, episode_thumb, infoLabels = infoLabels, quality_mode  = 'list_qualities')
	common.set_view('episodes')
Example #7
def Browse_ItemAon(html, metamethod='', content='tvshows', view='515'):
    if (len(html) == 0):
        return
    data = re.findall("<img src='http://animeon.pl/images/anime_min/mins/(.+?)'(.+?)<a href='(.+?)'>(.+?)</a></strong>(.+?)<div>(.+?)</div>", html)
    ItemCount = len(data)
    for item in data:
            _url = item[2]
            name = item[3]
            img = 'http://animeon.pl/images/anime_min/' + item[0]
            fanart = fanartAol
            plot = item[5]
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''
###
            pars = {'mode': 'EpisodesAnimeon', 'site': site, 'section': section, 'title': name, 'url': _url, 'img': img, 'fanart': fanart}
            contextLabs = {'title': name, 'url': _url, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            if   section == 'movie':
                contextMenuItems = ContextMenu_Movies(contextLabs)
            elif section == 'animeon':
                contextMenuItems = ContextMenu_Series(contextLabs)
            else:
                contextMenuItems = []
            labs['title'] = name
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
# next page
    npage = url[:-2] + str(int(url[-2:]) + 20)
    if -1 != html.find("div class='pagenav"):
            _addon.add_directory({'mode': 'Pageanimeon', 'site': site, 'section': section, 'url': npage, 'page': npage}, {'title': "Next page"}, is_folder=True, fanart=fanartAol, img=nexticon)
    set_view(content, view_mode=addst('tvshows-view'))
Example #8
def seasons(SITE, FULLEPISODES, CLIPS, FULLEPISODESWEB = None):
	season_urls = common.args.url
	for season_url in season_urls.split(','):
		season_data = connection.getURL(FULLEPISODES % urllib.quote_plus(season_url) + '&range=0-1')
		try:
			season_menu = int(simplejson.loads(season_data)['totalResults'])
		except:
			season_menu = 0
		if season_menu > 0:
			season_url2 = FULLEPISODES % urllib.quote_plus(season_url) + '&range=0-' + str(season_menu)
			common.add_directory('Full Episodes',  SITE, 'episodes', season_url2)
		elif FULLEPISODESWEB:
			show = season_url.split('/')[-1].replace(' ', '')
			web_data = connection.getURL(FULLEPISODESWEB % show)
			web_tree = BeautifulSoup(web_data, 'html.parser')
			all = len(web_tree.find_all('div', class_ = 'view-mode-vid_teaser_show_episode'))
			auth = len(web_tree.find_all('div', class_ = 'tve-video-auth'))
			if all > auth:
				common.add_directory('Full Episodes',  SITE, 'webepisodes', FULLEPISODESWEB % show)
		season_data2 = connection.getURL(CLIPS % urllib.quote_plus(season_url) + '&range=0-1')
		try:
			season_menu2 = int(simplejson.loads(season_data2)['totalResults'])
		except:
			season_menu2 = 0
		if season_menu2 > 0:
			season_url3 = CLIPS % urllib.quote_plus(season_url) + '&range=0-' + str(season_menu2)
			if ',' in season_urls:
				common.add_directory('Clips %s'%season_url,  SITE, 'episodes', season_url3)
			else:
				common.add_directory('Clips',  SITE, 'episodes', season_url3)
	common.set_view('seasons')
Example #9
def Browse_EpisodesShniden(url, page='', content='episodes', view='515'):
    if url == '':
        return
    html = nURL(url + '/episodes')
    html = GetDataBeetwenMarkers(html, 'list-episode-checkboxes', '</tbody>', False)[1]
    html = html.replace('\r\n', '')
    html = html.replace(' ', '')
    data = re.findall('<td>(.+?)</td>(.+?)<ahref="(.+?)"class="buttonactive">', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite5 + item[2]
        if 'fafa-fwfa-times' in item[1]:
            name2 = ' - niedostępny'
        else:
            name2 = ''
        name = "Odcinek " + html_entity_decode(item[0]) + name2
        img = ''
        fanart = fanartAol
        plot = ""
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
###
        contextLabs = {'title': name, 'year': '0000', 'url': strona, 'img': img, 'fanart': fanart, 'DateAdded': '', 'plot': labs['plot']}
        contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
        pars = {'mode': 'PlayShniden', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, int(addst('links-view')))
    eod()
Example #10
def Browse_List(url):
	html=nURL(url); html=messupText(html,True,True); 
	html=spAfterSplit(html,'<div class="ani-post">'); 
	html=spAfterSplit(html,'<div id="ddmcc_container">'); 
	html=spAfterSplit(html,'<div class="ddmcc">'); 
	html=spBeforeSplit(html,'</div>'); 
	#s='<li.*?><a href="(http://www.cartoon-world.tv/watch/(.+?)/)"><i class="icon-\D*-\D*"></i>\s*(.+?)\s*</a></li>'
	s='<li.*?><a.*?href="(http://www.cartoon-world.tv/watch/(.+?)/)".*?><i.*?></i>\s*(.+?)\s*</a></li>'
	matches=re.compile(s).findall(html); ItemCount=len(matches)
	if ItemCount==0: return
	if '/anime' in url.lower(): color_z=colors['8']
	elif '/cartoon' in url.lower(): color_z=colors['5']
	elif '/movie' in url.lower(): color_z=colors['1']
	else: color_z=colors['0']
	for _url,_folder,_name in matches:
		#fimg='http://www.dubbednetwork.net/static/'+_folder+'.jpg'
		#img='http://www.dubbednetwork.net/static/'+_folder+'.jpg'
		fimg='%simages/%s.jpg'%(mainSite,_folder)
		img='%simages/%s.jpg'%(mainSite,_folder)
		_title=cFL_(''+_name+'',color_z)
		pars={'mode':'Episodes','site':site,'section':section,'title':_name,'url':_url,'fanart':fimg,'img':img}
		contextLabs={'title':_name,'year':'0000','url':_url,'img':img,'fanart':fimg,'todoparams':_addon.build_plugin_url(pars),'site':site,'section':section,'plot':''}; 
		if 'movie' in url.lower(): contextMenuItems=ContextMenu_Movies(contextLabs)
		else: contextMenuItems=ContextMenu_Series(contextLabs)
		try: _addon.add_directory(pars,{'title':_title},is_folder=True,fanart=fimg,img=img,contextmenu_items=contextMenuItems,total_items=ItemCount)
		except: t=''
	set_view('tvshows',view_mode=addst('tvshows-view')); eod()
Example #11
def Fav_List(site='',section='',subfav=''):
	debob(['test1',site,section,subfav])
	favs=fav__COMMON__list_fetcher(site=site,section=section,subfav=subfav)
	ItemCount=len(favs)
	debob('test2 - '+str(ItemCount))
	if len(favs)==0: myNote('Favorites','None Found'); eod(); return
	debob(favs)
	for (_name,_year,_img,_fanart,_Country,_Url,_plot,_Genres,_site,_subfav,_section,_ToDoParams,_commonID,_commonID2) in favs:
		if _img > 0: img=_img
		else: img=iconSite
		if _fanart > 0: fimg=_fanart
		else: fimg=fanartSite
		debob('_ToDoParams'); debob(_ToDoParams)
		pars=_addon.parse_query(_ToDoParams)
		debob('pars'); debob(pars)
		_title=cFL_(_name,'white')
		if (len(_year) > 0) and (not _year=='0000'): _title+=cFL('  ('+cFL(_year,'deeppink')+')','pink')
		if len(_Country) > 0: _title+=cFL('  ['+cFL(_Country,'deeppink')+']','pink')
		
		contextLabs={'title':_name,'year':_year,'img':_img,'fanart':_fanart,'country':_Country,'url':_Url,'plot':_plot,'genres':_Genres,'site':_site,'subfav':_subfav,'section':_section,'todoparams':_ToDoParams,'commonid':_commonID,'commonid2':_commonID2}
		##contextLabs={'title':_name,'year':'0000','url':_url,'img':img,'fanart':fimg,'DateAdded':'','todoparams':_addon.build_plugin_url(pars),'site':site,'section':section}
		contextMenuItems=ContextMenu_Favorites(contextLabs)
		#contextMenuItems=[]
		_addon.add_directory(pars,{'title':_title,'plot':_plot},is_folder=True,fanart=fimg,img=img,total_items=ItemCount,contextmenu_items=contextMenuItems)
		#
	#
	if 'movie' in section.lower(): content='movies'
	else: content='tvshows'
	set_view(content,view_mode=int(addst('tvshows-view'))); eod()
Example #12
def Browse_Episodes(url,page=''):
	html=nURL(url); html=messupText(html,True,True); htmlA=''+html
	html=spAfterSplit(html,'<div class="clearboth"></div>'); 
	html=spAfterSplit(html,'<ul'); 
	html=spBeforeSplit(html,'</ul>'); 
	try: _simg=re.compile('<div class=".*?-row">\s*\n*\s*<img.*?src="(.+?)".*?>\s*\n*\s*<table class=".*?-table">').findall(htmlA)[0]
	except: _simg=thumbnail
	try: _plot=re.compile('</tbody>\s*\n*\s*</table>\s*\n*\s*\n*\s*(.*?)\s*\n*\s*</div>\s*\n*\s*<div class="clearboth"></div>\s*\n*\s*<div class="ani-eps">').findall(htmlA)[0]
	except: _plot=''
	try: _type=re.compile('<tr><td.*?>Type</td><td.*?>\s*(.*?)\s*</td></tr>').findall(htmlA)[0]
	except: _type=''
	try: _year=re.compile('<tr><td.*?>Aired</td><td.*?>\s*(.*?)\s*</td></tr>').findall(htmlA)[0]
	except: _year=''
	try: _stitle=re.compile('<tr><td.*?>Title</td><td.*?>\s*(.*?)\s*</td></tr>').findall(htmlA)[0]
	except: _stitle=''
	debob([_stitle,_year,_type,_plot,_simg])
	s='<li.*?><a.*?href="(http://www.cartoon-world.tv/(.+?)/)".*?><i.*?></i>\s*(.+?)\s*</a></li>'
	matches=re.compile(s).findall(html); ItemCount=len(matches)
	if ItemCount==0: return
	if '/anime' in url.lower(): color_z=colors['8']
	elif '/cartoon' in url.lower(): color_z=colors['5']
	elif '/movie' in url.lower(): color_z=colors['1']
	else: color_z=colors['0']
	for _url,_folder,_name in matches:
		fimg=_simg; img=_simg #fimg=fanart; img=thumbnail
		
		_title=cFL_(''+_name+'',color_z)
		contextLabs={'title':_name,'year':'0000','url':_url,'img':img,'fanart':fimg}; contextMenuItems=ContextMenu_Episodes(contextLabs)
		labs={'title':_title,'plot':cFL('Year:  ',colors['11'])+cFL(_year,colors['10'])+cFL('    |    ','black')+cFL('Type:  ',colors['11'])+cFL(_type,colors['10'])+CR+cFL(_plot,colors['12']),'year':_year,'type':_type,'showtitle':_stitle,'fanart':fimg,'img':img}
		try: _addon.add_directory({'mode':'Hosts','site':site,'section':section,'title':_name,'url':_url,'fanart':fimg,'img':img},labs,is_folder=True,fanart=fimg,img=img,contextmenu_items=contextMenuItems,total_items=ItemCount)
		except: t=''
	set_view('episodes',view_mode=addst('episode-view')); eod()
Example #13
def Browse_LatestEpisodes(mmurl):
	url_content=nolines(nURL(mainSite)).replace('</li>','</li\n\r>'); 
	#les=re.search(r"(?s)<h\d>Latest Episodes</h\d>(.+?)<h\d", url_content).group(1); 
	les=url_content.split('>Latest Episodes</h')[-1].split('</ul>')[0] #.split('>Site News</h')[0]
	les=_addon.unescape(les); les=unescape(les)
	# from universal import _common
	# les=_common.str_conv(les)
	les = les.encode('ascii', 'ignore')
	deb('Length of html',str(len(les))); 
	s='<li class="(.*?)">\s*<a href="(.+?)">\s*<span class=".*?rib">.*?</span>\s*'; 
	s+='(?:<noscript>\s*)?<img .*?src="(.+?)".*?/>\s*(?:</noscript>\s*)?'; 
	s+='<div class="ftitle">\s*(.+?)\s*</div>\s*</a>\s*</li'
	#<li class="Anime"><a href="http://www.cartoon-world.tv/future-card-buddyfight-episode-49/"><span class="Animerib">Anime</span> 
	#<img width="210" height="90" src="data:image/gif;base64,R0lGODdhAQABAPAAAP///wAAACwAAAAAAQABAEACAkQBADs=" data-lazy-type="image" data-lazy-src="http://www.cartoon-world.tv/static/future-card-buddyfight-episode-49-210x90.jpg" class="lazy lazy-hidden latest-ep wp-post-image" alt="Future Card Buddyfight Episode 49" /><noscript><img width="210" height="90" src="http://www.cartoon-world.tv/static/future-card-buddyfight-episode-49-210x90.jpg" class="latest-ep wp-post-image" alt="Future Card Buddyfight Episode 49" /></noscript><div class="ftitle">Future Card Buddyfight Episode 49</div></a></li>
	leS=re.compile(s).findall(les)
	#print les
	#for le in re.finditer(r'<li .*?class="(.+?)".*?>\s*<a .*?href="(.+?)".*?>\s*(?:<span>\s*\D*\s*</span>\s*)?<img.+? src="(.+?)".*?>\s*<div class=".*?">\s*(.+?)\s*</div></a></li', les):
	#print leS
	for (le_typ,le_url,le_img,le_ttl) in leS:
		#print le
		#le_typ=le.group(1); le_url=le.group(2); le_img=le.group(3); le_ttl=le.group(4)
		contextLabs={'title':le_ttl,'year':'0000','url':le_url,'img':le_img,'fanart':le_img,'DateAdded':'','type':le_typ}; contextMenuItems=ContextMenu_Episodes(labs=contextLabs)
		try: _addon.add_directory({'mode':'Hosts','site':site,'section':section,'title':le_ttl,'url':le_url,'fanart':le_img,'img':le_img},{'title':cFL_(le_ttl,colors['6'])},is_folder=True,fanart=le_img,img=le_img,contextmenu_items=contextMenuItems)
		except: t=''
	set_view('episodes',int(addst('episode-view'))); eod()
Example #14
def get_baseline_activities(view, baseline, pred=None):
    if (view, baseline) in BASELINE_ACTIVITIES:
        return BASELINE_ACTIVITIES[(view, baseline)]

    set_view(view)
    # FIXME: Find a better way
    stream = view_to_stream(view)
    replica = replica_name(stream)

    if not pred:
        pred = "-pred"
    else:
        pred += replica

    full_base = baseline + replica

    activities = []
    for line in cleartool("diffbl %s %s" % (pred, full_base)):
        line = line.strip()
        if not line:
            continue
        fields = line.split()
        activities.append(fields[1])

    BASELINE_ACTIVITIES[(view, baseline)] = activities
    return activities
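# Note: get_baseline_activities above relies on a cleartool() helper that yields the output
# lines of a ClearCase "cleartool" command; that helper is not shown in this example. A minimal
# sketch, assuming the cleartool binary is on PATH:
import subprocess

def cleartool(args):
    # Run "cleartool <args>" and return its stdout split into lines.
    output = subprocess.check_output(['cleartool'] + args.split())
    return output.splitlines()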
Example #15
def seasons(show_url = common.args.url):
	""" Load the items for a show. This can be "Full Epiodes" and "Clips", or something based
	    on the data.
	    Southpark has a different site structure, so this is redirected to a different function.
	    Some pages have a manifest Javascript object that contains JSON feeds to all episodes.
	    Other pages do not have this. This function tries to find if the show home page has such
	    a feed. If so, only data from the feed is used. If the home page does not have the feed,
	    try to find the URL for the full episodes and the clips pages. For each of these pages
	    the script tries to load the manifest feed. If this cannot be found, add items based on
	    the HTML page. A consequence of this is that some shows can have mixed results: full
	    episides pages does not have a manifest, but clips does. This can lead to duplication of
	    container items. Many shows seem to contain a feed for full episodes, but this feed is empty """
	triforceManifestFeed = _get_manifest(show_url)
	if triforceManifestFeed:
		add_items_from_manifestfile(triforceManifestFeed, show_url)
	else:
		full_episodes_url = get_full_episodes_url(show_url)
		clips_url = get_clips_url(show_url)
		if full_episodes_url:
			triforceManifestFeed = _get_manifest(full_episodes_url)
			if triforceManifestFeed:
				add_items_from_manifestfile(triforceManifestFeed, full_episodes_url)
			else:
				common.add_directory('Full Episodes',  SITE, 'episodes', full_episodes_url)
		if clips_url:
			triforceManifestFeed = _get_manifest(clips_url)
			if triforceManifestFeed:
				add_items_from_manifestfile(triforceManifestFeed, clips_url)
			else:
				common.add_directory('Clips',  SITE, 'episodes', clips_url)
	common.set_view('seasons')
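# Note: seasons() above depends on a _get_manifest() helper that returns the show's parsed
# manifest feed when the page embeds one, or a falsy value otherwise. A rough sketch under
# that assumption, reusing the connection.getURL helper seen throughout these examples; the
# marker used to locate the embedded JSON ("triforceManifestFeed") is illustrative and may
# differ from the real page markup:
def _get_manifest(page_url):
	import re, simplejson
	page_data = connection.getURL(page_url)
	match = re.search(r'triforceManifestFeed\s*=\s*(\{.*?\});', page_data, re.DOTALL)
	if not match:
		return None
	try:
		return simplejson.loads(match.group(1))
	except ValueError:
		return None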
Example #16
def Browse_EpisodesAnimeon(url, page='', content='episodes', view='515'):
    if url == '':
        return
    html = nURL(url)
    if "Odcinków w poczekalni" in html:
        url = url.replace('http://animeon.pl/anime/', 'http://animeon.pl/anime/poczekalnia/')
    html = nURL(url)
    html = GetDataBeetwenMarkers(html, '<h2 class="float-left">Odcinki</h2>', '<div class="float-left"><h2 class="commentsFormH">Komentarze</h2></div>', False)[1]
    data = re.findall("<a href='(.+?)' title='(.+?)' ><strong>", html)
    ItemCount = len(data)
    for item in data:
        url = item[0]
        name = item[1].replace('odcinek', 'Odcinek')
        img = ""
        fanart = fanartAol
        plot = ""
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
###
        contextLabs = {'title': name, 'year': '0000', 'url': url, 'img': img, 'fanart': fanart, 'DateAdded': '', 'plot': labs['plot']}
        contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
        pars = {'mode': 'Version', 'site': site, 'section': section, 'title': name, 'url': url, 'img': img, 'fanart': fanart}
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, int(addst('links-view')))
    eod()
def Browse_ItemAol(html, metamethod='', content='tvshows', view='515'):
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, 'Tytu', '</table>', False)[1]
    data = re.findall('<a href="/(.+?)">(.+?)</a>', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite4 + item[0] + '?page=0'
        name = item[1].encode("utf-8")
### scraper
        meta = metaget.get_meta('tvshow', name)
        fanart = str(meta['backdrop_url']).replace('u','')
        img = str(meta['cover_url']).replace('u','')
        plot = meta['plot']
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
###
        pars = {'mode': 'EpisodesAnime', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if   section == 'movie':
            contextMenuItems = ContextMenu_Movies(contextLabs)
        elif section == 'animeonline':
            contextMenuItems = ContextMenu_Series(contextLabs)
        elif section == 'animedrama':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
Example #18
def seasons(season_url = common.args.url):
	base_url = season_url
	season_dict = ordereddict.OrderedDict({})
	if 'the-tonight-show' in season_url:
		add_show_thetonightshow(season_url)
		return
	has_episodes = False
	video_url = season_url + '/video'
	episode_url = season_url 
	for season_url in (episode_url, video_url):
		season_data = connection.getURL(season_url)
		season_menu = re.compile('<div class="nbc_mpx_carousel.*? id="(.*?)">\s*<h2.*?>(.*?)</h2>', re.DOTALL).findall(season_data)
		for season_id, season_title in season_menu:
			try:
				tag = re.compile(r'<.*?>')
				season_title = tag.sub('', season_title)
				season_title = re.sub(' +',' ', season_title)
				season_title = season_title.strip()
				season_node = season_id.split('_')[-1]
				if season_title not in season_dict.keys():
					season_dict[season_title] =  EPISODES % season_node
					if 'full episodes' == season_title.lower() or 'Season' in season_title:
						has_episodes = True
			except:
				pass
	if not has_episodes:
		common.add_directory('Full Episodes', SITE, 'episodes',  base_url + '/episodes')
	for season_title in season_dict:
		season_url = season_dict[season_title]
		common.add_directory(season_title, SITE, 'episodes',  season_url)
	common.set_view('seasons')
Example #19
def seasons(season_url = common.args.url):
	season_data = connection.getURL(season_url)
	season_tree = BeautifulSoup(season_data, 'html.parser')
	season_menu = season_tree.find_all('div', class_ = 'tab-wrap')
	for season_item in season_menu:
		season_name = season_item.h2.text
		common.add_directory(season_name, SITE, 'episodes', season_url)
	common.set_view('seasons')
Example #20
def Browse_ItemAol(html, metamethod='', content='tvshows', view='515'):
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, 'Tytu', '</table>', False)[1]
    data = re.findall('<a href="/(.+?)">(.+?)</a>', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite4 + item[0] + '?page=0'
        name = item[1].encode("utf-8")
        ### scraper
        meta = metaget.get_meta('tvshow', name)
        fanart = str(meta['backdrop_url']).replace('u', '')
        img = str(meta['cover_url']).replace('u', '')
        plot = meta['plot']
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''


###
        pars = {
            'mode': 'EpisodesAnime',
            'site': site,
            'section': section,
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        contextLabs = {
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart,
            'todoparams': _addon.build_plugin_url(pars),
            'site': site,
            'section': section,
            'plot': labs['plot']
        }
        if section == 'movie':
            contextMenuItems = ContextMenu_Movies(contextLabs)
        elif section == 'animeonline':
            contextMenuItems = ContextMenu_Series(contextLabs)
        elif section == 'animedrama':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars,
                             labs,
                             is_folder=True,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
Example #21
def episodes_json(SITE):
    episode_url = common.args.url
    master_name = episode_url.split('#')[0]
    episode_url = episode_url.split('#')[1]
    episode_data = connection.getURL(episode_url)
    episode_menu = simplejson.loads(episode_data)
    for episode_item in episode_menu:
        url = episode_item['episodeID']
        try:
            episode_duration = episode_item['length']
        except:
            episode_duration = -1
        try:
            episode_airdate = common.format_date(episode_item['airDate'].split('on ')[1],'%B %d, %Y')
        except:
            episode_airdate = -1
        try:
            episode_plot = episode_item['summary']
        except:
            episode_plot = episode_item['shortdescription']
        episode_name = episode_item['title']
        if episode_name == master_name:
            video_url = EPISODE % url
            video_data = connection.getURL(video_url)
            video_tree = BeautifulSoup(video_data, 'html.parser')
            episode_name = video_tree.headline.string
        elif episode_name == "":
            episode_name = episode_plot
        try:
            season_number = int(episode_item['identifier'].split(',')[0].split(' ')[1])
        except:
            season_number = -1
        try:
            episode_number =  int(episode_item['identifier'].split(', ')[1].split(' ')[1].replace(' Episode ', ''))
        except:
            try:
                episode_number =  int(episode_item['identifier'].split(', ')[1].split(' ')[1])
            except:
                episode_number = -1
        if episode_number > 100:
            episode_number = int(re.compile('episode-(\d*)').findall(connection.getRedirect(episode_item['shareURL']))[0])
        try:
            episode_thumb = episode_item['640x360_jpg']
        except:
            episode_thumb = None
        u = sys.argv[0]
        u += '?url="' + urllib.quote_plus(url) + '"'
        u += '&mode="' + SITE + '"'
        u += '&sitemode="play_video"'
        infoLabels={    'title' : episode_name,
                        'durationinseconds' : episode_duration,
                        'season' : season_number,
                        'episode' : episode_number,
                        'plot' : episode_plot,
                        'premiered' : episode_airdate }
        common.add_video(u, episode_name, episode_thumb, infoLabels = infoLabels, quality_mode  = 'list_qualities')
    common.set_view('episodes')
Example #22
def get_baseline_changes(view, baseline, pred=None):
    set_view(view)
    changes = []
    for activity in get_baseline_activities(view, baseline, pred):
        changes += get_activity_changes(activity)

    merged = merge_changes(changes)

    return merged
Example #23
def episodes(episode_url = common.args.url):
	try:
		season_number = episode_url.split('#')[1]
	except:
		season_number = -1
	episode_url = episode_url.split('#')[0]
	episode_data = connection.getURL(episode_url)
	episode_menu = simplejson.loads(episode_data)['FolderList'][0]['PlaylistList'][0]
	for episode_item in episode_menu['MediaList']:
		if episode_item['Season'] == season_number or season_number == -1:
			''' THX to foreverguest '''
			path_pattern = re.compile('http:\\/\\/.+?\/(.+?)_[a-zA-Z0-9]+')
			pattern_url = episode_item['Thumbnail_Wide']
			path = path_pattern.findall(pattern_url)
			if not path and episode_item['ClosedCaptionFiles']:
				path = path_pattern.findall(episode_item['ClosedCaptionFiles'][0]['Path'])
			if not path:
				continue
			video_url = BASE + path[0] + '_'
			episode_duration = int(episode_item['DurationInSeconds'])
			episode_name = episode_item['Title']
			episode_plot = episode_item['Description']
			try:
				episode_airdate = common.format_date(episode_item['ReleaseDate'], '%m/%d/%Y')
			except:
				episode_airdate = None
			try:
				episode_number = int(episode_item['Episode'])
			except:
				episode_number = -1
			try:
				episode_thumb = episode_item['Thumbnail_854x480']
			except:
				episode_thumb = None
			try:
				episode_caption = episode_item['ClosedCaptionFiles'][0]['Path']
			except:
				episode_caption = ''
			episode_MPAA = episode_item['Rating']
			episode_genre = episode_item['Genre']
			episode_showtitle = episode_item['ParentChannelName']
			video_url = video_url + '#' + episode_caption
			u = sys.argv[0]
			u += '?url="' + urllib.quote_plus(video_url) + '"'
			u += '&mode="' + SITE + '"'
			u += '&sitemode="play_video"'
			infoLabels={	'title' : episode_name,
							'durationinseconds' : episode_duration,
							'season' : season_number,
							'episode' : episode_number,
							'plot' : episode_plot,
							'premiered' : episode_airdate,
							'MPAA' : episode_MPAA,
							'Genre' : episode_genre,
							'TVShowTitle' : episode_showtitle}
			common.add_video(u, episode_name, episode_thumb, infoLabels = infoLabels, quality_mode = 'list_qualities')
	common.set_view('episodes')
Example #24
def Browse_LatestEpisodes(mmurl):
	url_content=nURL(mainSite); les=re.search(r"(?s)<h4>Latest Episodes</h4>(.+?)<h4", url_content).group(1); les=_addon.unescape(les); les=unescape(les)
	from universal import _common
	les=_common.str_conv(les)
	for le in re.finditer(r'<li.+?class="(.+?)".+?<a.+?href="(.+?)".+?<img.+? src="(.+?)".+?title">(.+?)</div>', les):
		le_typ=le.group(1); le_url=le.group(2); le_img=le.group(3); le_ttl=le.group(4)
		contextLabs={'title':le_ttl,'year':'0000','url':le_url,'img':le_img,'fanart':le_img,'DateAdded':'','type':le_typ}; contextMenuItems=ContextMenu_Episodes(labs=contextLabs)
		try: _addon.add_directory({'mode':'Hosts','site':site,'section':section,'title':le_ttl,'url':le_url,'fanart':le_img,'img':le_img},{'title':cFL_(le_ttl,colors['6'])},is_folder=True,fanart=le_img,img=le_img,contextmenu_items=contextMenuItems)
		except: t=''
	set_view('episodes',int(addst('episode-view'))); eod()
Example #25
def episodes(episode_url = common.args.url):
	""" Add individual episodes. If the URL is a manifest feed, load from JSON, else analyse
	    the HTML of the page """
	if episode_url.endswith('#ManifestFeed'):
		triforceManifestFeed = _get_manifest_feed(episode_url)
		if triforceManifestFeed:
			add_video_from_manifestfile(triforceManifestFeed)
	else:
		episodes_from_html(episode_url)
	common.set_view('episodes')
Example #26
def videos(SITE):
	episode_url = common.args.url
	episode_data = connection.getURL(episode_url)
	episode_tree = simplejson.loads(episode_data)
	for episode_item in episode_tree['series']['playlists']:
		show_name = episode_tree['series']['title']
		episode_item = episode_item['playlist']
		if '|' in episode_item['headline']:
			episode_name = episode_item['headline'].split('|')[-1].strip()
		elif '- ' in episode_item['headline']:
			episode_name = episode_item['headline'].split('- ')[-1].strip()
		else:
			try:
				episode_name = episode_item['headline'].split(':')[1].strip()
			except:
				episode_name = episode_item['headline']
		try:
			episode_info = re.compile('[s|S]([0-9]).[e|E]?([0-9]{0,2}).*').findall(episode_item['title'])
			try:
				episode_season, episode_number = episode_info[0]
			except:
				episode_season = episode_info
				episode_number = -1
		except:
			episode_season = -1
			episode_number = -1
		url = episode_item['id']
		try:
			episode_plot = episode_item['subhead']
		except:
			episode_plot = ''
		episode_thumb = episode_item['image']
		try:
			episode_duration = common.format_seconds(episode_item['duration']['timecode'])
		except:
			continue
		u = sys.argv[0]
		u += '?url="' + urllib.quote_plus(url) + '"'
		u += '&mode="' + SITE + '"'
		u += '&sitemode="play"'
		infoLabels = {	'title' : episode_name,
						'plot' : episode_plot,
						'durationinseconds' : episode_duration,
						'tvshowtitle' : show_name,
						'season' : episode_season,
						'episode' : episode_number }
		try:
			if (episode_item['distributionPolicies'][0]['distributionPolicy']['policyType'] == 'playable'):
				common.add_video(u, episode_name, episode_thumb, infoLabels = infoLabels, quality_mode = 'list_qualities')
		except:
			if (episode_item['distributionPolicies'][0]['policyType'] == 'playable'):
				common.add_video(u, episode_name, episode_thumb, infoLabels = infoLabels, quality_mode = 'list_qualities')
		else:
			pass
	common.set_view('episodes')
Example #27
def seasons(season_url = common.args.url):
	seasons = []
	season_data = connection.getURL(season_url)
	media_list = simplejson.loads(season_data)['FolderList'][0]['PlaylistList'][0]['MediaList']
	for media in media_list:
		season_number = media['Season']
		if season_number not in seasons:
			season_title = 'Season %s' % season_number
			seasons.append(season_number)
			common.add_directory(season_title,  SITE, 'episodes', season_url + '#' + season_number)
	common.set_view('seasons')
Example #28
def seasons(season_url = common.args.url):
	season_data = connection.getURL(season_url)
	season_menu = BeautifulSoup(season_data, 'html.parser').find('a', class_ = 'full_episodes')
	season_menu2 = BeautifulSoup(season_data, 'html.parser').find('a', class_ = 'video_clips')
	if season_menu is not None:
		season_url2 = BASE + season_menu['href']
		common.add_directory('Full Episodes',  SITE, 'episodes', season_url2)
	if season_menu2 is not None:
		season_url3 = BASE + season_menu2['href']
		common.add_directory('Clips',  SITE, 'episodes', season_url3)
	common.set_view('seasons')
Example #29
def episodes(episode_url = common.args.url):
	episode_data = connection.getURL(episode_url)
	episode_tree = BeautifulSoup(episode_data, 'html.parser')
	add_videos(episode_tree.find('ul', class_ = 'large-grid-list'))
	pagedata = episode_tree.find('span', class_ = 'pagination-next')
	if pagedata:
		try:
			episodes(episode_url.split('?')[0] + pagedata.a['href'] + '&type=' + episode_url.rsplit('=', 1)[1])
		except:
			pass
	common.set_view('episodes')	
Example #30
def Browse_Itemdragon(html, url, metamethod='', content='movies', view='515'):
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, '<ul class="lista-odcinkow">',
                                 '<div class="kontener">')[1]
    data = re.findall('<li>(.+?)</li>\n<li><a href="(.+?)">(.+?)</a>', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite + item[1]
        name = item[0] + " - " + item[2]
        name = name.encode("utf-8")
        img = ''
        plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''


###
        pars = {
            'mode': 'Playdragon',
            'site': site,
            'section': section,
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        contextLabs = {
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart,
            'todoparams': _addon.build_plugin_url(pars),
            'site': site,
            'section': section,
            'plot': labs['plot']
        }
        if section == 'dragonball':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars,
                             labs,
                             is_folder=False,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_Itemscen(html, metamethod='', content='tvshows', view='515'):
    if (len(html) == 0):
        return
    r = re.compile('<a href="http://anime-centrum.net/(.+?)" class="tip-(.+?) tip-style-2"').findall(html)
    ItemCount = len(r)
    if len(r) > 0:
        for _url, _x in r:
            strona = 'http://anime-centrum.net/' + _url
            html2 = nURL(strona)
#  name
            tytul = re.compile('<meta property="og:title" content="(.+?)" />').findall(html2)
            ItemCount = len(tytul)
            if len(tytul) > 0:
                for _tytul in tytul:
                    _name2 = _tytul.encode('utf-8')
#  image
            image = re.compile('<meta property="og:image" content="(.+?)" />').findall(html2)
            ItemCount = len(image)
            if len(image) > 0:
                for foto in image:
                    img = foto
            else:
                img = ""
#  fanart
            image3 = re.compile('http:(.+?).jpg').findall(html2)
            ItemCount = len(image3)
            if len(image3) > 0:
                for foto3 in image3:
                    fanart = "http:" + foto3 + ".jpg"
            else:
                fanart = img
#  plot
            opis = re.compile('<strong>Opis:</strong>(.+)').findall(html2)
            ItemCount = len(opis)
            if len(opis) > 0:
                for desc in opis:
                    plot = desc
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''
###
            pars = {'mode': 'EpisodesCentrum', 'site': site, 'section': section, 'title': _name2, 'url': strona, 'img': img, 'fanart': fanart}
            contextLabs = {'title': _name2, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            if   section == 'movie':
                contextMenuItems = ContextMenu_Movies(contextLabs)
            elif section == 'animecentrum':
                contextMenuItems = ContextMenu_Series(contextLabs)
            else:
                contextMenuItems = []
            labs['title'] = _name2
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
Example #32
def seasons(show_name = common.args.url):
	for type in TYPES:
		season_data = connection.getURL(SEASON % (show_name, type))
		season_menu = simplejson.loads(season_data)
		try:
			season_count = int(season_menu['matched'])
		except:
			season_count = 0
		if season_count > 0:
			common.add_directory(type + 's',  SITE, 'episodes', EPISODES % (show_name, type))
		common.set_view('seasons')
def Browse_AZ():
###Diff-Anime###
    if section == 'diffanime':
        tUrl = mainSite + 'lista-anime'
        _addon.add_directory({'mode': 'Page', 'site': site, 'section': section, 'url': tUrl + '?letter=0&rowstart=00'}, {'title': '#'}, is_folder=True, fanart=fanartSite, img=addonPath + '/art/znak.png')
        for az in MyAlphabet:
            _addon.add_directory({'mode': 'Page', 'site': site, 'section': section, 'url': tUrl + '?letter='+ az + '&rowstart=00' }, {'title': az}, is_folder=True, fanart=fanartSite, img=addonPath + '/art/'+ az +'.png')
        if (len(addst('username', '')) == 0) or (len(addst('password', '')) == 0):
            d = xbmcgui.Dialog()
            d.ok('Komunikat', "Musisz się zalogować, aby móc oglądać odcinki.")
    set_view('list', view_mode=addst('default-view'))
    eod()
Example #34
def Browse_Episodes4fun(url, page='', content='episodes', view='515'):
    if url == '':
        return
    html = nURL(url)
    html = GetDataBeetwenMarkers(html, '<div class="list_episode">',
                                 '</section>', False)[1]
    data = re.findall('<a href="(.+?)" title="(.+?)">', html)
    ItemCount = len(data)
    for item in data:
        strona = item[0]
        name = item[1]
        plot = ''
        img = ''
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''


###
        contextLabs = {
            'title': name,
            'year': '0000',
            'url': strona,
            'img': img,
            'fanart': fanart,
            'DateAdded': '',
            'plot': labs['plot']
        }
        contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
        pars = {
            'mode': 'PlayAnime4fun',
            'site': site,
            'section': section,
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        labs['title'] = name
        _addon.add_directory(pars,
                             labs,
                             is_folder=False,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)
    set_view(content, view_mode=addst('links-view'))
    eod()
Example #35
def Browse_ItemRecenzje(html, metamethod='', content='tvshows', view='515'):
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, '<div class="yt-lockup-content">',
                                 '<span class="yt-spinner">', False)[1]
    data = re.findall(
        'href="(.+?)">(.+?)</a><span class="accessible-description"', html)
    ItemCount = len(data)
    for item in data:
        strona = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % item[
            0].replace('/watch?v=', '')
        name = item[1].encode("ascii", 'replace')
        fanart = fanartAol
        img = 'https://i.ytimg.com/vi_webp/' + item[0].replace(
            '/watch?v=', '') + '/mqdefault.webp'
        plot = ''
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
        contextLabs = {
            'title': name,
            'year': '0000',
            'url': strona,
            'img': img,
            'fanart': fanart,
            'DateAdded': '',
            'plot': labs['plot']
        }
        contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
        pars = {
            'mode': 'PlayFromHost',
            'site': site,
            'section': section,
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        labs['title'] = name
        _addon.add_directory(pars,
                             labs,
                             is_folder=False,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)
    set_view(content, int(addst('links-view')))
    eod()
Example #36
def Browse_Aktualnosci(html, metamethod='', content='tvshows', view='515'):
    if (len(html) == 0):
        return
    r = re.compile("<div class='head'><h2><a href='/news/(.+?)'>(.+?)</a>").findall(html)
    ItemCount = len(r)
    if len(r) > 0:
        for _url, _name in r:
            strona = 'http://diff-anime.pl' + '/news/' + _url
            html2 = nURL(strona)
            _name2 = clean_html(_name)
            _name2 = _name2.replace(";", "")
#
            idx = html2.find("class='news-category' />")
            if idx == -1:
                return
            idx2 = html2.find("</div>", idx)
            if idx2 == -1:
                return
            plot = html2[idx:idx2]
            plot = clean_html(plot)
            plot = plot.replace("class='news-category' />", "")

#
            image = re.compile("<div class='content'><img src='(.+?)' alt='(.+?)' class='news-category' />(.+?).<br />").findall(html2)
            ItemCount = len(image)
            if len(image) > 0:
                for foto, plot1, plot2 in image:
                    img = "http://diff-anime.pl" + foto
            fanart = fanartSite
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''
# YOUTUBE LINK
            _link = re.compile('src="//www.youtube.com/embed/(.+?)"').findall(html2)
            ItemCount = len(_link)
            if len(_link) > 0:
                for link in _link:
                    _url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % link
            contextLabs = {'title': _name2, 'year': '0000', 'url': strona, 'img': img, 'fanart': fanart, 'DateAdded': '', 'plot': labs['plot']}
            contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
            pars = {'mode': 'PlayFromHost', 'site': site, 'section': section, 'title': _name2, 'url': _url, 'img': img, 'fanart': fanart}
            labs['title'] = _name2
            _addon.add_directory(pars, labs, is_folder=False, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, int(addst('tvshows-view')))
    eod()
Example #37
def Browse_Version(url, page='', content='episodes', view='515'):
    if url == '':
        return
    html = nURL(url)
    html = GetDataBeetwenMarkers(html, "<div class='version-list'>", "</ul>", False)[1]
    data = re.findall("<li><a href='(.+?)'>(.+?)</a><li>", html)
    ItemCount = len(data)
    for item in data:
        url = mainSite + item[0].replace('http://animeon.pl/', '')
        print url
        name = item[1]
        print name
        fanart = fanartAol
        plot = ""
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
        html = nURL(url)
        html = GetDataBeetwenMarkers(html, "<div class='float-left player-container'>", "</div>", False)[1]
        data = re.findall("<iframe src='(.+?)' allowfullscreen", html)
        for item in data:
            html = nURL(item)
            data = re.findall("src='(.+?)'", html)
            for item in data:
                url2 = item
                print url2
                if ('video.sibnet.ru' in url2):
                        url2 = url2.replace('swf', 'php')
                elif ('archive.org' in url2):
                        url2 = url2.replace('http:', '')
                        url2 = 'http:' + url2
                elif ('animeon.com.pl/episodes/players/vk.php' in url2):
                        html = nURL(url2)
                        data = re.findall("src='(.+?)'", html)
                        for item in data:
                            url2 = item
###
            contextLabs = {'title': name, 'year': '0000', 'url': url, 'fanart': fanart, 'DateAdded': ''}
            contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
            pars = {'mode': 'PlayFromHost', 'site': site, 'section': section, 'title': name, 'url': url2, 'fanart': fanart}
            labs['title'] = name
            _addon.add_directory(pars, labs, is_folder=False, fanart=fanart, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, int(addst('tvshows-view')))
    eod()
Example #38
def Browse_GenreShniden(url, content='episodes'):
    if url == '':
        return
    html = nURL(url)
    r = re.compile(
        '<input id=".+?"  type="checkbox" name="genre.." value="(.+?)">\n(.+?)</label'
    ).findall(html)
    ItemCount = len(r)
    if len(r) > 0:
        for title, xx in r:
            _url = mainSite5 + 'animelist/index.php?genre[]=' + title
            _name = title
            print _name
            _title = _name
            img = iconShniden
            labs = {}
            strona = _url
            contextLabs = {
                'title': _name,
                'year': '0000',
                'url': _url,
                'img': img,
                'fanart': fanartAol,
                'DateAdded': ''
            }
            contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
            pars = {
                'mode': 'Pageshniden',
                'site': site,
                'section': section,
                'title': _name,
                'url': strona,
                'fanart': fanartAol
            }
            labs['title'] = _title
            _addon.add_directory(pars,
                                 labs,
                                 is_folder=True,
                                 fanart=fanartAol,
                                 img=img,
                                 contextmenu_items=contextMenuItems,
                                 total_items=ItemCount)
    set_view(content, int(addst('links-view')))
    eod()
Example #39
def Pageshniden(url, nmr,  metamethod='', content='tvshows', view='515'):
    if 'search' in url:
        from common import getUserInput
        url = url + getUserInput('Szukaj','')
        html = nURL(url.replace(' ', '+'))
    else:
        html = nURL(url + nmr)
    html = GetDataBeetwenMarkers(html, 'data-view-table-cover', '<nav class="pagination">', False)[1]
    html = html.replace('\r\n', '')
    html = html.replace(' ', '')
    data = re.findall('src="(.+?)"/></td><tdclass="desc-col"><h3><ahref="(.+?)">(.+?)</a></h3>', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite5 + item[1]
        name = item[2].encode("utf-8")
        img = (mainSite5 + item[0]).replace('/resources/images/100x100/','/resources/images/genuine/')
        img = img.replace('100x100', '225x350')
        plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
###
        pars = {'mode': 'EpisodesShniden', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if   section == 'movie':
            contextMenuItems = ContextMenu_Movies(contextLabs)
        elif section == 'shnidenodc':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    if 'search' not in url:
        npage = str(int(nmr) + 1)
        print npage
#    if -1 != html.find("do strony "):
        _addon.add_directory({'mode': 'Pageshniden', 'site': site, 'section': section, 'url': url, 'page': url, 'nmr': npage}, {'title': "Next page"}, is_folder=True, fanart=fanartAol, img=nexticon)
    set_view(content, view_mode=addst('links-view'))
    eod()
def SectionMenu():
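    # Top-level menu: one entry per supported site plus the favourites lists.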
###Anime-Centrum###
    _addon.add_directory({'mode': 'SubMenu', 'site': site, 'section': 'animecentrum'}, {'title': cFL('Anime-Centrum', 'blue')}, is_folder=True, fanart=fanartSiteCentrum, img=iconCentrum)
###Diff-Anime###
#    _addon.add_directory({'mode': 'SubMenu', 'site': site, 'section': 'diffanime'}, {'title': cFL('Diff-Anime', 'blue')}, is_folder=True, fanart=fanartSite, img=iconDiff)
###DramaQueen###
#    _addon.add_directory({'mode': 'SubMenu', 'site': site, 'section': 'DramaQueen'}, {'title': cFL('DramaQueen', 'blue')}, is_folder=True, fanart=fanartDrama, img=iconSite)
###Anime-Online###
    _addon.add_directory({'mode': 'SubMenu', 'site': site, 'section': 'animeonline'}, {'title': cFL('Anime-Odcinki', 'blue')}, is_folder=True, fanart=fanartAol, img=iconOdcinki)
###Anime-Shinden###
    _addon.add_directory({'mode': 'SubMenu', 'site': site, 'section': 'animeshinden'}, {'title': cFL('Anime-Shinden', 'blue')}, is_folder=True, fanart=fanartAol, img=iconShniden)
###Anime-On###
    _addon.add_directory({'mode': 'SubMenu', 'site': site, 'section': 'animeon'}, {'title': cFL('Anime-On', 'blue')}, is_folder=True, fanart=fanartAol, img=iconAnimeon)
###Favorites###
    _addon.add_directory({'mode': 'FavoritesList', 'site': site, 'section': ''}, {'title': cFL_(ps('WhatRFavsCalled') + addst('fav.tv.1.name'), ps('cFL_color3'))}, fanart=fanartIPTV, img=iconFavs)
    _addon.add_directory({'mode': 'FavoritesList', 'site': site, 'section': '', 'subfav': '2'}, {'title': cFL_(ps('WhatRFavsCalled') + addst('fav.tv.2.name'), ps('cFL_color3'))}, fanart=fanartIPTV, img=iconFavs)
    _addon.add_directory({'mode': 'FavoritesList', 'site': site, 'section': '', 'subfav': '3'}, {'title': cFL_(ps('WhatRFavsCalled') + addst('fav.tv.3.name'), ps('cFL_color3'))}, fanart=fanartIPTV, img=iconFavs)
    _addon.add_directory({'mode': 'FavoritesList', 'site': site, 'section': '', 'subfav': '4'}, {'title': cFL_(ps('WhatRFavsCalled') + addst('fav.tv.4.name'), ps('cFL_color3'))}, fanart=fanartIPTV, img=iconFavs)
    set_view('list', view_mode=addst('default-view'))
    eod()
def Browse_PlayShniden(url, page='', content='episodes', view='515'):
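    # Read the embedded player list ({"online_id": ..., "player": ...} pairs) from
    # an episode page and add one folder per player, handled by PlayShniden2.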
    if url == '':
        return
    html = nURL(url)
    r = re.compile('{"online_id":"(.+?)","player":"(.+?)"').findall(html)
    ItemCount = len(r)
    if len(r) > 0:
        for  _url, player in r:
            url = _url
            strona = url
            _name = player
            _title = player
            fanart = fanartAol
            labs = {}
            img=''
            contextLabs = {'title': _name, 'year': '0000', 'url': _url, 'img': img, 'fanart': fanart, 'DateAdded': '', 'plot': ''}
            contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
            pars = {'mode': 'PlayShniden2', 'site': site, 'section': section, 'title': _name, 'url': strona, 'img': img, 'fanart': fanart}
            labs['title'] = _title
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, int(addst('links-view')))
    eod()
def Browse_PlayShniden(url, page='', content='episodes', view='515'):
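    # Alternative player browser: cut the page between the tab-containers marker and
    # the first <script>, collect iframe/embed/flashvars sources, map known hosts to
    # readable names and add playable PlayFromHost entries.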
    if url == '':
        return
    html = nURL(url)
    idx = html.find('<!-- tab containers -->')
    if idx == -1:
        return
    idx2 = html.find("<script>", idx)
    if idx2 == -1:
        return
    data = html[idx:idx2]
    r = re.compile("flashvars=.+?hd\.file=(.+?)&").findall(data)
    ItemCount = len(r)
    if len(r) > 0:
        for _url in r:
            url = _url
    r = re.compile('<iframe src="http://(.+?)"').findall(data)
    ItemCount = len(r)
    if len(r) > 0:
        for _url in r:
            url = _url
    r = re.compile('<embed src="http://(.+?) quality="high" ').findall(data)
    ItemCount = len(r)
    if len(r) > 0:
        for _url in r:
            url = _url
    r = re.compile('flashvars="streamer=(.+?)"').findall(data)
    ItemCount = len(r)
    if len(r) > 0:
        for _url in r:
            url = _url
    r = re.compile('src="(.+?)"').findall(data)
    ItemCount = len(r)
    if len(r) > 0:
        for _url in r:
            print _url
            url = _url
            print url
            if ('vk' in url):
                _name = 'VK'
            elif ('google' in url):
                _name = 'Google video'
            elif ('video.sibnet.ru' in url):
                url = url.replace('swf', 'php')
                print url
                _name = 'Sibnet.ru'
            elif ('mp4upload.com' in url):
                _name = 'Mp4upload'
            elif ('dailymotion' in url):
                _name = 'Dailymotion'
            elif ('tune.pk' in url):
                _name = 'Tune'
            elif ('archive.org' in url):
                _name = 'Archive'
                url = url.replace('http:', '')
                url = 'http:' + url
            elif ('www.wrzuta.pl' in url):
                _name = 'Wrzuta'
            elif ('http://myvi.ru/' in url):
                _name = 'Myvi.ru - brak obsługi'
            elif ('anime-shinden.info/player' in url):
                _name = 'AnimeShniden player'
            elif ('peteava.ro' in url):
                _name = 'Peteava'
            elif ('vplay.ro' in url):
                _name = 'Vplay'
            else:
                _name = 'Inny Host'
            fanart = fanartAol
            labs = {}
            contextLabs = {
                'title': _name,
                'year': '0000',
                'url': url,
                'fanart': fanart,
                'DateAdded': ''
            }
            contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
            pars = {
                'mode': 'PlayFromHost',
                'site': site,
                'section': section,
                'title': _name,
                'url': url,
                'fanart': fanart
            }
            labs['title'] = _name
            _addon.add_directory(pars,
                                 labs,
                                 is_folder=False,
                                 fanart=fanart,
                                 contextmenu_items=contextMenuItems,
                                 total_items=ItemCount)
    set_view(content, int(addst('tvshows-view')))
    eod()
def Browse_EpisodesShniden(url, page='', content='episodes', view='515'):
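    # Scrape the episode links from the news block of a series page and add one
    # folder per episode, handled by PlayShniden.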
    if url == '':
        return
    html = nURL(url)
    idx = html.find('><div id="news-id')
    if idx == -1:
        return
    idx2 = html.find('</td>', idx)
    if idx2 == -1:
        return
    htmllink = html[idx:idx2]
    r = re.compile('<a href=".+?(/.+?.html)".+?>(?:<b>)*(.+?)(?:</b>)*</a>'
                   ).findall(htmllink)
    ItemCount = len(r)
    if len(r) > 0:
        for _url, _tytul in r:
            _name = _tytul
            _url = 'http:' + _url
            _title = _name
            # image = re.compile("<img src='http://(.+?)' style='margin:2px").findall(html)
            # ItemCount = len(image)
            # for foto in image:
            #     img = "http://" + foto
            # else:
            #     image = re.compile("<img src='(.+?)' style='margin:").findall(html)
            #     ItemCount = len(image)
            # else:
            #     img = ""
            img = ''
            fanart = fanartAol
            # opis = re.compile('<font face="Trebuchet MS">(.+?)</font>').findall(html)
            # ItemCount = len(opis)
            # if len(opis) > 0:
            #     for desc in opis:
            plot = ""
            strona = _url
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''


###
            contextLabs = {
                'title': _name,
                'year': '0000',
                'url': _url,
                'img': img,
                'fanart': fanart,
                'DateAdded': '',
                'plot': labs['plot']
            }
            contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
            pars = {
                'mode': 'PlayShniden',
                'site': site,
                'section': section,
                'title': _name,
                'url': strona,
                'img': img,
                'fanart': fanart
            }
            labs['title'] = _title
            _addon.add_directory(pars,
                                 labs,
                                 is_folder=True,
                                 fanart=fanart,
                                 img=img,
                                 contextmenu_items=contextMenuItems,
                                 total_items=ItemCount)
    set_view(content, int(addst('links-view')))
    eod()
def Browse_Itemscen(html, metamethod='', content='tvshows', view='515'):
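    # For every series link on an anime-centrum listing, fetch the series page and
    # pull og:title, og:image, a fanart candidate and the description, then add an
    # EpisodesCentrum folder.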
    if (len(html) == 0):
        return
    r = re.compile(
        '<a href="http://anime-centrum.net/(.+?)" class="tip-(.+?) tip-style-2"'
    ).findall(html)
    ItemCount = len(r)
    if len(r) > 0:
        for _url, _x in r:
            strona = 'http://anime-centrum.net/' + _url
            html2 = nURL(strona)
            #  title
            tytul = re.compile(
                '<meta property="og:title" content="(.+?)" />').findall(html2)
            ItemCount = len(tytul)
            if len(tytul) > 0:
                for _tytul in tytul:
                    _name2 = _tytul.encode('utf-8')
#  thumbnail
            image = re.compile(
                '<meta property="og:image" content="(.+?)" />').findall(html2)
            ItemCount = len(image)
            if len(image) > 0:
                for foto in image:
                    img = foto
            else:
                img = ""
#  fanart
            image3 = re.compile('http:(.+?).jpg').findall(html2)
            ItemCount = len(image3)
            if len(image3) > 0:
                for foto3 in image3:
                    fanart = "http:" + foto3 + ".jpg"
            else:
                fanart = img
#  description
            opis = re.compile('<strong>Opis:</strong>(.+)').findall(html2)
            ItemCount = len(opis)
            if len(opis) > 0:
                for desc in opis:
                    plot = desc
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''


###
            pars = {
                'mode': 'EpisodesCentrum',
                'site': site,
                'section': section,
                'title': _name2,
                'url': strona,
                'img': img,
                'fanart': fanart
            }
            contextLabs = {
                'title': _name2,
                'url': strona,
                'img': img,
                'fanart': fanart,
                'todoparams': _addon.build_plugin_url(pars),
                'site': site,
                'section': section,
                'plot': labs['plot']
            }
            if section == 'movie':
                contextMenuItems = ContextMenu_Movies(contextLabs)
            elif section == 'animecentrum':
                contextMenuItems = ContextMenu_Series(contextLabs)
            else:
                contextMenuItems = []
            labs['title'] = _name2
            _addon.add_directory(pars,
                                 labs,
                                 is_folder=True,
                                 fanart=fanart,
                                 img=img,
                                 contextmenu_items=contextMenuItems,
                                 total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_EpisodesCentrum(url, page='', content='episodes', view='515'):
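    # List the episodes of an anime-centrum series: for every "Odcinek" link, scrape
    # artwork and description from the series page, resolve the direct file link from
    # the episode page and add a playable PlayFromHost entry.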
    if url == '':
        return
    html = nURL(url)
    html = messupText(html, True, True)
    s = '<a href="http://(.+?)">Odcinek(.+?)</a>'
    matches = re.compile(s).findall(html)
    ItemCount = len(matches)
    if ItemCount > 0:
        for _url, _nazwa in matches:
            _url2 = 'http://' + urllib.quote(_url)
            _name = 'Odcinek' + _nazwa
            _title = '' + cFL_(_name)
            #  thumbnail
            image = re.compile(
                '<meta property="og:image" content="(.+?)" />').findall(html)
            ItemCount = len(image)
            if len(image) > 0:
                for foto in image:
                    img = foto
            else:
                img = ""
#  fanart
            image2 = re.compile(
                '<!--dle_image_begin:(.+?)</a><!--dle_image_end-->').findall(
                    html)
            ItemCount = len(image2)
            if len(image2) > 0:
                for foto2 in image2:
                    strona2 = foto2
                    image3 = re.compile('http:(.+?).jpg').findall(strona2)
                    ItemCount = len(image3)
                    if len(image3) > 0:
                        for foto3 in image3:
                            fanart = "http:" + foto3 + ".jpg"
                    else:
                        fanart = img
            else:
                fanart = img
#  description
            opis = re.compile('<strong>Opis:</strong>(.+)').findall(html)
            ItemCount = len(opis)
            if len(opis) > 0:
                for desc in opis:
                    plot = desc
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''
#  extract the mp4 link
            html2 = nURL(_url2)
            _link = re.compile('file: "(.+?)"').findall(html2)
            #            _link = re.compile("<source src='(.+?)' type='video/mp4' />").findall(html2)
            ItemCount = len(_link)
            if len(_link) > 0:
                for link in _link:
                    strona = link.replace(' ', '%20')
###
            contextLabs = {
                'title': _name,
                'year': '0000',
                'url': _url2,
                'img': img,
                'fanart': fanart,
                'DateAdded': '',
                'plot': labs['plot']
            }
            contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
            pars = {
                'mode': 'PlayFromHost',
                'site': site,
                'section': section,
                'title': _name,
                'url': strona,
                'img': img,
                'fanart': fanart
            }
            labs['title'] = _title
            _addon.add_directory(pars,
                                 labs,
                                 is_folder=False,
                                 fanart=fanart,
                                 img=img,
                                 contextmenu_items=contextMenuItems,
                                 total_items=ItemCount)
    set_view(content, int(addst('links-view')))
    eod()
def Browse_ItemAnimecentrum(html, url, metamethod='', content='movies', view='515'):
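    # Build the series list from an anime-centrum.pl grid page; when "acentr-thumbs"
    # is enabled, covers and descriptions are cached through the scraper module.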
    if (len(html) == 0):
        return
    data = re.findall('tb-cell"><a href="(.+?)"><img src="(.+?)" alt="(.+?)"><\/a>', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite + item[0]
        strona = strona + '?page=1'
        name = item[2].encode("utf-8")
        name = ParseDescription(name)
### scraper
        if (tfalse(addst("acentr-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    html = nURL(strona)
                    html = GetDataBeetwenMarkers(html, '<article class="content-1">', '<section class="gap-2">', False)[1]
                    data = re.findall('<img src="(.+?)" alt=', html)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            img = item+ "|Referer=http://anime-centrum.pl/"
                    else:
                        img = ''
                    data = re.findall('<p>(.+?)</p>', html)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            plot = item
                            plot = ParseDescription(item)
                    else:
                        plot = ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
###
        pars = {'mode': 'EpisodesAnimecentrum', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'animecentrum':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_EpisodesDiff(url, page='', content='episodes', view='515'):
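    # List the episodes of a diff-anime.pl series: scrape artwork, frames and
    # description from the series pages, pull the file link from each episode page
    # and add playable PlayFromHost entries.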
    html = gethtml.get(url + '/odcinki', addonPath)
    htmlplot = gethtml.get(url , addonPath)
    html = messupText(html, ciastko, True, True)
    s = "#(.+?)</div><div class=.+?</div><div class='con3'><a href='(.+?)' class='i'>"
    matches = re.compile(s).findall(html)
    ItemCount = len(matches)
    if ItemCount > 0:
        for  _nazwa, _url in matches:
            _url2 = 'http://diff-anime.pl' + _url
            _name = 'Odcinek' + _nazwa
            _title = '' + _name
#  thumbnail
            image = re.compile("</div><div class='content'><div class='con'><a href='(.+?)' class='fbox'>").findall(html)
            ItemCount = len(image)
            if len(image) > 0:
                for foto in image:
                    img = "http://diff-anime.pl" + foto
            else:
                img = ""
#  fanart
            if "Nie dodano kadrów do tej serii." in html:
                fanart = fanartSite
            else:
                image2 = re.compile("<h2>Kadry</h2></div><div class='content'><a href='(.+?)' class='fbox'>").findall(html)
                ItemCount = len(image2)
                if len(image2) > 0:
                    for _fanart in image2:
                        fanart = "http://diff-anime.pl" + _fanart
                else:
                    fanart = img
#  description
            opis = re.compile("<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)</div>").findall(htmlplot)
            ItemCount = len(opis)
            if len(opis) > 0:
                for desc in opis:
                    plot = unicode(desc,"utf-8")
            else:
                opis = re.compile("<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)<").findall(htmlplot)
                ItemCount = len(opis)
                if len(opis) > 0:
                    for desc in opis:
                        plot = unicode(desc, "utf-8")
                else:
                    opis = re.compile("<div id='pDesc' class='panel'><div class='head'><h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)<br />").findall(htmlplot)
                    ItemCount = len(opis)
                    if len(opis) > 0:
                        for desc in opis:
                            plot = unicode(desc, "utf-8")
                    else:
                        plot = ""
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''
#  extract the mp4 link
            html2 = gethtml.get(_url2, addonPath)
            _link = re.compile("'file': '(.+?)',").findall(html2)
            ItemCount = len(_link)
            if len(_link) > 0:
                for link in _link:
                    strona = link.replace(' ', '%20')
###
            contextLabs = {'title': _name, 'year': '0000', 'url': _url2, 'img': img, 'fanart': fanart, 'DateAdded': '', 'plot': labs['plot']}
            contextMenuItems = ContextMenu_Episodes(labs=contextLabs)
            pars = {'mode': 'PlayFromHost', 'site': site, 'section': section, 'title': _name, 'url': strona, 'img': img, 'fanart': fanart}
            labs['title'] = _title
            _addon.add_directory(pars, labs, is_folder=False, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, int(addst('links-view')))
    eod()
def Browse_Itemslist(url, page='', content='episodes', view='515'):
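    # Build the series list for the chosen wbijam.pl category by cutting the matching
    # menu block out of inne.wbijam.pl; optional thumbnails go through the scraper module.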

    data = 'http://www.inne.wbijam.pl/'
    html = nURL(data)
    html = html.encode('utf-8', '')
    if 'Polecane serie anime' in url:
        data1 = '<div class="pmenu_naglowek_blue">Polecane serie anime</div>'
        data2 = '<a href="http://www.inne.wb'
        link = ''
        mode = 'Browse_Episodeswijam'
    elif 'Lżejsze klimaty' in url:
        data1 = '<div class="pmenu_naglowek_red">Lżejsze klimaty</div>'
        data2 = '<div class="pmenu_naglowek_blue">Polecane serie anime</div>'
        link = 'http://www.inne.wbijam.pl/'
        mode = 'Browse_Episodeswijaminne'
    elif 'Akcja' in url:
        data1 = '<div class="pmenu_naglowek_red">Akcja</div>'
        data2 = '<div class="pmenu_naglowek_red">Lżejsze klimaty</div>'
        link = 'http://www.inne.wbijam.pl/'
        mode = 'Browse_Episodeswijaminne'

    data = GetDataBeetwenMarkers(html, data1, data2, False)[1]
    data = re.findall('<a href="(.+?)">(.+?)</a></li>', data)
    data.sort()
    ItemCount = len(data)
    if len(data) > 0:
        for item in data:
            strona = link + item[0]
            name = item[1].encode('utf-8', '')
### scraper
            if (tfalse(addst("wbij-thumbs")) == True):
                import scraper
                scrap = scraper.scraper_check(host, name)
                try:
                    if (name not in scrap):
                        html = nURL(strona)
                        data = re.findall('<img src="grafika/(.+?)">', html)
                        ItemCount = len(data)
                        if len(data) > 0:
                            for item in data:
                                img = url + '/grafika/' + item

                        else:
                            img = ''
                        plot = ''
                        scraper.scraper_add(host, name, img, plot, '')
                        scrap = scraper.scraper_check(host, name)
                except:
                    scrap = ''
                try:
                    img = scrap[1]
                except:
                    img = ''
                try:
                    plot = scrap[2]
                except:
                    plot = ''
            else:
                img = ''
                plot = ''
            fanart = fanartAol
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''
    ##
            pars = {'mode': mode, 'site': site, 'section': section, 'title': name, 'url': strona, 'page': url, 'img': img, 'fanart': fanart}
            contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            contextMenuItems = ContextMenu_Series(contextLabs)
            labs['title'] = name
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
    eod()
def Browse_ItemAol(html, url, metamethod='', content='movies', view='515'):
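    # List series from a /kreskowka/ listing page; when "kresk-thumbs" is enabled,
    # covers are cached through the scraper module before adding Episodeskresk folders.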
    if (len(html) == 0):
        return
    html = html.replace('\'', '')
    html = html.replace('\n', '')
    html = html.replace('\r', '')
    html = html.replace('\t', '')
    html = GetDataBeetwenMarkers(html, 'zaczynaj', '<header>Kresk', False)[1]
    #    print html.encode('ascii','ignore')
    data = re.findall(
        'href="/kreskowka/(.+?)"><div(.+?)<b class="larger white">(.+?)</b>',
        html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite + '/kreskowka/' + item[0]
        name = item[2].encode("utf-8")
        name = ParseDescription(name)
        ### scraper
        if (tfalse(addst("kresk-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    html = nURL(strona)
                    html = GetDataBeetwenMarkers(html, '>Serie</a>',
                                                 '<div class="info-basic">',
                                                 False)[1]
                    #print html.encode('ascii','ignore')
                    data = re.findall("/upload/cats/(.+?).jpg", html)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            img = mainSite + '/upload/cats/' + item + '.jpg'
                    else:
                        img = ''
                    plot = ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''


###
        pars = {
            'mode': 'Episodeskresk',
            'site': site,
            'section': section,
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        contextLabs = {
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart,
            'todoparams': _addon.build_plugin_url(pars),
            'site': site,
            'section': section,
            'plot': labs['plot']
        }
        if section == 'kreskoweczki':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars,
                             labs,
                             is_folder=True,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
    eod()
def Browse_ItemAol(html, page, metamethod='', content='movies', view='515'):
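    # List series from an a-o.ninja letter page; when "aodc-thumbs" is enabled, cover
    # and description are cached through the scraper module before adding EpisodesAnime folders.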
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, 'Tytu', '</table>', False)[1]
    page = page.lower()
    data = re.findall('<a href="https://a-o.ninja/anime/' + page+ '(.+?)">(.+?)</a>', html)
    ItemCount = len(data)
    for item in data:
        strona =  'https://a-o.ninja/anime/' + page + item[0]
        name = item[1].encode("utf-8")
        name = ParseDescription(name)
### scraper
        if (tfalse(addst("aodc-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    if '?page=0' in strona:
                        strona2 = strona.replace('?page=0','')
                    else:
                        strona2 = strona
                    html = nURL(strona2)
                    html = GetDataBeetwenMarkers(html, 'field-name-field-okladka field-type-image field-label-above', '<p>&nbsp;</p>', False)[1]
                    data = re.findall('<img src="(.+?)"', html)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            img = item
                    else:
                        img = ''
                    data = re.findall('<p><p>(.+?)</p>', html)
                    ItemCount = len(data)
                    if len(data) > 0:
                        for item in data:
                            plot = ParseDescription(item)
                    else:
                        plot = ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
###
        pars = {'mode': 'EpisodesAnime', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart}
        contextLabs = {'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
        if section == 'animeonline':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_Itemscen(html, name, metamethod='', content='movies', view='515'):
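    # List entries from an anime4fun-style "content_episode" block, taking link, title
    # and background image from each tile, then append a "Next page" entry.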
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, '<div class="content_episode">',
                                 '<div class="clr"></div>', False)[1]
    html = html.replace('\'', '')
    html = html.replace('\n', '')
    html = html.replace('\r', '')
    html = html.replace('\t', '')
    html.encode("utf-8")
    data = re.compile(
        '<a href="(.+?)" title="(.+?)">(.+?)url\((.+?)\);">').findall(html)
    ItemCount = len(data)
    for item in data:
        img = item[3].replace(' ', '%20')
        strona = item[0]
        name2 = item[1].encode("utf-8")
        plot = ''
        fanart = fanartAol  # assumed default; fanart was otherwise undefined before being used below
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''
##
        pars = {
            'mode': 'Episodes4fun',
            'site': site,
            'section': section,
            'title': name2,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        contextLabs = {
            'title': name2,
            'url': strona,
            'img': img,
            'fanart': fanart,
            'todoparams': _addon.build_plugin_url(pars),
            'site': site,
            'section': section,
            'plot': labs['plot']
        }
        if section == 'anime4fun':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name2
        _addon.add_directory(pars,
                             labs,
                             is_folder=True,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)


# next page
    npage = url[:-1] + str(int(url[-1:]) + 1)
    #    if -1 != html.find("do strony "):
    _addon.add_directory(
        {
            'mode': 'Page4fun',
            'site': site,
            'section': section,
            'url': npage,
            'page': npage
        }, {'title': "Next page"},
        is_folder=True,
        fanart=fanart,
        img=nexticon)
    set_view(content, view_mode=addst('links-view'))
    eod()
def Browse_ItemShniden(html, metamethod='', content='tvshows', view='515'):
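    # Build the A-Z series list from the anime-shinden sub-nav block and decorate each
    # entry with cover, fanart and plot taken from the metadata handler.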
    if (len(html) == 0):
        return
    idx = html.find('<dl class="sub-nav">')
    if idx == -1:
        return
    idx2 = html.find('</body>', idx)
    if idx2 == -1:
        return
    html = html[idx:idx2]
    r = re.compile('<a href="(.+?.html)">(.+?) </a>').findall(html)
    ItemCount = len(r)
    if len(r) > 0:
        for _url, _tytul in r:
            _name2 = html_entity_decode(_tytul)
            strona = _url
            ### scraper
            meta = metaget.get_meta('tvshow', _name2)
            fanart = str(meta['backdrop_url']).replace('u', '')
            img = str(meta['cover_url']).replace('u', '')
            plot = meta['plot']
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''


###
            pars = {
                'mode': 'EpisodesShniden',
                'site': site,
                'section': section,
                'title': _name2,
                'url': strona,
                'img': img,
                'fanart': fanart
            }
            contextLabs = {
                'title': _name2,
                'url': strona,
                'img': img,
                'fanart': fanart,
                'todoparams': _addon.build_plugin_url(pars),
                'site': site,
                'section': section,
                'plot': labs['plot']
            }
            if section == 'movie':
                contextMenuItems = ContextMenu_Movies(contextLabs)
            elif section == 'shnidenodc':
                contextMenuItems = ContextMenu_Series(contextLabs)
            else:
                contextMenuItems = []
            labs['title'] = _name2
            _addon.add_directory(pars,
                                 labs,
                                 is_folder=True,
                                 fanart=fanart,
                                 img=img,
                                 contextmenu_items=contextMenuItems,
                                 total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
def Browse_ItemSenpai(html, page, metamethod='', content='movies', view='515'):
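    # List series from a senpai.com.pl letter page; covers come from the
    # /Resources/anime/covers/ path and are cached through the scraper module when
    # "senpai-thumbs" is enabled.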
    if (len(html) == 0):
        return
    html = GetDataBeetwenMarkers(html, '<h4>Lista anime</h4>',
                                 '<footer class="page-footer indigo">',
                                 False)[1]
    data = re.findall(
        'href="\/anime\/' + page +
        '(.+?)">\n(\s+)<img src="/Resources/anime/covers/(.+?)"', html)
    ItemCount = len(data)
    for item in data:
        strona = mainSite + page + item[0]
        name = urllib2.unquote(page + item[0].encode("utf-8"))
        name = ParseDescription(name)
        ### scraper
        if (tfalse(addst("senpai-thumbs")) == True):
            import scraper
            scrap = scraper.scraper_check(host, name)
            try:
                if (name not in scrap):
                    img = 'http://senpai.com.pl/Resources/anime/covers/' + urllib2.quote(
                        item[2])
                    plot = ''
                    scraper.scraper_add(host, name, img, plot, '')
                    scrap = scraper.scraper_check(host, name)
            except:
                scrap = ''
            try:
                img = scrap[1]
            except:
                img = ''
            try:
                plot = scrap[2]
            except:
                plot = ''
        else:
            img = ''
            plot = ''
        fanart = fanartAol
        labs = {}
        try:
            labs['plot'] = plot
        except:
            labs['plot'] = ''


###
        pars = {
            'mode': 'EpisodesSenpai',
            'site': site,
            'section': section,
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        contextLabs = {
            'title': name,
            'url': strona,
            'img': img,
            'fanart': fanart,
            'todoparams': _addon.build_plugin_url(pars),
            'site': site,
            'section': section,
            'plot': labs['plot']
        }
        if section == 'senpai':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name
        _addon.add_directory(pars,
                             labs,
                             is_folder=True,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)
    set_view(content, view_mode=addst('tvshows-view'))
    def __init__(self, logger, ev_quit, queues, logtype='normal'):
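        # Build the Gen2 integrated observation GUI: threading and command queues,
        # the top-level GTK window, the workspace "desktop" and its pages, menus
        # and dialogs.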

        self.logger = logger
        self.ev_quit = ev_quit
        self.queue = queues
        self.logtype = logtype
        self.lock = threading.RLock()
        # Used for tagging commands
        self.cmdcount = 0
        # ugh--ugly race condition hack
        common.set_view(self)

        self.gui_queue = Queue.Queue()
        self.placeholder = '--notdone--'
        self.gui_thread_id = None

        # Options that can be set graphically
        self.audible_errors = True
        self.suppress_confirm_exec = True
        self.embed_dialogs = False
        self.clear_obs_info = True

        # This is the home directory for loading all kinds of files
        self.procdir = None
        # This is the list of directories to search for include
        # (e.g. PRM) files named by other files
        self.include_dirs = []

        # Set default location, until changed
        procdir = os.path.join(os.environ['HOME'], 'Procedure')
        self.set_procdir(procdir, 'SUKA')
        
        # Create the GUI
        self.w = Bunch.Bunch()

        # hack required to use threads with GTK
        gobject.threads_init()
        gtk.gdk.threads_init()

        # Create top-level window
        root = gtk.Window(gtk.WINDOW_TOPLEVEL)
        root.set_size_request(1900, 1050)
        root.set_title('Gen2 Integrated GUI II')
        root.connect("delete_event", self.delete_event)
        root.set_border_width(2)

        # These are sometimes needed
        self.display = gtk.gdk.display_manager_get().get_default_display()
        self.clipboard = gtk.Clipboard(self.display, "CLIPBOARD")

        # create main frame
        self.w.mframe = gtk.VBox(spacing=2)
        root.add(self.w.mframe)
        #self.w.mframe.show()

        self.w.root = root

        self.w.menubar = gtk.MenuBar()
        self.w.mframe.pack_start(self.w.menubar, expand=False)

        # Create a "desktop" the holder for workspaces
        self.ds = Desktop(self.w.mframe, 'desktop', 'IntegGUI Desktop')
        # Some attributes we force on our children
        self.ds.logger = self.logger

        self.add_statusbar()
        
        # Add workspaces
        self.ojws = self.ds.addws('ul', 'obsjrn', "Upper Left Workspace")
        self.oiws = self.ds.addws('ur', 'obsinfo', "Upper Right Workspace")
        self.umws = self.ds.addws('um', 'umws', "Upper Middle Workspace")
        self.lmws  = self.ds.addws('lm', 'lmws', "Lower Middle Workspace")
        self.lws  = self.ds.addws('ll', 'launchers', "Lower Left Workspace")
        self.exws = self.ds.addws('lr', 'executor', "Lower Right Workspace")

        # Populate "Observation Journal" ws
        self.add_frameinfo(self.ojws)
        #self.ojws.addpage('statmon', "StatMon", StatMonPage)

        # Populate "Lower Middle" ws
        self.handsets = self.lmws.addpage('handset', "Handset",
                                         WorkspacePage.WorkspacePage)
        self.queuepage = self.lmws.addpage('queues', "Queues",
                                           WorkspacePage.WorkspacePage)
        self.add_queue(self.queuepage, 'default', create=False)
        self.add_tagpage(self.lmws)
        self.dialogs = self.lmws.addpage('dialogs', "Dialogs",
                                         WorkspacePage.WorkspacePage)
        self.lmws.select('queues')

        # Populate "Observation Info" ws
        self.add_obsinfo(self.oiws)
        self.add_monitor(self.oiws)
        self.logpage = self.oiws.addpage('loginfo', "Logs",
                                         WorkspacePage.WorkspacePage)
        # self.fitspage = self.oiws.addpage('fitsview', "Fits",
        #                                   WorkspacePage.WorkspacePage)
        # self.fitspage.addpage('viewer', 'Fits Viewer',
        #                       FitsViewerPage)
        self.add_history(self.oiws)
        self.oiws.select('obsinfo')

        # Populate "Command Executors" ws
        self.add_terminal(self.exws)
        self.new_source('command', self.exws, title='Commands')
        
        self.add_dialogs()
        self.add_menus(self.w.menubar)

        self.w.root.show_all()
def Browse_Items(html, metamethod='', content='tvshows', view='515'):
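    # Build the diff-anime.pl series list: fetch each series page and scrape cover,
    # frames (fanart) and description, then add EpisodesDiff folders and a
    # "Next page" entry when pagination is present.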
    if (len(html) == 0):
        return
    r = re.compile("</a><div class='con'><a href='/(.+?)'>(.+?)</a><p>").findall(html)
    ItemCount = len(r)
    if len(r) > 0:
        for _url, _name in r:
            strona = 'http://diff-anime.pl/' + _url
            html2 = nURL(strona)
            _name2 = _name
#  thumbnail
            image = re.compile("</div><div class='content'><div class='con'><a href='(.+?)' class='fbox'>").findall(html2)
            ItemCount = len(image)
            if len(image) > 0:
                for foto in image:
                    img = "http://diff-anime.pl" + foto
            else:
                img = ""
#  fanart
            if "Nie dodano kadrów do tej serii." in html2:
                fanart = fanartSite
            else:
                    image2 = re.compile("<h2>Kadry</h2></div><div class='content'><a href='(.+?)' class='fbox'>").findall(html2)
                    ItemCount = len(image2)
                    if len(image2) > 0:
                        for _fanart in image2:
                            fanart = "http://diff-anime.pl" + _fanart
                    else:
                        fanart = img
#  description
            opis = re.compile("<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)</div>").findall(html2)
            ItemCount = len(opis)
            if len(opis) > 0:
                for desc in opis:
                    plot = unicode(desc,"utf-8")
            else:
                opis = re.compile("<h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)<").findall(html2)
                ItemCount = len(opis)
                if len(opis) > 0:
                    for desc in opis:
                        plot = unicode(desc, "utf-8")
                else:
                    opis = re.compile("<div id='pDesc' class='panel'><div class='head'><h2>Opis anime</h2></div><div class='content'><div class='con'>(.+?)<br />").findall(html2)
                    ItemCount = len(opis)
                    if len(opis) > 0:
                        for desc in opis:
                            plot = unicode(desc, "utf-8")
                    else:
                        plot = "Nie dodano jeszcze opisu do tej serii."
            labs = {}
            try:
                labs['plot'] = plot
            except:
                labs['plot'] = ''
###
            pars = {'mode': 'EpisodesDiff', 'site': site, 'section': section, 'title': _name2, 'url': strona, 'img': img, 'fanart': fanart}
            contextLabs = {'title': _name2, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot']}
            if   section == 'movie':
                contextMenuItems = ContextMenu_Movies(contextLabs)
            elif section == 'diffanime':
                contextMenuItems = ContextMenu_Series(contextLabs)
            else:
                contextMenuItems = []
            labs['title'] = _name2
            _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount)
# look for the next page
    npage = url[:-2] + str(int(url[-2:]) + 10)
    if -1 != html.find("div class='pagenav") and -1 != html.find("class='img"):
            _addon.add_directory({'mode': 'Page', 'site': site, 'section': section, 'url': npage, 'page': npage}, {'title': "Next page"}, is_folder=True, fanart=fanartSite, img=nexticon)
    set_view(content, view_mode=addst('tvshows-view'))