def INDEX(url, page):
    """List a show's videos; the site uses two different page layouts."""
    doc = read_page(url)
    items = doc.find('div', 'show_videos')
    if items:
        # prvni styl stranky s poradem (first layout)
        items = items.find('div', 'items')
        for item in items.findAll('div', 'item'):
            item2 = item.find('h3')
            item3 = item.find('div', 'img')
            url = item3.h3.a['href'].encode('utf-8')
            title = item3.h3.a.getText(" ").encode('utf-8')
            thumb = item3.a.img['src']
            # skip paid VOYO entries
            if re.search('voyo.nova.cz', str(url), re.U):
                continue
            addDir(title, __baseurl__ + url, 3, thumb, 1)
    else:
        # druhy styl stranky s poradem (second layout)
        items = doc.find('div', id='extra_index')
        items = items.find('div', 'items')
        for item in items.findAll('div', 'item'):
            item2 = item.find('div', 'text')
            item3 = item.find('div', 'img')
            url = item3.a['href'].encode('utf-8')
            title = item2.h2.a.getText(" ").encode('utf-8')
            thumb = item3.a.img['src']
            if re.search('voyo.nova.cz', str(url), re.U):
                continue
            addDir(title, __baseurl__ + url, 3, thumb, 1)
def listEpisodes(url):
    """List episodes from an archive page and add a link to the next block."""
    logDbg("listEpisodes()")
    req = urllib2.Request(url)
    req.add_header('User-Agent', _UserAgent_)
    response = urllib2.urlopen(req)
    httpdata = response.read().decode("windows-1250")
    response.close()
    match = re.compile('<div class="list">(.+?)<div id="otherartw" class="pages"', re.S).findall(httpdata)
    if match:
        items = re.compile('src="(.*?)" alt=.+?<h2>.*?<a href="(.+?)">(.+?)</a>.+?<div class="time">(.+?)</div>(.*?)</div>', re.S).findall(match[0])
        for img, link, name, date, desc in items:
            link = __baseurl__ + link
            # strip newlines from the description blob
            desc = desc.replace("\n", "")
            if len(desc):
                match = re.compile('<p>(.+?)</p>', re.S).findall(desc)
                if match:
                    desc = match[0]
                    # drop the optional leading <span>...</span> tag
                    if "<span" in desc:
                        logDbg("span found")
                        match = re.compile('<span.*</span>(.*)', re.S).findall(desc)
                        if match:
                            desc = match[0]
                            logDbg("new desc: " + desc)
            addDir('(' + date + ') ' + name, link, 4, img, desc)
        # link to the "other" episodes block, if present
        items = re.compile('<div class="otherart r"><h5><a href="(.+?)">(.+?)</a>', re.S).findall(httpdata)
        if items:
            link, name = items[0]
            link = __baseurl__ + link
            h = HTMLParser.HTMLParser()
            addDir('[B]' + h.unescape(name) + '[/B]', link, 3, nexticon, '')
    else:
        logErr("List of episodes not found!")
def BONUSY(link): doc = read_page(link) items = doc.find('ul', 'clearfix content') if re.search('Ouha', str(items), re.U): link = url + 'bonusy/' BONUSY(link) for item in items.findAll('li', 'itemBlock clearfix'): name_a = item.find('h3') name_a = name_a.find('a') name = name_a.getText(" ").encode('utf-8') if len(name) < 2: name = 'Titul bez názvu' url = 'http://www.ceskatelevize.cz' + str(item.a['href']) url = re.sub('porady', 'ivysilani', url) thumb = str(item.img['src']) #print name, thumb, url addDir(name, url, 10, thumb) try: pager = doc.find('div', 'pagingContent') act_page_a = pager.find('td', 'center') act_page = act_page_a.getText(" ").encode('utf-8') act_page = act_page.split() next_page_i = pager.find('td', 'right') #print act_page,next_page_i next_url = next_page_i.a['href'] next_label = 'Další strana (Zobrazena videa ' + act_page[0] + '-' + act_page[2] + ' ze ' + act_page[4] + ')' #print next_label,next_url addDir(next_label, 'http://www.ceskatelevize.cz' + next_url, 7, nexticon) except: print 'STRANKOVANI NENALEZENO!'
def buildMainMenu():
    """Build the plugin's top-level menu entries."""
    util.addDir("JAV", "JAV", 2, "")
    util.addDir("Gravure", "Gravure", 2, "")
    util.addDir("Latest JAV", "http://sexloading.com/", 1, "")
    util.addDir("Latest Gravure", "http://ivhunter.com", 1, "")
    # search targets for both sites, joined by a <split> marker
    util.addDir("Search", "http://sexloading.com/?s=<split>http://ivhunter.com/?s=", 3, "")
    xbmcplugin.endOfDirectory(int(sysarg))
def INDEX(url, page):
    """List a show's videos, branching on which page layout is present."""
    doc = read_page(url)
    style = doc.find('div', id='extra_index')
    if style:
        # prvni styl stranky s poradem (layout with #extra_index)
        items = doc.find('div', id='extra_index')
        items = items.find('div', 'items')
        for item in items.findAll('div', 'item'):
            item2 = item.find('div', 'text')
            item3 = item.find('div', 'img')
            url = item3.a['href'].encode('utf-8')
            title = item2.h2.a.getText(" ").encode('utf-8')
            thumb = item3.a.img['src']
            # skip paid VOYO entries
            if re.search('voyo.nova.cz', str(url), re.U):
                continue
            addDir(title, __baseurl__ + url, 3, thumb, 1)
    else:
        # druhy styl stranky s poradem (item_container layout)
        items = doc.find('div', 'items')
        for item in items.findAll('div', 'item_container'):
            url = item.a['href'].encode('utf-8')
            if re.search('voyo.nova.cz', str(url), re.U):
                continue
            item3 = item.find('div', 'img')
            url = item3.a['href'].encode('utf-8')
            title = item3.h3.a.getText(" ").encode('utf-8')
            thumb = item3.a.img['src']
            addDir(title, __baseurl__ + url, 3, thumb, 1)
def getVids(params):
    """List videos on a likuoo page and add a 'Next >' entry if one exists."""
    param = {'play': 1}
    content = util.getURL(params['url'], hdr)
    if content != False:
        films = util.extractAll(content, '<div class="item">', '<div class="item">')
        for film in films:
            param['title'] = makeAscii(util.extract(film, 'title="', '"'))
            param['url'] = util.extract(film, '<a href="', '" title="')
            param['poster'] = util.extract(film, 'src="', '" title="')
            param['fanart'] = param['poster']
            if param['url'] != None:
                u = sys.argv[0] + "?url=" + param['url'] + "&play=" + str(4) + "&name=" + urllib.quote_plus(param['title']) + "&poster=" + param['poster']
                liz = xbmcgui.ListItem(param['title'], iconImage="DefaultVideo.png", thumbnailImage=param['poster'])
                liz.setInfo(type="Video", infoLabels={"Title": param['title'], "Plot": ""})
                liz.setProperty("Poster_Image", param['poster'])
                ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
        # pagination: current page number is in <span class="current">
        next = util.extract(content, '<span class="current">', '</span>')
        if next != None:
            next = int(next) + 1
            if 'http://www.likuoo.com/new/' + str(next) in content:
                util.addDir("Next >", 'http://www.likuoo.com/new/' + str(next), 2, "", "")
    xbmcplugin.endOfDirectory(int(sysarg))
def INDEX(url): doc = read_page(url) items = doc.find('div', 'videoClipsWrapper') for item in items.findAll('li','listItem'): thumb = item.find('a', 'framedThumbnail') thumb = str(item.img['src']) info = item.find('div', 'carouselListItemText') try: name = info.find('a') name = name.getText(" ").encode('utf-8') except: name = "" try: name2 = info.find('div',' carouselItemText') name2 = name2.getText(" ").encode('utf-8') except: name2 = "Neznámý" link = str(info.a['href']) #print name+' '+name2,__baseurl__+link,2,thumb addDir(name+' '+name2,__baseurl__+link,2,thumb) try: items = doc.find('div', 'pager') for item in items.findAll('a'): page = item.text.encode('utf-8') if re.match('následující', page, re.U): next_url = item['href'].replace('.','') cast_url = urlparse(url) #print 'http://'+cast_url[1]+cast_url[2]+next_url addDir('>> Další strana >>','http://'+cast_url[1]+cast_url[2]+next_url,1,nexticon) except: print 'strankovani nenalezeno'
def CATEGORIES(url, page):
    """List show categories scraped from the 'b-show-listing' section."""
    html = get_url(url)
    section = re.search(
        "<div class=\"b-show-listing\".*?ady</h3>(.*?)</section>", html, re.S)
    if section != None:
        articles = re.findall("<article(.*?)</article>", section.group(1), re.S)
        if articles != None:
            for article in articles:
                url = re.search("<a href=\"(.*?)\"", article, re.S) or ""
                title = re.search("<a.*?title=\"(.*?)\"", article, re.S) or ""
                thumb = re.search("<img.*?data-original=\"(.*?)\"", article, re.S) or ""
                if url != "" and title != "":
                    if thumb != "":
                        addDir(title.group(1).replace('&nbsp;', ' '), url.group(1), 2, thumb.group(1), 1)
                    else:
                        addDir(title.group(1).replace('&nbsp;', ' '), url.group(1), 2, None, 1)
        else:
            addLink("[COLOR red]Chyba načítání pořadů[/COLOR]", "#", None, "")
    else:
        addLink("[COLOR red]Chyba načítání kategorie[/COLOR]", "#", None, "")
def getCategories(params):
    """List video categories and a 'Next >' pagination entry if available."""
    param = {'categories': 1}
    content = util.getURL(params['url'], hdr)
    if content != False:
        contents = util.extract(content, '<ul class="small-block-grid-4 videos-cats">', '</ul>')
        films = util.extractAll(contents, '<li>', '</li>')
        for film in films:
            title = util.extract(film, '<div class="media-panel-title category-title">', '</div>')
            param['title'] = util.extract(title, '">', '</a>')
            param['plot'] = util.extract(film, '<div class="media-panel-info">', '</div>')
            param['url'] = util.extract(title, '<a href="', '"')
            param['poster'] = util.extract(film, '<img src="', '" />')
            param['fanart'] = param['poster']
            if param['url'] != None:
                u = sys.argv[0] + "?url=" + param['url'] + "&mode=2&name=" + urllib.quote_plus(param['title']) + "&poster=" + param['poster']
                liz = xbmcgui.ListItem(param['title'], iconImage="DefaultVideo.png", thumbnailImage=param['poster'])
                liz.setInfo(type="Video", infoLabels={"Title": param['title'], "Plot": param['plot']})
                liz.setProperty("Fanart_Image", param['fanart'])
                liz.setProperty("Landscape_Image", param['fanart'])
                liz.setProperty("Poster_Image", param['poster'])
                ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
        # pagination: next link lives after the active <li>
        next = util.extract(content, '<div class="pagination">', '</div>')
        if next != None:
            next = util.extract(next, '<li class="active">', '</a>')
            if next != None:
                url = util.extract(next, '<a href="', '"')
                util.addDir("Next >", url, 2, "", "")
    xbmcplugin.endOfDirectory(int(sysarg))
def LIVE_OBSAH(url): url = url+str(time.time()) #seznam kanalu a jejich id pro zive vysilani cas = datetime.datetime.now() cas = cas.hour #Decko a Art se stridaji, proto menim podle casu if(cas < 20 and cas >= 6): programctda = r'ČT:Déčko - ' programctdaid = r'CT5' else: programctda = r'ČT ART - ' programctdaid = r'CT6' program=[r'ČT1 - ', r'ČT2 - ', r'ČT24 - ', r'ČT4 - ', programctda, r' ', r' ', r' ', r' ', r' ', r' ', r' ', r' '] programid=[r'CT1', r'CT2', r'CT24', r'CT4', programctdaid, r'CT26', r'CT27', r'CT28', r'CT29', r'CT30', r'CT31', r'CT32', r'CT33'] i = 0 # Zjisteni hashe hashurl = 'http://www.ceskatelevize.cz/ct24/zive-vysilani/' req = urllib2.Request(hashurl) req.add_header('User-Agent', ' Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3') response = urllib2.urlopen(req) httpdata = response.read() link=response.read() response.close() match = re.compile('hash=(.+?)&').findall(httpdata) hash = match[0] print 'HASH :'+hash i = 0 request = urllib2.Request(url) request.add_header("Referer",__baseurl__) request.add_header("Origin","http://www.ceskatelevize.cz") request.add_header("Accept","*/*") request.add_header("X-Requested-With","XMLHttpRequest") request.add_header("x-addr","127.0.0.1") request.add_header("User-Agent",_UserAgent_) request.add_header("Content-Type","application/x-www-form-urlencoded") con = urllib2.urlopen(request) # Read lisk XML page data = con.read() con.close() doc = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES) items = doc.find('div', 'clearfix') for item in items.findAll('div', 'channel'): prehrano = item.find('div','progressBar') prehrano = prehrano['style'] prehrano = prehrano[(prehrano.find('width:') + len('width:') + 1):] #name_a = item.find('p') try: name_a = item.find('a') name = program[i]+name_a.getText(" ").encode('utf-8')+'- Přehráno: '+prehrano.encode('utf-8') url = 
'http://www.ceskatelevize.cz/ivysilani/embed/iFramePlayerCT24.php?hash='+hash+'&videoID='+programid[i] thumb = str(item.img['src']) except: name = program[i]+'Právě teď běží pořad, který nemůžeme vysílat po internetu.' thumb = 'http://img7.ceskatelevize.cz/ivysilani/gfx/empty/noLive.png' #print name, thumb, url addDir(name,url,14,thumb) i=i+1
def OBSAH():
    """Build the station's top-level content menu."""
    # Filmy / Deti disabled: those sections use Silverlight
    addDir('Seriály', __baseurl__ + '/serialy/', 1, icon)
    addDir('Pořady', __baseurl__ + '/porady/', 1, icon)
    addDir('Zprávy', __baseurl__ + '/zpravy/', 1, icon)
    addDir('Sport', __baseurl__ + '/sport/', 1, icon)
    addDir('Živé vysielanie', __baseurl__ + '/tv-zive/', 2, icon)
def getGenres(url, hdr):
    """List genres scraped from the scrolling category list."""
    content = util.getURL(url, hdr)
    if content != False:
        allGenres = util.extract(content, '<ul class="scrolling cat">', '</ul>')
        genres = util.extractAll(allGenres, '<a href', 'a>')
        for genre in genres:
            util.addDir(util.extract(genre, '>', '</'),
                        util.extract(genre, '="', '"'), 1, "")
    xbmcplugin.endOfDirectory(int(sysarg))
def getStudios(url, hdr):
    """List studios linked from the ivhunter studios index."""
    content = util.getURL(url, hdr)
    if content != False:
        studios = util.extractAll(content, '<a href="http://ivhunter.com/studios', 'a>')
        for studio in studios:
            # skip the index link itself
            if util.extract(studio, '">', '</') != "Studios":
                util.addDir(util.extract(studio, '">', '</'),
                            "http://ivhunter.com/studios/" + util.extract(studio, '/', '/'), 1, "")
    xbmcplugin.endOfDirectory(int(sysarg))
def CATEGORIES(url, page):
    """List every <article> on the page as a category directory entry."""
    doc = read_page(url)
    for article in doc.findAll('article'):
        url = article.a['href'].encode('utf-8')
        title = article.a['title'].encode('utf-8')
        thumb = article.a.div.img['data-original'].encode('utf-8')
        addDir(title, url, 1, thumb, 1)
def EPISODES(url, page):
    """List episode articles (inline text layout) as directory entries."""
    doc = read_page(url)
    for article in doc.findAll('article', 'b-article b-article-text b-article-inline'):
        url = article.a['href'].encode('utf-8')
        title = article.a['title'].encode('utf-8')
        thumb = article.a.div.img['data-original'].encode('utf-8')
        addDir(title, url, 3, thumb, 1)
def CATEGORIES(url, page):
    """List every <article> on the page as a category entry (mode 2)."""
    doc = read_page(url)
    for article in doc.findAll('article'):
        url = article.a['href'].encode('utf-8')
        title = article.a['title'].encode('utf-8')
        thumb = article.a.div.img['data-original'].encode('utf-8')
        addDir(title, url, 2, thumb, 1)
def VIDEA(url): doc = read_page(url) items = doc.find('div', id='content') for item in items.findAll('div', 'video-box'): name = item.findAll('div', 'title title-grey') name = name[0].span name = name.getText(" ").encode('utf-8') url = str(item.a['href']) print name, url addDir(name,url,4,icon)
def HOME_POSLEDNI(url):
    """List the 'Najnovšie epizódy' (newest episodes) home-page section."""
    doc = read_page(url)
    for section in doc.findAll('section', 'b-main-section'):
        # only the section headed by the matching <h3>
        if section.div.h3 and section.div.h3.getText(" ").encode('utf-8') == 'Najnovšie epizódy':
            for article in section.findAll('article'):
                url = article.a['href'].encode('utf-8')
                title = article.a.find('div', {'class': 'e-info'}).getText(" ").encode('utf-8')
                thumb = article.a.div.img['data-original'].encode('utf-8')
                addDir(title, url, 3, thumb)
def HOME_NEJSLEDOVANEJSI(url):
    """List the 'Najsledovanejšie' (most watched) home-page section."""
    doc = read_page(url)
    for section in doc.findAll('section', 'b-main-section b-section-articles b-section-articles-primary my-5'):
        if section.div.h3.getText(" ").encode('utf-8') == 'Najsledovanejšie':
            for article in section.findAll('article'):
                url = article.a['href'].encode('utf-8')
                title = article.a.find('div', {'class': 'e-text-row'}).getText(" ").encode('utf-8')
                thumb = article.a.div.img['data-original'].encode('utf-8')
                addDir(title, url, 3, thumb)
def ABC(url): req = urllib2.Request(url) req.add_header('User-Agent', _UserAgent_) response = urllib2.urlopen(req) httpdata = response.read() response.close() match = re.compile('<a class=\"pageLoadAjaxAlphabet\"\s*href=\"([^\"]+)\"\s*rel=\"letter=.+?\">\s+<span>([^<]+)</span>\s+</a>').findall(httpdata) for link,name in match: print name,__baseurl__+link addDir(name,'http://www.ceskatelevize.cz'+link,3,icon)
def DAY_LIST(url):
    """List the day's channels from the 'actual-channels' block."""
    req = urllib2.Request(url)
    req.add_header('User-Agent', _UserAgent_)
    response = urllib2.urlopen(req)
    httpdata = response.read()
    response.close()
    data = substr(httpdata, 'data-type="actual-channels"', '.columns.actual-channels')
    match = re.compile('<img src="(.+?)" alt="(.+?)"').findall(data)
    for item in match:
        # item = (logo url, channel name)
        addDir(item[1], url, 9, item[0])
def HOME_DOPORUCUJEME(url):
    """List the 'Odporúčame' (recommended) home-page section."""
    doc = read_page(url)
    for section in doc.findAll('section', 'b-main-section b-section-articles b-section-articles-primary my-5'):
        if section.div.h3.getText(" ").encode('utf-8') == 'Odporúčame':
            for article in section.findAll('article'):
                url = article.a['href'].encode('utf-8')
                title = article.a.find('div', {'class': 'e-info'}).getText(" ").encode('utf-8')
                thumb = article.a.div.img['data-original'].encode('utf-8')
                addDir(title, url, 3, thumb)
def HOME_TOPPORADY(url, page):
    """List the 'TOP pořady' (top shows) home-page section."""
    doc = read_page(url)
    for section in doc.findAll('section', 'b-main-section my-sm-5'):
        if section.div.h3.getText(" ").encode('utf-8') == 'TOP pořady':
            for article in section.findAll('article'):
                url = article.a['href'].encode('utf-8')
                title = article.a['title'].encode('utf-8')
                thumb = article.a.div.img['data-original'].encode('utf-8')
                addDir(title, url, 2, thumb, 1)
def buildSubMenu(params):
    """Build a sub-menu from the <ul> whose id matches params['url']."""
    content = util.getURL('http://www.asianteensfor.me/javlist/blank/', hdr)
    if content != False:
        xoxo = util.extract(content, '<ul id="' + params['url'] + '">', '</ul>')
        subOptions = util.extractAll(xoxo, '<li class="cat-item', '</li>')
        for sub in subOptions:
            name = util.extract(sub, '/" >', '</a>')
            url = util.extract(sub, '<a href="', '" >')
            util.addDir(name, url, 5, "", "")
    xbmcplugin.endOfDirectory(int(sysarg))
def OBSAH():
    """Build the main menu from the site's navigation links."""
    req = urllib2.Request(__baseurl__)
    req.add_header('User-Agent', _UserAgent_)
    response = urllib2.urlopen(req)
    httpdata = response.read()
    response.close()
    match = re.compile('<li><a href="(.+?)" title="(.+?)">.+?</a></li>').findall(httpdata)
    for url, name in match:
        addDir(name, __baseurl__ + url, 1, icon)
def INDEX(url): doc = read_page(url) items = doc.findAll('div', 'pos-media') for item in items: try: rating = item.find('div', 'vote-message').getText(" ").encode('utf-8') r = re.search('(?P<rating>.+?)rating', rating) if r: ra = r.group('rating').split('/') rating = float(ra[0]) * 2 item2 = item.findNextSibling() popis = item2.getText(" ").encode('utf-8') s = re.search('Originální název: (?P<origname>.+?)Český název: (?P<jmeno>.+?)Datum vydání: (?P<rok>.+?)Žánry: (?P<zanry>.+?)Hrají: (?P<herci>.+?)Obsah: (?P<obsah>.+?)$', popis) if s: origname = s.group('origname') name = s.group('jmeno') rok = s.group('rok') zanry = s.group('zanry') herci = s.group('herci').replace(' a ', ',').split(',') obsah = s.group('obsah') infoLabels = {'Title': name, 'Genre': zanry, 'Year': int(rok), 'Cast': herci, 'Rating': rating, 'OriginalTitle': origname, 'Plot': obsah} except: infoLabels = {} item = item.find('a') name = item['title'].encode('utf-8') link = item['href'] item = item.find('img') icon = item['src'] addDir(name, __baseurl__ + link, 3, icon, infoLabels) try: pager = doc.find('div', 'pagination-bg') act_page_a = pager.find('span') act_page = act_page_a.getText(" ").encode('utf-8') next_page = int(act_page) + 1 items = pager.findAll('a') for item in items: odkaz = item.getText(" ") odkaz = odkaz.replace(u"\u00A0", "").encode('utf-8') print 'item:', item, 'odkaz:', odkaz if odkaz == '>': next_page_a = item['href'] elif odkaz == '>>': index = item['href'].rfind('/') max_page = item['href'][index + 1:] else: continue if (int(next_page) > int(max_page)): next_page = max_page next_label = 'Přejít na stranu ' + str(next_page) + ' z ' + str(max_page) addDir(next_label, __baseurl__ + next_page_a, 2, nexticon) except: print 'stop'
def SERIALY(url):
    """List series links from the 'Seriály online' content block."""
    req = urllib2.Request(url)
    req.add_header('User-Agent', _UserAgent_)
    response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    # narrow to the block between the heading and the social-links list
    match = re.compile('<div class=\"contentheading\"><h1>Seriály online</h1></div>[\s|\S]+<dl class=\"social-links\">').findall(data)
    match = re.compile('<p><a href=\"(.+?)\">(.+?)</a></p>').findall(match[0])
    for item in match:
        addDir(item[1], __baseurl__ + item[0], 6, '', infoLabels={})
def TN(url, page): doc = read_page(url) items = doc.find('div', 'items') for item in items.findAll('div', 'item'): item2 = item.find('h3') item3 = item.find('div', 'img') url = item2.a['href'].encode('utf-8') title = item2.a.getText(" ").encode('utf-8') thumb = item3.a.span.img['src'] print title, url, thumb addDir(title, __baseurl__ + url, 3, thumb, 1)
def getStars(params):
    """List stars from the alphabetical grid."""
    content = util.getURL(params['url'], hdr)
    if content != False:
        alphabet = util.extractAll(content, "<ul class='small-block-grid-5 stars'>", '</ul>')
        for letter in alphabet:
            stars = util.extractAll(letter, '<li>', '</li>')
            for star in stars:
                name = util.extract(star, '">', '</a>')
                url = util.extract(star, 'href="', '"')
                util.addDir(name, url, 2, "", "")
    xbmcplugin.endOfDirectory(int(sysarg))
def getIdols(url, hdr):
    """List idols from the alphabetical link groups, skipping empty titles."""
    content = util.getURL(url, hdr)
    if content != False:
        alphabet = util.extractAll(content, '<ul class="links">', '</ul>')
        for letter in alphabet:
            idols = util.extractAll(letter, '<li>', '</li>')
            for idol in idols:
                if util.makeAscii(util.extract(idol, 'title="', '"')) != "":
                    util.addDir(util.makeAscii(util.extract(idol, 'title="', '"')),
                                util.extract(idol, 'href="', '"').encode('utf-8'), 1, "")
    xbmcplugin.endOfDirectory(int(sysarg))
def TN(url,page): doc = read_page(url) items = doc.find('div', 'items') for item in items.findAll('div','item'): item2 = item.find('h3') item3 = item.find('div','img') url = item2.a['href'].encode('utf-8') title = item2.a.getText(" ").encode('utf-8') thumb = item3.a.span.img['src'] print title,url,thumb addDir(title,__baseurl__+url,3,thumb,1)
def NEWEST(url):
    """List the newest videos from the 'newestBox' list."""
    req = urllib2.Request(url)
    req.add_header('User-Agent', _UserAgent_)
    response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    data = substr(data, '<ul id="newestBox"', '</div>')
    pattern = '<a href="(.+?)">[\s]*?<img src="(.+?)".*?>[\s]*?(.+?)</a'
    match = re.compile(pattern).findall(data)
    for item in match:
        # item = (link, thumbnail, title)
        addDir(item[2].strip().replace('<br />', ' '), 'http://www.ceskatelevize.cz' + item[0], 10, item[1])
def CATEGORIES(url, page):
    """List shows from the 'show-list', deriving thumbnails from the slug."""
    doc = read_page(url)
    items = doc.find('ul', 'show-list')
    for item in items.findAll('li'):
        # skip advertising placeholders
        if re.search('ad-placeholder', str(item), re.U):
            continue
        url = item.a['href'].encode('utf-8')
        title = item.a.span.getText(" ").encode('utf-8')
        # thumbnail path is derived from the show slug after 'porad/'
        match = re.compile('porad/(.+?)').findall(url)
        thumb = 'http://static.cz.prg.cmestatic.com/static/cz/microsites/avod/img/porady/' + match[0] + '.jpg'
        addDir(title, __baseurl__ + url, 2, thumb, 1)
def HOME_ORIGINALS(url, page):
    """List the 'Nova Plus Originals' home-page section."""
    doc = read_page(url)
    for section in doc.findAll('section', 'b-main-section b-section-articles my-5'):
        if section.div.h3.getText(" ").encode('utf-8') == 'Nova Plus Originals':
            for article in section.findAll('article'):
                url = article.a['href'].encode('utf-8')
                title1 = article.h3.getText(" ").encode('utf-8')
                title2 = article.find('span', 'e-text').getText(" ").encode('utf-8')
                title = str(title1) + ' - ' + str(title2)
                thumb = article.a.div.img['data-original'].encode('utf-8')
                addDir(title, url, 3, thumb, 1)
def HOME_DOPORUCUJEME(url, page):
    """List the 'Doporučujeme' (recommended) home-page section."""
    doc = read_page(url)
    for section in doc.findAll('section', 'b-main-section b-section-articles b-section-articles-primary my-5'):
        if section.div.h3.getText(" ").encode('utf-8') == 'Doporučujeme':
            for article in section.findAll('article'):
                url = article.a['href'].encode('utf-8')
                title1 = article.h3.getText(" ").encode('utf-8')
                title2 = article.find('span', 'e-text').getText(" ").encode('utf-8')
                title = str(title1) + ' - ' + str(title2)
                thumb = article.a.div.img['data-original'].encode('utf-8')
                addDir(title, url, 3, thumb, 1)
def NEJNOVEJSI(url,page,kanal): newurl = str(url)+''+str(page)+'/'+str(kanal) data = getURL(newurl) pattern = '<div class="field-image-.+?"><a href="(.+?)"><span class="container-image.+?"><img src="(.+?)" alt=.+?class="field-title"><a href=".+?" title="(.+?)">.+?</a></div><div class="field-stream-length">' match = re.compile(pattern).findall(data) for linkurl, obrazek, nazev in match: print 'linkurl :' print linkurl addDir(nazev,__baseurl__+linkurl,10,obrazek,0,nazev) nextpage = page+1 addDir('>> Další strana',url,6,nexticon,nextpage,kanal)
def HOME_POSLEDNI(url, page):
    """List the 'Poslední díly' (latest episodes) home-page section."""
    doc = read_page(url)
    for section in doc.findAll('section', 'b-main-section b-section-articles my-5'):
        if section.div.h3.getText(" ").encode('utf-8') == 'Poslední díly':
            for article in section.findAll('article'):
                url = article.a['href'].encode('utf-8')
                title1 = article.h3.getText(" ").encode('utf-8')
                title2 = article.find('span', 'e-text').getText(" ").encode('utf-8')
                title = str(title1) + ' - ' + str(title2)
                thumb = article.a.div.img['data-original'].encode('utf-8')
                addDir(title, url, 3, thumb, 1)
def getStars(params):
    """List stars from the alphabetical grid (duplicate of getStars above)."""
    content = util.getURL(params['url'], hdr)
    if content != False:
        alphabet = util.extractAll(content, "<ul class='small-block-grid-5 stars'>", '</ul>')
        for letter in alphabet:
            stars = util.extractAll(letter, '<li>', '</li>')
            for star in stars:
                name = util.extract(star, '">', '</a>')
                url = util.extract(star, 'href="', '"')
                util.addDir(name, url, 2, "", "")
    xbmcplugin.endOfDirectory(int(sysarg))
def VOYO_OBSAH_LIVE():
    """Build the menu of VOYO live simulcast channels."""
    addDir('VOYO Cinema', __baseurl__ + '/product/tv-zive/28995-simulcast-voyo-cinema', 3, None)
    addDir('Nova', __baseurl__ + '/product/tv-zive/28992-simulcast-nova', 3, None)
    addDir('Nova Sport', __baseurl__ + '/product/tv-zive/28993-simulcast-nova-sport', 3, None)
    addDir('Fanda', __baseurl__ + '/product/tv-zive/33097-simulcast-fanda', 3, None)
    addDir('Telka', __baseurl__ + '/product/tv-zive/33917-simulcast-telka', 3, None)
def DATE_LIST(url):
    """List the last 30 days as folders plus a previous-month folder."""
    pole_url = url.split("/")
    date = pole_url[len(pole_url) - 1]
    if date:
        # URL ends with an explicit date
        date = datetime.date(*time.strptime(date, DATE_FORMAT)[:3])
    else:
        date = datetime.date.today()
    # Add link to the previous-month virtual folder
    pdate = date - datetime.timedelta(days=30)
    addDir('Předchozí měsíc (%s)' % date2label(pdate).encode('utf-8'),
           __baseurl__ + '/' + pdate.strftime(DATE_FORMAT), 5, icon)
    for i in range(0, 30):
        pdate = date - datetime.timedelta(i)
        addDir(date2label(pdate).encode('utf-8'),
               __baseurl__ + '/' + pdate.strftime(DATE_FORMAT), 8, icon)
def LIST_SHOWS(url):
    """List shows from the JSON API; append a next-page link when present."""
    data = getJsonDataFromUrl(url)
    for item in data[u'_embedded'][u'stream:show']:
        link = __baseurl__ + item[u'_links'][u'self'][u'href']
        image = makeImageUrl(item[u'image'])
        name = item[u'name']
        addDir(name, link, 2, image)
    try:
        # next-page link is optional in the HAL payload
        link = __baseurl__ + data[u'_links'][u'next'][u'href']
        addDir(u'[B][COLOR blue]Další pořady >>[/COLOR][/B]', link, 1, nexticon)
    except:
        logDbg('Další pořady nenalezeny')
def LIST_EPISODES(url):
    """List episodes from the JSON API; append a next-page link when present."""
    data = getJsonDataFromUrl(url)
    for item in data[u'_embedded'][u'stream:episode']:
        link = __baseurl__ + item[u'_links'][u'self'][u'href']
        image = makeImageUrl(item[u'image'])
        # label: "<show name> | <episode name>"
        name = item[u'_embedded'][u'stream:show'][u'name'] + ' | ' + item[u'name']
        addDir(name, link, 10, image)
    try:
        link = __baseurl__ + data[u'_links'][u'next'][u'href']
        addDir(u'[B]Další epizody pořadu >>[/B]', link, 3, nexticon)
    except:
        logDbg('Další epizody nenalezeny')
def PORADY(url): doc = read_page(url) items = doc.find('div', 'show-list') for item in items.findAll('div', 'show'): name = item.a name = name.getText(" ").encode('utf-8') url = item.findAll('li', 'archive') pocet = url[0].a pocet = pocet.getText(" ").encode('utf-8') pocet = re.compile('([0-9]+)').findall(pocet) url = str(url[0].a['href']) thumb = str(item.img['src']) print name, thumb, url, pocet[0] addDir(name+' ('+pocet[0]+' dílů)',url,3,thumb)
def DAY_PROGRAM_LIST(url, chnum):
    """List the day's programme items for the channel named by chnum."""
    nazvy = ['ČT1', 'ČT2', 'ČT24', 'ČT sport', 'ČT:D', 'ČT art']
    nlink = ['ct1', 'ct2', 'ct24', 'sport', 'dart', 'dart']
    # map the display name onto the CSS channel suffix
    index = nazvy.index(chnum)
    req = urllib2.Request(url)
    req.add_header('User-Agent', _UserAgent_)
    response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    data = substrAll(data, 'programme-list channel-' + nlink[index] + '"', '</ul')
    pattern = '<a href="(.+?)" class="program-item" title=[^>]+>[\s]*?<span class="time">(.+?)</span>[\s]*?<span class="title">(.+?)</span>'
    match = re.compile(pattern).findall(data)
    for item in match:
        # item = (link, time, title)
        addDir(item[1] + ' ' + item[2], 'http://www.ceskatelevize.cz' + item[0], 10, icon)
def CATEGORIES(url, page):
    """List shows from the 'show-list' using transparent show logos."""
    doc = read_page(url)
    items = doc.find('ul', 'show-list')
    for item in items.findAll('li'):
        # skip advertising placeholders
        if re.search('ad-placeholder', str(item), re.U):
            continue
        url = item.a['href'].encode('utf-8')
        title = item.a.span.getText(" ").encode('utf-8')
        id_porad = item['data-show-id']
        # transparent logos keyed by show id (logos-with-background variant
        # would come from item.img['src'] instead)
        thumb = 'http://static.cz.prg.cmestatic.com/static/cz/microsites/avod/img/porady/logo/' + str(id_porad) + '.png'
        addDir(title, __baseurl__ + url, 2, thumb, 1)
def getCategories(params):
    """List video categories with pagination (formatted twin of getCategories)."""
    param = {'categories': 1}
    content = util.getURL(params['url'], hdr)
    if content != False:
        contents = util.extract(content, '<ul class="small-block-grid-4 videos-cats">', '</ul>')
        films = util.extractAll(contents, '<li>', '</li>')
        for film in films:
            title = util.extract(film, '<div class="media-panel-title category-title">', '</div>')
            param['title'] = util.extract(title, '">', '</a>')
            param['plot'] = util.extract(film, '<div class="media-panel-info">', '</div>')
            param['url'] = util.extract(title, '<a href="', '"')
            param['poster'] = util.extract(film, '<img src="', '" />')
            param['fanart'] = param['poster']
            if param['url'] != None:
                u = sys.argv[0] + "?url=" + param['url'] + "&mode=2&name=" + urllib.quote_plus(param['title']) + "&poster=" + param['poster']
                liz = xbmcgui.ListItem(param['title'], iconImage="DefaultVideo.png", thumbnailImage=param['poster'])
                liz.setInfo(type="Video", infoLabels={"Title": param['title'], "Plot": param['plot']})
                liz.setProperty("Fanart_Image", param['fanart'])
                liz.setProperty("Landscape_Image", param['fanart'])
                liz.setProperty("Poster_Image", param['poster'])
                ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
        # pagination: next link follows the active <li>
        next = util.extract(content, '<div class="pagination">', '</div>')
        if next != None:
            next = util.extract(next, '<li class="active">', '</a>')
            if next != None:
                url = util.extract(next, '<a href="', '"')
                util.addDir("Next >", url, 2, "", "")
    xbmcplugin.endOfDirectory(int(sysarg))
def FULL_EPISODES(url, page):
    """List full episodes, preferring the 'Celé ...' section when present."""
    doc = read_page(url)
    section = doc.find('section', 'b-main-section')
    if section.div.h3.getText(" ").encode('utf-8').startswith('Celé'):
        # dedicated full-episodes section
        for article in section.findAll('article'):
            url = article.a['href'].encode('utf-8')
            title = article.a['title'].encode('utf-8')
            thumb = article.a.div.img['data-original'].encode('utf-8')
            addDir(title, url, 3, thumb, 1)
    else:
        # fall back to the inline article listing
        for article in doc.findAll('article', 'b-article b-article-text b-article-inline'):
            url = article.a['href'].encode('utf-8')
            title = article.a['title'].encode('utf-8')
            thumb = article.a.div.img['data-original'].encode('utf-8')
            addDir(title, url, 3, thumb, 1)
def VOYO_OBSAH(url, name='', page=None):
    """List VOYO content: try each listing regex in turn, page the results,
    and fall through to playback when nothing listable is found."""
    i = 0
    iter = False
    data = voyo_read(url)
    data = substr(data, (CAT_START, LISTING_START, VIDEO_START), LISTING_END)
    # first regex that produces matches wins
    for regex in [CATEGORIES_ITER_RE, LISTING_ITER_RE, LISTING_ITER_RE2, LISTING_ITER_RE3]:
        for match in re.finditer(regex, data, re.DOTALL):
            iter = True
            i += 1
            item = match.groupdict()
            if 'date' in item and item['date']:
                title = "%s (%s)" % (item['title'], item['date'])
            else:
                title = item['title']
            if 'img' in item:
                img = item['img']
            else:
                img = None
            addDir(title, __baseurl__ + item['url'], 1, img)
        if iter:
            break
    if i >= MAX_PAGE_ENTRIES:
        # full page -> offer the next one
        if page is None:
            page = 1
        page += 1
        idx = url.find('?page=')
        if idx != -1:
            nexturl = url[:idx] + '?page=' + str(page)
        else:
            nexturl = url + '?page=' + str(page)
        addDir('Daľšia strana >>', nexturl, 1, nexticon, page=page)
    if not iter:
        # not a listing page: resolve the video itself
        if username != "":
            VIDEOLINK(url, name)
        else:
            VIDEOLINK_TEST(url, name)
def VOYO_OBSAH(url, name='', page=None):
    """List Markiza content: categories first, then listing items; page full
    pages and fall through to playback when nothing matched."""
    i = 0
    iter1 = False
    iter2 = False
    data = markiza_read(url)
    data = substr(data, LISTING_START, LISTING_END)
    for item in re.finditer(CATEGORIES_ITER_RE, data, re.DOTALL):
        iter1 = True
        i += 1
        addDir(item.group('title'), __baseurl__ + item.group('url'), 1, item.group('img'))
    if not iter1:
        # no categories -> try the plain listing
        for item in re.finditer(LISTING_ITER_RE, data, re.DOTALL):
            iter2 = True
            i += 1
            addDir(item.group('title'), __baseurl__ + item.group('url'), 1, item.group('img'))
    if i == MAX_PAGE_ENTRIES:
        # exactly a full page -> offer the next one
        if page is None:
            page = 1
        page += 1
        idx = url.find('?page=')
        if idx != -1:
            nexturl = url[:idx] + '?page=' + str(page)
        else:
            nexturl = url + '?page=' + str(page)
        addDir('[B]Daľšia strana[/B]', nexturl, 1, nexticon, page=page)
    if not iter1 and not iter2:
        # not a listing page: resolve the video itself
        VIDEOLINK(url, name)
def getVids(params):
    """List likuoo videos with pagination (formatted twin of getVids)."""
    param = {'play': 1}
    content = util.getURL(params['url'], hdr)
    if content != False:
        films = util.extractAll(content, '<div class="item">', '<div class="item">')
        for film in films:
            param['title'] = makeAscii(util.extract(film, 'title="', '"'))
            param['url'] = util.extract(film, '<a href="', '" title="')
            param['poster'] = util.extract(film, 'src="', '" title="')
            param['fanart'] = param['poster']
            if param['url'] != None:
                u = sys.argv[0] + "?url=" + param['url'] + "&play=" + str(4) + "&name=" + urllib.quote_plus(param['title']) + "&poster=" + param['poster']
                liz = xbmcgui.ListItem(param['title'], iconImage="DefaultVideo.png", thumbnailImage=param['poster'])
                liz.setInfo(type="Video", infoLabels={"Title": param['title'], "Plot": ""})
                liz.setProperty("Poster_Image", param['poster'])
                ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
        # pagination: current page number is in <span class="current">
        next = util.extract(content, '<span class="current">', '</span>')
        if next != None:
            next = int(next) + 1
            if 'http://www.likuoo.com/new/' + str(next) in content:
                util.addDir("Next >", 'http://www.likuoo.com/new/' + str(next), 2, "", "")
    xbmcplugin.endOfDirectory(int(sysarg))
def EPISODES(url, name):  # rewrite by MN
    """List the tab navigation and the episode articles of a show page.

    url  -- show page URL; the tab matching it is highlighted.
    name -- show name (unused here; kept for the dispatcher's call shape).

    Fixes relative to the original: removed the dead `lis != None` check
    (re.findall always returns a list), replaced the `or ""` sentinel trick
    with explicit None checks, renamed locals so the `url` parameter is no
    longer shadowed, and guarded against a missing title match (which
    previously crashed on `.group`).
    """
    html = get_url(url)

    # Tabs: the detail-navigation <nav> holds one <li> per tab.
    section = re.search(
        "<nav class=\"navigation js-show-detail-nav\">(.*?)</nav>", html, re.S)
    if section is not None:
        for li in re.findall("<li(.*?)</li>", section.group(1), re.S):
            tab_url = re.search("<a href=\"(.*?)\"", li, re.S)
            tab_title = re.search("<a.*?title=\"(.*?)\"", li, re.S)
            if tab_url is None or tab_title is None:
                continue
            label = tab_title.group(1).replace(' ', ' ')
            if url == tab_url.group(1):
                # Current tab: render it highlighted (italic).
                addDir('[I][COLOR yellow]' + label + '[/COLOR][/I]',
                       tab_url.group(1), 2, None, 1)
            else:
                addDir('[COLOR yellow]' + label + '[/COLOR]',
                       tab_url.group(1), 2, None, 1)

    # Further episodes of the show: one <article> per playlist entry.
    for article in re.findall(
            "<article class=\"b-article-news m-layout-playlist\">(.*?)</article>",
            html, re.S):
        ep_url = re.search("<a href=\"(.*?)\"", article, re.S)
        # Skip entries without a link and paid Voyo content.
        if ep_url is None or ep_url.group(1).find('voyo') != -1:
            continue
        ep_title = re.search("<a.*?title=\"(.*?)\"", article, re.S)
        if ep_title is None:
            continue
        thumb = re.search("<img.*?data-original=\"(.*?)\"", article, re.S)
        addDir(ep_title.group(1).replace(' ', ' '),
               ep_url.group(1), 3,
               thumb.group(1) if thumb is not None else None, 1)
def EPISODES(url):
    """List episodes and related sections scraped from a show page.

    Adds one entry per inline text article, then walks every
    'b-main-section' block and prefixes its entries with the section heading.
    On a 404/HTTP failure an inert red error row is added and False returned.

    Fixes relative to the original: the section heading is computed once per
    section instead of up to three times, the mutually-exclusive heading
    checks use elif, and locals no longer shadow the `url` parameter.
    """
    # print 'EPISOD9ES *********************************' + str(url)
    try:
        doc = read_page(url)
    except urllib2.HTTPError:
        addLink("[COLOR red]Stranka nenalezena: " + url + "[/COLOR]", "#", None, "")
        # xbmcgui.Dialog().ok('Chyba', 'CHYBA 404: STRÁNKA NEBOLA NÁJDENÁ', '', '')
        return False

    # Inline text articles at the top of the page.
    for article in doc.findAll('article', 'b-article b-article-text b-article-inline'):
        link = article.a['href'].encode('utf-8')
        title = article.a.find('div', {'class': 'e-info'}).getText(" ").encode('utf-8').strip()
        thumb = article.a.div.img['data-original'].encode('utf-8')
        addDir(title, link, 3, thumb)

    for section in doc.findAll('section', 'b-main-section'):
        # The heading decides how this section's entries are labelled.
        heading = section.div.h3.getText(" ").encode('utf-8')
        if heading == 'Celé epizódy':
            for article in section.findAll('article'):
                link = article.a['href'].encode('utf-8')
                if article.a.find('div', {'class': 'e-date'}):
                    title = 'Celé epizódy - ' + article.a.find('div', {'class': 'e-info'}).getText(" ").encode('utf-8')
                else:
                    title = 'Celé epizódy - ' + article.a['title'].encode('utf-8')
                thumb = article.a.div.img['data-original'].encode('utf-8')
                addDir(title, link, 3, thumb)
        elif heading == 'Mohlo by sa vám páčiť':
            for article in section.findAll('article'):
                link = article.a['href'].encode('utf-8')
                title = 'Mohlo by sa vám páčiť - ' + article.a.find('div', {'class': 'e-info'}).getText(" ").encode('utf-8')
                thumb = article.a.div.img['data-original'].encode('utf-8')
                addDir(title, link, 3, thumb)
        elif heading == 'Zo zákulisia':
            for article in section.findAll('article'):
                link = article.a['href'].encode('utf-8')
                title = 'Zo zákulisia - ' + article.a['title'].encode('utf-8')
                thumb = article.a.div.img['data-original'].encode('utf-8')
                addDir(title, link, 3, thumb)
def HOMEPAGE(url, page):  # new by MN
    """Build the home-page listing: the carousel teaser plus article sections.

    Fix: the original guarded re.findall() results with `!= None`, which is
    always true (findall returns a list), so the two red error rows were
    unreachable. Emptiness is the actual failure signal and is tested now.
    """
    html = get_url(url)

    # Carousel: first teaser inside the main section.
    teaser = re.search(
        "<section class=\"b-main-section\">.*?<div class=\"b-carousel\">.*?"
        "<a href=\"(.*?)\" title=\"(.*?)\">.*?<img.*?data-original=\"(.*?)\"",
        html, re.S)
    if teaser is not None:
        addDir(teaser.group(2).replace(' ', ' '), teaser.group(1), 3,
               teaser.group(3), 1)

    # Article sections: heading + body per <section>.
    sections = re.findall(
        "<h3 class=\"e-articles-title\">(.*?)</h3>(.*?)</section>", html, re.S)
    if sections:
        for heading, body in sections:
            category = re.sub(r'<[^>]*?>', '', heading).replace(
                ' ', ' ').replace(' ', '').replace('\n', '')
            # "TOP POŘADY" entries open show pages (mode 2), the rest episodes.
            mode = 2 if category == "TOP POŘADY" else 3
            articles = re.findall("<article(.*?)</article>", body, re.S)
            if articles:
                for article in articles:
                    a_url = re.search("<a href=\"(.*?)\"", article, re.S)
                    a_title = re.search("<a.*?title=\"(.*?)\"", article, re.S)
                    a_thumb = re.search("<img.*?data-original=\"(.*?)\"",
                                        article, re.S)
                    if a_url is not None and a_title is not None:
                        addDir(category + " - "
                               + a_title.group(1).replace(' ', ' '),
                               a_url.group(1), mode,
                               a_thumb.group(1) if a_thumb is not None else None,
                               1)
            else:
                addLink("[COLOR red]Chyba načítání pořadů[/COLOR]", "#", None, "")
    else:
        addLink("[COLOR red]Chyba načítání kategorie[/COLOR]", "#", None, "")
def LIST_SEASON(url):
    """List all episodes of a season (or of every season in a season list).

    The API embeds either a single season dict or a list of season dicts
    under '_embedded'/'stream:season'; each season embeds its episodes.
    A trailing '_links'/'next' href, when present, becomes a paging entry.

    Fix: the bare `except:` clauses are narrowed to the exceptions actually
    raised by missing or oddly-typed JSON keys, so real bugs are no longer
    silently swallowed. `type(x) is ...` checks replaced with isinstance.
    """
    data = getJsonDataFromUrl(url)
    seasons = data[u'_embedded'][u'stream:season']
    if isinstance(seasons, dict):
        # Single season: its episodes are embedded directly.
        for item in seasons[u'_embedded'][u'stream:episode']:
            link = __baseurl__ + item[u'_links'][u'self'][u'href']
            addDir(item[u'name'], link, 10, makeImageUrl(item[u'image']))
    elif isinstance(seasons, list):
        for season in seasons:
            try:
                for episode in season[u'_embedded'][u'stream:episode']:
                    link = __baseurl__ + episode[u'_links'][u'self'][u'href']
                    name = season[u'name'] + ' | ' + episode[u'name']
                    addDir(name, link, 10, makeImageUrl(episode[u'image']))
            except (KeyError, TypeError):
                # Season without (well-formed) embedded episodes: skip it.
                continue
    try:
        link = __baseurl__ + data[u'_links'][u'next'][u'href']
        addDir(u'[B]Další epizody pořadu >>[/B]', link, 2, nexticon)
    except (KeyError, TypeError):
        logDbg('Další epizody nenalezeny')
def OBSAH():
    """Build the top-level Nova Plus menu: one entry per content category."""
    entries = (
        ('Seriály a pořady', 'http://novaplus.nova.cz/porady/', 5),
        ('Televizní noviny', 'http://novaplus.nova.cz/porad/televizni-noviny', 1),
        ('TOP pořady', 'http://novaplus.nova.cz', 9),
        ('Poslední epizody', 'http://novaplus.nova.cz', 8),
        ('Nejsledovanější', 'http://novaplus.nova.cz', 6),
        ('Nova Plus Originals', 'http://novaplus.nova.cz', 10),
        ('Doporučujeme', 'http://novaplus.nova.cz', 7),
    )
    for label, link, mode in entries:
        addDir(label, link, mode, icon, 1)
def buildMainMenu():
    """Root menu: Latest / Categories / Search, then close the directory."""
    menu = (
        ("Latest", "http://www.likuoo.com/", 2),
        ("Categories", "http://www.likuoo.com/", 3),
        ("Search", "Search", 4),
    )
    for label, target, mode in menu:
        util.addDir(label, target, mode, "", "")
    xbmcplugin.endOfDirectory(int(sysarg))
def buildMainMenu():
    """Root menu of the site's browse modes, then close the directory."""
    menu = (
        ("Newest", "http://woodrocket.com/newest-p**n", 2),
        ("Whats Hot", "http://woodrocket.com/whats-hot", 2),
        ("Web Series", "http://woodrocket.com/series", 5),
        ("Exclusives", "http://woodrocket.com/exclusive", 2),
        ("Categories", "http://woodrocket.com/categories", 6),
        ("Stars", "http://woodrocket.com/stars", 3),
        ("Search", "Search", 4),
    )
    for label, target, mode in menu:
        util.addDir(label, target, mode, "", "")
    xbmcplugin.endOfDirectory(int(sysarg))
def OBSAH():
    """Build the reduced top-level menu (three fixed categories)."""
    for label, link, mode in (
            ('Nejsledovanější', 'http://novaplus.nova.cz', 6),
            ('Seriály a pořady', 'http://novaplus.nova.cz/porady/', 5),
            ('Televizní noviny',
             'http://novaplus.nova.cz/porad/televizni-noviny', 1)):
        addDir(label, link, mode, icon, 1)