def find_episodes(params):
    import xml.dom.minidom
    baseurl = 'http://www.uitzendinggemist.nl'
    url = urllib.unquote(params['url'])
    module = params['module']
    page = ""
    pagecount = 1
    while pagecount < 10:
        rssurl = baseurl + url + '.rss?page=' + str(pagecount)
        request = common.fetchPage({"link": rssurl, "cookie": "site_cookie_consent=yes"})
        if not request["status"] == 200:
            break
        page = request["content"].encode('utf-8')
        try:
            dom = xml.dom.minidom.parseString(page)
        except:
            # Escape bare ampersands that break the XML parser, then retry.
            page = page.replace("&", "&amp;")
            dom = xml.dom.minidom.parseString(page)
        if len(dom.getElementsByTagName('item')) == 0:
            break
        for item in dom.getElementsByTagName('item'):
            videourl = utils.getText(item.getElementsByTagName('link')[0].childNodes)
            videourl = urllib.quote_plus(videourl)
            videourl = sys.argv[0] + "?module=" + module + "&action=find_video" + "&url=" + videourl
            try:
                thumb = item.getElementsByTagName('media:thumbnail')[0].attributes['url'].value
            except:
                thumb = ""
            title = common.replaceHTMLCodes(utils.getText(item.getElementsByTagName('title')[0].childNodes))
            utils.addLink(title, videourl, thumb)
        pagecount = pagecount + 1
    xbmcplugin.endOfDirectory(int(sys.argv[1]))

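# A minimal, self-contained sketch of the RSS parsing pattern used in
# find_episodes() above: xml.dom.minidom matches 'media:thumbnail' by its
# literal qualified tag name, so no namespace handling is needed. The feed
# below is made up for illustration; _text() stands in for utils.getText().
import xml.dom.minidom

def _text(nodes):
    # Concatenate the text child nodes of an element.
    return "".join(n.data for n in nodes if n.nodeType == n.TEXT_NODE)

_sample = """<?xml version="1.0"?>
<rss version="2.0" xmlns:media="http://search.yahoo.com/mrss/">
  <channel>
    <item>
      <title>Example episode</title>
      <link>http://www.uitzendinggemist.nl/afleveringen/1</link>
      <media:thumbnail url="http://example.com/thumb.jpg"/>
    </item>
  </channel>
</rss>"""

_dom = xml.dom.minidom.parseString(_sample)
for _item in _dom.getElementsByTagName('item'):
    print(_text(_item.getElementsByTagName('title')[0].childNodes))
    print(_text(_item.getElementsByTagName('link')[0].childNodes))
    print(_item.getElementsByTagName('media:thumbnail')[0].attributes['url'].value)
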
def List():
    conn = sqlite3.connect(favoritesdb)
    c = conn.cursor()
    try:
        c.execute("SELECT * FROM favorites")
        for (name, url, mode, img) in c.fetchall():
            utils.addLink(name, url, int(mode), img, '', '', 'del')
        conn.close()
        xbmcplugin.endOfDirectory(utils.addon_handle)
    except:
        conn.close()
        utils.notify('No Favourites', 'No Favourites found')
        return

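# Hypothetical companion sketch (not part of the add-on code above): List()
# unpacks favorites rows as (name, url, mode, img), so a matching schema and
# insert helper could look like this. The table layout and helper name are
# assumptions; favoritesdb is the path to the add-on's SQLite file.
import sqlite3

def add_favorite(favoritesdb, name, url, mode, img):
    conn = sqlite3.connect(favoritesdb)
    c = conn.cursor()
    # One row per favourite, in the column order List() expects.
    c.execute("CREATE TABLE IF NOT EXISTS favorites "
              "(name TEXT, url TEXT, mode TEXT, img TEXT)")
    c.execute("INSERT INTO favorites VALUES (?, ?, ?, ?)", (name, url, str(mode), img))
    conn.commit()
    conn.close()
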
def List(url):
    try:
        listhtml = utils.getHtml(url, '')
    except:
        utils.notify('Oh no', 'It looks like this website is under maintenance')
        return None
    match = re.compile(
        '<ul class="carousel-list">(.*?)<div class="carousel-prev">',
        re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
    match1 = re.compile(
        r'<a class="clip-link" data-id=".*?" title="([^"]+)" href="([^"]+)".*?src="([^"]+)"',
        re.DOTALL | re.IGNORECASE).findall(match)
    for name, videopage, img in match1:
        name = utils.cleantext(name)
        utils.addLink(name, videopage, 32, img, '')
    xbmcplugin.endOfDirectory(utils.addon_handle)

def createSuggestionsList(params):
    module = params['module']
    url = 'http://www.uitzendinggemist.nl/kijktips'
    request = common.fetchPage({"link": url, "cookie": "site_cookie_consent=yes"})
    if request["status"] == 200:
        page = request["content"].encode('utf-8')
        episodes = common.parseDOM(page, 'div', attrs={'class': 'kijktip'})
        for episode in episodes:
            title = common.parseDOM(episode, 'h3')
            videourl = "http://www.uitzendinggemist.nl%s" % common.parseDOM(title, 'a', ret='href')[0]
            title = common.parseDOM(title, 'a')[0]
            subtitle = common.parseDOM(episode, 'h2')
            subtitle = common.parseDOM(subtitle, 'a')[0]
            plot = common.parseDOM(episode, 'p')[0]
            title = "%s - %s" % (title, subtitle)
            videourl = urllib.quote_plus(videourl)
            videourl = sys.argv[0] + "?module=" + module + "&action=find_video" + "&url=" + videourl
            thumb = utils.parseDataImages(common.parseDOM(episode, 'img', attrs={'class': 'thumbnail'}, ret='data-images')[0])
            utils.addLink(title, videourl, thumb, info={'plot': plot})
    xbmcplugin.endOfDirectory(int(sys.argv[1]))

def find_episodes(params):
    baseurl = 'http://www.uitzendinggemist.nl/top50/'
    period = params['period']
    module = params['module']
    url = baseurl + period
    request = common.fetchPage({"link": url, "cookie": "site_cookie_consent=yes"})
    if request["status"] == 200:
        page = request["content"].encode('utf-8')
        page = common.parseDOM(page, 'tbody')
        episodes = common.parseDOM(page, 'tr')
        for i, episode in enumerate(episodes):
            title = common.parseDOM(episode, 'h2')
            subtitle = common.parseDOM(episode, 'h3')
            title = "%i. %s, %s" % (i + 1,
                                    common.parseDOM(subtitle, 'a', ret='title')[0],
                                    common.parseDOM(title, 'a', ret='title')[0])
            videourl = "http://www.uitzendinggemist.nl%s" % common.parseDOM(
                episode, 'a', attrs={'class': 'episode active episode-image'}, ret='href')[0]
            videourl = urllib.quote_plus(videourl)
            videourl = sys.argv[0] + "?module=" + module + "&action=find_video" + "&url=" + videourl
            thumb = utils.parseDataImages(common.parseDOM(episode, 'img', attrs={'class': 'thumbnail'}, ret='data-images')[0])
            utils.addLink(title, videourl, thumb)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))

def find_episodes(params):
    baseurl = 'http://www.uitzendinggemist.nl/'
    date = urllib.unquote(params['date'])
    module = params['module']
    days = ["zo", "ma", "di", "wo", "do", "vr", "za"]
    dow = days[int(params['dow'])]
    page = ""
    url = baseurl + "/gids/" + date
    request = common.fetchPage({"link": url, "cookie": "site_cookie_consent=yes"})
    if request["status"] == 200:
        page = request["content"].encode('utf-8')
    episodes = re.findall(r"<li.*?class=\"episode active\".*?data-path=\"(.*?)\".*?>", page)
    episodelist = []
    unique = []
    for episode in episodes:
        episodeinfourl = baseurl + episode
        request = common.fetchPage({"link": episodeinfourl, "cookie": "site_cookie_consent=yes"})
        if request["status"] == 200:
            episodeinfo = request["content"].encode('utf-8')
            episodeinfo = episodeinfo.replace("\n", "")
            info1 = re.findall(r"<h2.*?href=\"(.*?)\".*?class=\"episode active \".*?title=\"(.*?)\".*?h2>", episodeinfo)
            info2 = re.findall(r"<tr>.*?<th>datum</th>.*?<td>(.*?)</td>.*?</tr>", episodeinfo)
            try:
                date = info2[0]
                title = common.replaceHTMLCodes(info1[0][1])
                date = date.replace(title, "").replace("(", "").replace(")", "")
                videourl = info1[0][0]
                videourl = sys.argv[0] + "?module=" + module + "&action=find_video" + "&url=" + baseurl + videourl
                thumb = ""
                if not info1[0][0] in unique:
                    if dow in date:
                        episodelist.append([date, title, videourl, thumb])
                        unique.append(info1[0][0])
            except:
                pass
    episodelist.sort(reverse=True)
    for episode in episodelist:
        utils.addLink("%s, %s" % (episode[0], episode[1]), episode[2], episode[3])
    xbmcplugin.endOfDirectory(int(sys.argv[1]))

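# Hypothetical dispatcher sketch, not the add-on's actual router: the handlers
# above build plugin URLs of the form sys.argv[0] + "?module=...&action=...&url=...",
# so a Kodi entry point along these lines could route them back. parse_qs, the
# find_video handler and the fallback choice are assumptions.
import sys
import urlparse  # Python 2, matching the urllib.unquote/quote_plus calls above

def dispatch():
    query = sys.argv[2].lstrip('?')
    params = dict((k, v[0]) for k, v in urlparse.parse_qs(query).items())
    action = params.get('action', '')
    if action == 'find_video':
        find_video(params)  # assumed handler, not shown in this listing
    elif action == 'find_episodes':
        find_episodes(params)
    else:
        createSuggestionsList(params)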