def UpdateVideoItem(self, item):
    """Accepts an item. It returns an updated item.
    Usually retrieves the MediaURL and the Thumb! It should return a
    completed item.

    Opens item.url, extracts the Freecaster GUID from the embedded flash
    player variables, then fetches the gateway XML for that GUID and
    takes the first <stream> url as the media url. item.complete is only
    set when a media url was found.
    """
    logFile.info('starting UpdateVideoItem for %s (%s)', item.name, self.channelName)

    # get the thumb
    item.thumb = self.CacheThumb(item.thumbUrl)

    # get additional info
    data = uriHandler.Open(item.url, pb=False)

    # raw string: the pattern contains regex escapes such as \( and \W,
    # which would otherwise be (deprecated) string escape sequences
    guid = common.DoRegexFindAll(r"fo.addVariable\('id'\W*,\W*'([^']+)'\)", data)
    if not guid:
        # guard clause instead of building an empty url and testing it
        logFile.error("Cannot find GUID in url: %s", item.url)
        return item

    url = 'http://gateway.freecaster.com/VP/%s' % (guid[0], )
    data = uriHandler.Open(url, pb=False)
    urls = common.DoRegexFindAll('<stream[^>]+>([^>]+)</stream_[^>]+>', data)
    if urls:
        item.mediaurl = urls[0]
        item.complete = True
    logFile.debug("UpdateVideoItem complete: mediaUrl = %s", item.mediaurl)
    return item
def ProcessPageNavigation(self, data):
    """NOT USER EDITABLE
    Generates a list of pageNavigation items. Could also be used in the
    future to add a pageNav control or something (but not for plugin).

    Runs self.pageNavigationRegex over <data>, creates one page item per
    match via CreatePageItem and filters out duplicate entries.
    """
    logFile.debug("Starting ProcessPageNavigation")

    # try the regex on the current data (the original ran this twice)
    pages = common.DoRegexFindAll(self.pageNavigationRegex, data)
    if not pages:
        logFile.debug("No pages found.")
        return []

    # filter duplicates while building the result instead of calling
    # list.remove() on a list that is being iterated — the original
    # approach shifts elements under the iterator and can skip entries.
    pageItems = []
    for page in pages:
        item = self.CreatePageItem(page)
        if item in pageItems:
            logFile.debug("Removing duplicate for '%s'", item.name)
            continue
        pageItems.append(item)
    return pageItems
def UpdateVideoItem(self, item): """ Accepts an item. It returns an updated item. """ #logFile.debug('starting UpdateVideoItem for %s (%s)',item.name, self.channelName) item.thumb = self.CacheThumb(item.thumbUrl) # open the url to read the media url data = uriHandler.Open(item.url, pb=False) # create the algorithm helper algHelper = algorithms.Algorithms() results = common.DoRegexFindAll(self.mediaUrlRegex, data) megavideoResults = common.DoRegexFindAll( '<param name="movie" value="([^"]+)"></param>', data) veohResults = common.DoRegexFindAll( 'src="http://www.veoh.com/[^?]+\?permalinkId=([^&]+)', data) googleResults = common.DoRegexFindAll( '(http://video.google.com/googleplayer.swf\?docId=[^"]+)"', data) # First give it a try using the default regex if len(results) > 0: item.mediaurl = results[-1] logFile.debug("MediaUrl found: %s", item.mediaurl) item.complete = True # If there were no results, try megavideo.com elif len(megavideoResults) > 0: url = megavideoResults[-1] url = algHelper.DecodeItemUrl(url) data = uriHandler.Open(url, pb=True) item.mediaurl = algHelper.ExtractMediaUrl(url, data) # then try veoh elif len(veohResults) > 0: url = "http://www.veoh.com/videos/%s?cmpTag" % veohResults[-1] url = algHelper.DecodeItemUrl(url) data = uriHandler.Open(url, pb=True) item.mediaurl = algHelper.ExtractMediaUrl(url, data) # then google elif len(googleResults) > 0: url = googleResults[-1] url = algHelper.DecodeItemUrl(url) data = uriHandler.Open(url, pb=True) item.mediaurl = algHelper.ExtractMediaUrl(url, data) # If all else fails, return an error else: item.mediaurl = "" logFile.error("MediaUrl not found in url: %s", item.url) item.complete = False logFile.debug("%s was updated with mediaurl: %s", item.name, item.mediaurl) return item
def ParseMainList(self):
    """Builds the main program list for the RTL channel.

    Combines the base-class main list with extra folders scraped from
    the RTL "gemist" homepage navigation script, plus a hard-coded Rock
    Nation entry. Returns the cached list when already built.
    """
    items = []
    if len(self.mainListItems) > 1:
        return self.mainListItems

    items = chn_class.Channel.ParseMainList(self)

    # fetch the homepage and locate the navigation javascript file
    homeData = uriHandler.Open("http://www.rtl.nl/service/gemist/home/", pb=True)
    scriptUrls = common.DoRegexFindAll(
        '<script[^>]+src="([^"]+)"[^>]*></script><div id="navigatie_container">',
        homeData)

    # only the last matched script url is used
    javaUrl = ""
    if scriptUrls:
        javaUrl = scriptUrls[-1]

    if javaUrl != "":
        scriptData = uriHandler.Open(javaUrl, pb=True)
        menuEntries = common.DoRegexFindAll(
            '\["([^"]+)","([^"]+)","[^"]+","[^"]+"\]', scriptData)
        existingCount = len(items)
        addedCount = 0
        for name, path in menuEntries:
            extraItem = common.clistItem(
                name, self.RtlFolderUri("/%s" % path, "videomenu.xml"))
            extraItem.icon = self.folderIcon
            extraItem.thumb = self.noImage
            # skip entries that are already in the base list
            if items.count(extraItem) == 0:
                addedCount += 1
                items.append(extraItem)
        logFile.debug("Added %s more RTL Items to the already existing %s",
                      addedCount, existingCount)

    # Rock Nation has no entry in the menu script, so add it by hand
    rockNationItem = common.clistItem(
        'Rock Nation',
        'http://www.rtl.nl/system/video/menu/reality/rocknation/videomenu.xml',
        'folder')
    rockNationItem.icon = self.folderIcon
    rockNationItem.thumb = self.noImage
    if items.count(rockNationItem) == 0:
        items.append(rockNationItem)

    # sort by name
    if self.episodeSort:
        items.sort(lambda x, y: cmp(x.name.lower(), y.name.lower()))
    return items
def ProcessNormalPage(self, data):
    """Creates the episode items for a regular (non-archive) program page.

    Reads the page title and one (date, aflID-query) pair per episode
    row, and builds an episode item for each.
    """
    logFile.info('starting ProcessNormalPage')

    programTitle = common.DoRegexFindAll('<b class="btitle">([^<]+)</b>', data)[-1]
    episodes = common.DoRegexFindAll(
        '<td height="40">(\d+-\d+-\d+)*</td>\s+[^/]+/index.php/aflevering(\?aflID=\d+)&md5=[0-9a-f]+',
        data)
    return [self.CreateEpisodeItem((date, urlPart, programTitle))
            for (date, urlPart) in episodes]
def ProcessFolderList(self, url):
    """NOT USER EDITABLE
    Accepts an URL and returns a list of items with at least name & url set.
    Each item can be filled using the ParseFolderItem and ParseVideoItem
    methods.

    Order of the returned list: pre-processed items, folder items
    (sorted by name), video items, page-navigation items.
    """
    # outside plugin mode, reset the GUI lists before filling them
    if not self.pluginMode:
        guiController = guicontroller.GuiController(self)
        guiController.ClearEpisodeLists()
        guiController.ShowData(self.folderHistory[0])

    preItems = []
    folderItems = []
    videoItems = []
    pageItems = []

    # the special "searchSite" url triggers the interactive search instead
    if (url == "searchSite"):
        logFile.debug("Starting to search")
        return self.SearchSite()

    data = uriHandler.Open(url)

    # first of all do the Pre handler
    (data, preItems) = self.PreProcessFolderList(data)

    # then process folder items.
    if not self.folderItemRegex == '':
        folders = common.DoRegexFindAll(self.folderItemRegex, data)
        for folder in folders:
            folderItems.append(self.CreateFolderItem(folder))
        # sort by name
        folderItems.sort(lambda x, y: cmp(x.name.lower(), y.name.lower()))

    # now process video items
    if not self.videoItemRegex == '':
        videos = common.DoRegexFindAll(self.videoItemRegex, data)
        for video in videos:
            videoItems.append(self.CreateVideoItem(video))
        # sort
        #videoItems.sort()

    # now process page navigation if a pageNavigationIndication is present
    if not self.pageNavigationRegex == '':
        pageItems = self.ProcessPageNavigation(data)

    return preItems + folderItems + videoItems + pageItems
def PreProcessFolderList(self, data):
    """Accepts data from the ProcessFolderList method, BEFORE the items
    are processed. Allows setting of parameters (like title etc).

    Detects additional result pages, downloads each of them (showing a
    progress dialog) and appends their HTML to <data> so a single regex
    pass sees all pages. Returns a (data, items) tuple.
    """
    _items = []

    #load all additional pages and add the html to the data variable
    #self.programTitle = common.DoRegexFindAll('- ([^<]+)</title>', data)[-1]
    self.programTitle = common.DoRegexFindAll(
        'title="[^"]+">([^<]+)</a>\W+</td>', data)[-1]
    #title="Admirals TV">Admirals TV</a>     </td>

    _totalPages = 1
    # determine second page
    _pageExtUrl = common.DoRegexFindAll(
        '<a href="([^"]+p=\d+&pm=\d+)">\d+</a>', data)

    # check if more
    if _pageExtUrl != []:
        # load second page as "pageExt"
        _nPagesExt = len(_pageExtUrl)
        logFile.info('Extra pages %s', _nPagesExt)
        _totalPages += int(_nPagesExt)

    # determine percentage per page:
    _percentagePerPage = int(math.floor(100 / int(_totalPages)))
    _percentage = 0

    # now add all data to the data variable
    if _pageExtUrl != []:
        # do first page (already loaded)
        _parsePB = xbmcgui.DialogProgress()
        _parsePB.create('Opening Additional Pages',
                        'Opening additional pages', self.initialUri)
        _page = 1
        for _pageUrl in _pageExtUrl:
            _page += 1
            # append each extra page's HTML behind the current data
            data = data + uriHandler.Open(self.baseUrl + _pageUrl, pb=False)
            _percentage += _percentagePerPage
            _parsePB.update(_percentage, "Loading page %s" % (_page))
            # user may cancel; the dialog is still closed below
            if _parsePB.iscanceled():
                break
        _parsePB.close()
    return (data, _items)
def ParseMainList(self):
    """Returns the cached main list or builds it from the main list URI.

    Each entry is a CListItem with at least a name and url; the list is
    used to fill the progwindow and cached in self.mainListItems.
    """
    if len(self.mainListItems) > 1:
        return self.mainListItems

    items = []
    pageData = uriHandler.Open(self.mainListUri)

    # build one episode item per regex hit, skipping failed creations
    # (CreateEpisodeItem may return None) and duplicates
    for match in common.DoRegexFindAll(self.episodeItemRegex, pageData):
        episodeItem = self.CreateEpisodeItem(match)
        if episodeItem and items.count(episodeItem) == 0:
            items.append(episodeItem)

    # sort by name
    if self.episodeSort:
        items.sort(lambda x, y: cmp(x.name.lower(), y.name.lower()))

    self.mainListItems = items
    return items
def UpdateVideoItem(self, item): """ Accepts an item. It returns an updated item. Usually retrieves the MediaURL and the Thumb! """ # no MediaURL retrieving is done, because that is done in Playvideo (we don't need to open # any urls here. Info is already present, except the url). logFile.info('starting UpdateVideoItem for %s (%s)\nUrl:%s', item.name, self.channelName, item.url) data = uriHandler.Open(item.url) #get mediaurl videoUrl = common.DoRegexFindAll(self.mediaUrlRegex, data) item.mediaurl = common.ConvertURLEntities(videoUrl[-1]) if item.mediaurl.find('.flv') < 0: #parse ASX/ASF for older XBMC verions item.mediaurl = self.ParseAsxAsf(item.mediaurl) logFile.info("Real media url = %s", item.mediaurl) else: # if it is an FLV, it is downloadable item.downloadable = True item.complete = True # finish and return logFile.info('finishing ParseVideo for %s. MediaUrl=%s', item.url, item.mediaurl) return item
def NewItems(self):
    """Scrapes the "new programs" section of www.uitzendinggemist.nl."""
    data = uriHandler.Open("http://www.uitzendinggemist.nl/")

    # narrow the page down to the "Meer nieuwe programma" section first
    section = common.DoRegexFindAll(
        "Meer nieuwe programma([\w\W]+)<!--// options -->", data)
    if len(section) > 0:
        data = section[0]

    items = []
    for urlPart, name in common.DoRegexFindAll(
            "<a href=\"/index.php/aflevering(\?aflID=\d+&md5=[0-9a-f]+)\"[^>]+>([^<]+)</a>",
            data):
        newItem = common.clistItem(name, common.StripAmp(urlPart))
        newItem.icon = self.icon
        newItem.type = 'video'
        items.append(newItem)
    return items
def TipItems(self):
    """Scrapes the earlier "tips" section of www.uitzendinggemist.nl."""
    data = uriHandler.Open("http://www.uitzendinggemist.nl/")

    # limit the search to the "eerdere tips" part of the page when found
    section = common.DoRegexFindAll(
        "<!--<h1>Eerdere tips van Uitzendinggemist</h1>([\W\w]+)<!--// eerdere tips van uitzendinggemist -->",
        data)
    if len(section) > 0:
        data = section[0]

    items = []
    for urlPart, name in common.DoRegexFindAll(
            "<a href=\"http://player.omroep.nl/(\?aflID=\d+&md5=[0-9a-f]+)\" [^>]+>([^<]+)</a>",
            data):
        tipItem = common.clistItem(name, common.StripAmp(urlPart))
        tipItem.icon = self.icon
        tipItem.type = 'video'
        items.append(tipItem)
    return items
def TopItems(self):
    """Returns the items of the "top 50" page.

    Opens the base url first when the UG session cookies are missing, so
    the top-50 request is made with a valid session.
    """
    #check for cookie:
    logFile.info("Checking for NOS cookies.")
    haveCookies = (uriHandler.CookieCheck('UGSES')
                   and uriHandler.CookieCheck('CheckUGCookie'))  # and uriHandler.CookieCheck('quuid')
    if haveCookies:
        logFile.info("Cookies found. Continuing")
    else:
        logFile.info("No cookies found. Opening main site")
        uriHandler.Open(self.baseUrl)

    data = uriHandler.Open("http://www.uitzendinggemist.nl/index.php/top50")
    matches = common.DoRegexFindAll(
        '<td style=[^>]+><a href="/index.php/aflevering(\?aflID=\d+&md5=[^"]+)">([^<]+)</a></td>\W+<td align="right">([^<]+)</td>',
        data)
    logFile.debug("Adding %s top50 items", len(matches))

    items = []
    for urlPart, name, date in matches:
        topItem = common.clistItem(name, common.StripAmp(urlPart))
        topItem.icon = self.icon
        topItem.date = date
        topItem.type = 'video'
        items.append(topItem)
    return items
def CacheThumb(self, remoteImage):
    """NOT USER EDITABLE
    Caches an image and returns the local path.
    Before calling, set the thumb to noImage in the channel.

    Returns self.noImage for empty URLs and on download errors; returns
    the input unchanged when it already looks like a local path.
    """
    logFile.debug("Going to cache %s", remoteImage)
    if remoteImage == "":
        return self.noImage
    # a ':' this early (or none at all) means there is no "scheme://"
    # part — e.g. a drive letter — so treat it as an existing local path
    if remoteImage.find(":") < 2:
        return remoteImage
    logFile.debug("Caching url=%s", remoteImage)

    thumb = ""
    # get image name: everything after the last '/'
    localImageName = common.DoRegexFindAll('/([^/]*)$', remoteImage)[-1]
    # correct for fatx
    localImageName = uriHandler.CorrectFileName(localImageName)
    localCompletePath = os.path.join(config.cacheDir, localImageName)
    try:
        if os.path.exists(localCompletePath):
            #check cache
            thumb = localCompletePath
        else:
            # save them in cache folder
            logFile.debug("Downloading thumb. Filename=%s", localImageName)
            thumb = uriHandler.Download(remoteImage, localImageName,
                                        folder=config.cacheDir, pb=False)
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed; any real error falls back to the stock image
        logFile.error("Error opening thumbfile!", exc_info=True)
        return self.noImage
    return thumb
def UpdateVideoItem(self, item):
    """Accepts an item. It returns an updated item.

    Scrapes all media-url candidates, sorts them on the (numeric) middle
    regex group — highest quality first — and stores the full list of
    urls in item.mediaurl.
    """
    logFile.debug('starting UpdateVideoItem for %s (%s)', item.name, self.channelName)
    if len(item.mediaurl) == 0:
        data = uriHandler.Open(item.url, pb=False)
        matches = common.DoRegexFindAll(self.mediaUrlRegex, data)
        logFile.debug("Possible Matches for mediaUrl: %s", matches)
        if len(matches) > 0:
            # sort mediaurl -> get highest quality (descending on the
            # numeric middle group of each 3-tuple match)
            matches.sort(lambda x, y: int(y[1]) - int(x[1]))
            mediaUrls = []
            for match in matches:
                # each match is a 3-part tuple that concatenates into a url
                mediaUrls.append("%s%s%s" % match)
            logFile.debug("Sorted Matches: %s", mediaUrls)
            item.mediaurl = mediaUrls
        else:
            logFile.error("Cannot find media URL")
    logFile.info('finishing UpdateVideoItem. Media url = %s', item.mediaurl)
    item.thumb = self.CacheThumb(item.thumbUrl)
    item.complete = True
    return item
def PreProcessFolderList(self, data):
    """Accepts data from the ProcessFolderList method, BEFORE the items
    are processed. Allows setting of parameters (like title etc).

    Reads the clip count from the page, computes the number of 20-item
    pages and prepends synthetic <pagenav> markers (one per page) to the
    data so the page-navigation regex can pick them up later.
    Returns a (data, pages) tuple.
    """
    pages = []
    numberOfItems = common.DoRegexFindAll(
        "class='clipslist' name='(\d+)'>[^/]+'/([^_]+)_\d+'[^>]*>", data)
    logFile.debug(numberOfItems)
    if len(numberOfItems) > 0:
        pageId = numberOfItems[0][1]
        numberOfItems = numberOfItems[0][0]
        # 20 items per page, so we need to calculate (rounded to nearest)
        numberOfPages = int(float(numberOfItems) / 20 + 0.5)
        logFile.debug('Number of items: %s\nNumber of page: %s',
                      numberOfItems, numberOfPages)
        pageData = ''
        for i in range(1, numberOfPages + 1):
            # helper url that serves page <i> of the video list
            pageUrl = "http://www.freecaster.com/helpers/videolist_helper.php?apID=%s&i=%s&q=&sortby=date&sort=DESC&event_id=" % (
                pageId, i)
            pageData = "%s<pagenav>%s<pagenav><pagenavurl>%s</pagenavurl>" % (
                pageData, i, pageUrl)
        data = "%s%s" % (pageData, data)
    return (data, pages)
def PopularItems(self):
    """Scrapes the "popular" table of www.uitzendinggemist.nl."""
    data = uriHandler.Open("http://www.uitzendinggemist.nl/")

    # narrow the page down to the popular-items table when present
    section = common.DoRegexFindAll(
        "<thead id=\"tooltip_populair\"([\w\W]+)<script type=\"text/javascript\">",
        data)
    if len(section) > 0:
        data = section[0]

    items = []
    for urlPart, name, date in common.DoRegexFindAll(
            "<td><a href=\"/index.php/aflevering(\?aflID=\d+&md5=[0-9a-f]+)\">([^<]+)</a></td>\W+<td [^>]+>([^<]+)</td>",
            data):
        popItem = common.clistItem(name, common.StripAmp(urlPart))
        popItem.date = date
        popItem.icon = self.icon
        popItem.type = 'video'
        items.append(popItem)
    return items
def UpdateVideoItem(self, item):
    """Accepts an item. It returns an updated item.
    Usually retrieves the MediaURL and the Thumb! It should return a
    completed item.

    Reads headline/subhead/image from the clip XML, then resolves every
    <videourl> into an rtmp "?slist=" url; item.mediaurl becomes a list.
    """
    logFile.info('starting UpdateVideoItem for %s (%s)', item.name, self.channelName)
    data = uriHandler.Open(item.url, pb=False)

    # description, name and thumb come from the clip metadata
    info = common.DoRegexFindAll(
        '<image>([^<]*)</image>[\W\w]+<headline><!\[CDATA\[([^<]+)\]\]></headline>[\W\w]*<subhead><!\[CDATA\[([^<]+)\]\]></subhead>',
        data)
    if len(info) > 0:
        info = info[0]
        logFile.debug(info)
        item.description = "%s %s" % (info[1], info[2])
        item.name = info[1]
        item.thumbUrl = info[0]

    if item.thumbUrl != "":
        item.thumb = self.CacheThumb(item.thumbUrl)
    else:
        item.thumb = self.noImage

    # get the RTMP urls
    #<src>rtmp://cp40493.edgefcs.net/ondemand/comedystor/_!/com/sp/acts/Season01/E_0102/compressed/flv/0102_3_DI_640x480_500kbps.flv</src>
    #rtmp://cp40493.edgefcs.net/ondemand?slist=comedystor/_!/com/sp/acts/Season01/E_0106/compressed/flv/0106_4_DI_640x480_700kbps
    urlInfo = common.DoRegexFindAll('<videourl>/www/xml([^<]+)</videourl>', data)
    item.mediaurl = []
    for url in urlInfo:
        # each video url points at a small XML that holds the real <src>
        rtmpData = uriHandler.Open("%s%s" % (self.baseUrl, url), pb=False)
        rtmpUrl = common.DoRegexFindAll(
            '<src>([^<]+ondemand)/([^<]+).flv</src>', rtmpData)[-1]
        # rewrite the ondemand path into the "?slist=" form
        item.mediaurl.append("%s?slist=%s" % rtmpUrl)
        logFile.debug("Media url: %s", item.mediaurl)

    if not item.mediaurl == []:
        item.complete = True
    else:
        item.complete = False
    return item
def PreProcessFolderList(self, data):
    """Accepts data from the ProcessFolderList method, BEFORE the items
    are processed. Allows setting of parameters (like title etc).

    Decides whether we are on the main page (no breadcrumb links) or a
    sub page, stores the previous description accordingly and trims the
    page data. Returns a (data, items) tuple.
    """
    _items = []

    # breadcrumb links; the main page does not contain a '/ <a href'
    # breadcrumb, so zero matches means we are on the main page.
    # (The original computed this regex twice and initialised _items twice.)
    _matches = common.DoRegexFindAll('/ <a href="\.([^"]+)">([^<]+)</a>', data)
    if len(_matches) > 0:
        # we are not on the mainpage anymore
        if self.folderHistory[-1].description == "Please wait while loading data":
            self.previousDescription = _matches[0][1]
        else:
            self.previousDescription = self.folderHistory[-1].description

        # now remove everything above the sidebar HTML to prevent problems
        # with new links on the site
        data = common.DoRegexFindAll('<div id="body-sidebar">(([\n\r]|.)*)', data)
        if len(data) > 0:
            if len(data[0]) > 0:
                data = data[0][0]
    else:
        # we remain on the mainpage for newly added movies.
        self.previousDescription = "Newly Added Movies"
        data = common.DoRegexFindAll(
            '<div class="xboxcontent">([\w\W]*)<div class="clear"', data)
        if len(data) > 0:
            data = data[0]
    return (data, _items)
def UpdateVideoItem(self, item):
    """Accepts an item. It returns an updated item.

    Tries, in order: an ASX link, a RAM link, then a flash (FLV/RTMP)
    player variable. RTMP urls get their "_definst_" part rewritten into
    the "?slist=" form.
    """
    logFile.debug('starting UpdateVideoItem for %s (%s)', item.name, self.channelName)
    item.thumb = self.CacheThumb(item.thumbUrl)

    # retrieve the mediaurl
    data = uriHandler.Open(item.url, pb=False)

    # first check for ASX (the original comments had ASX/RAM swapped)
    asxRegex = '<a href="([^"]+asx)"'
    asxResults = common.DoRegexFindAll(asxRegex, data)
    if len(asxResults) > 0:
        logFile.debug("Running ASX")
        item.mediaurl = "%s%s" % ("http://www.svt.se", asxResults[0])
    else:
        # then for RAM
        ramRegex = '<a href="([^"]+ram)">'
        ramResults = common.DoRegexFindAll(ramRegex, data)
        if len(ramResults) > 0:
            logFile.debug("Running RAM")
            item.mediaurl = "%s%s" % ("http://www.svt.se", ramResults[0])
        else:
            # then for FLV
            flvRegex = 'so.addVariable\("pathflv"\W*,\W*"([^"]+)"\W*\)'
            flvResults = common.DoRegexFindAll(flvRegex, data)
            if len(flvResults) > 0:
                logFile.debug("Running FLV")
                item.mediaurl = flvResults[0]
                if item.mediaurl.startswith("rtmp"):
                    # rewrite e.g.
                    #rtmp://fl1.c00928.cdn.qbrick.com/00928/_definst_/kluster/20090101/090102PASPARET_J53UJH
                    # into
                    #rtmp://fl1.c00928.cdn.qbrick.com/00928/?slist=/kluster/20090101/090102PASPARET_J53UJH
                    item.mediaurl = item.mediaurl.replace("_definst_", "?slist=")
                    pass

    item.complete = True
    logFile.debug("Found mediaurl: %s", item.mediaurl)
    return item
def UpdateVideoItem(self, item):
    """Accepts an item. It returns an updated item.
    Usually retrieves the MediaURL and the Thumb! It should return a
    completed item.

    Fetches the media RSS, takes the rtmp url template from the first
    act and instantiates it once per act (the act number is part of the
    filename); item.mediaurl becomes the list of rtmp urls.
    """
    logFile.info('starting UpdateVideoItem for %s (%s)', item.name, self.channelName)

    # download the thumb
    item.thumb = self.CacheThumb(item.thumbUrl)

    # load the url and get the info for the media urls:
    data = uriHandler.Open(item.url, pb=False)
    acts = common.DoRegexFindAll('<media:content url="([^=]+=\d+&hiLoPref=hi)"[^>]+>', data)

    #retrieve the first real url
    rtmpUrls = []
    if len(acts) > 0:
        rtmpData = uriHandler.Open(common.StripAmp(acts[0]), pb=False)
        # the <src> splits into (server, path-prefix, quality-suffix); the
        # act number sits between prefix and suffix, e.g.:
        #<src>rtmp://cp40493.edgefcs.net/ondemand/comedystor/_!/com/sp/acts/Season01/E_0102/compressed/flv/0102_3_DI_640x480_500kbps.flv</src>
        #rtmp://cp40493.edgefcs.net/ondemand?slist=comedystor/_!/com/sp/acts/Season01/E_0106/compressed/flv/0106_4_DI_640x480_700kbps
        parts = common.DoRegexFindAll('<src>([^<]+ondemand)/([^<]+flv/\d+_)\d+(_[^<]+)[.]flv</src>', rtmpData)
        rtmpUrlParts = []
        for part in parts:
            # only the last match is kept
            rtmpUrlParts = part
        actNr = 1
        for act in acts:
            rtmpUrl = "%s?slist=%s%s%s" % (rtmpUrlParts[0], rtmpUrlParts[1], actNr, rtmpUrlParts[2])
            rtmpUrls.append(rtmpUrl)
            actNr = actNr + 1
            logFile.debug('Appending: %s', rtmpUrl)
        item.mediaurl = rtmpUrls
    else:
        logFile.critical("Error retrieving rtmp stream from %s", item.url)

    # NOTE(review): item.mediaurl is a *list* when acts were found, so the
    # != "" test is True even for an empty list — confirm this is the
    # intended completeness check.
    if item.mediaurl != "":
        logFile.debug("Media url was found: %s", item.mediaurl)
        item.complete = True
    else:
        logFile.debug("Media url was not found.")
    return item
def UpdateVideoItem(self, item):
    """Accepts an item. It returns an updated item.
    Usually retrieves the MediaURL and the Thumb! It should return a
    completed item.

    Tries three players in order: the WMV asx gateway, a <param url>
    based player, and finally the FLV flash player.
    """
    logFile.info('starting UpdateVideoItem for %s (%s)', item.name, self.channelName)
    item.thumb = self.CacheThumb(item.thumbUrl)

    # now the mediaurl is derived. First we try WMV
    data = uriHandler.Open(item.url, pb=False)
    urls = common.DoRegexFindAll(
        '<a class="wmv-player-holder" href="(http://asx.sbsnet.nl)(/[^"]+)"></a>',
        data)
    for url in urls:
        # rewrite the asx path to the streaming gateway url
        item.mediaurl = "http://www.garnierstreamingmedia.com/asx/openclip.asp?file=/sbs6%s" % (
            url[1])

    # then we check a different implementation
    if item.mediaurl == "":
        urls = common.DoRegexFindAll('<param name="url" value="([^"]+)"', data)
        for url in urls:
            item.mediaurl = url

    # and finally a FLV player that is sometimes used
    if item.mediaurl == "":
        urls = common.DoRegexFindAll(
            '<a class="flv-player-holder" href="[^=]+mediaplayer.swf\?file=([^"]+)">',
            data)
        for url in urls:
            item.mediaurl = url

    if item.mediaurl != "":
        logFile.debug("Media url was found: %s", item.mediaurl)
        item.complete = True
    else:
        logFile.debug("Media url was not found.")
    return item
def PreProcessFolderList(self, data):
    """Accepts data from the ProcessFolderList method, BEFORE the items
    are processed. Allows setting of parameters (like title etc).

    Strips the trailing "Channels:" section so the channels do not show
    up as folders. Returns a (data, items) tuple.
    """
    items = []
    trimmed = common.DoRegexFindAll("([\w\W]+)<h3>Channels:</h3>", data)
    if trimmed:
        data = trimmed[0]
    return (data, items)
def PreProcessFolderList(self, data):
    """Accepts data from the ProcessFolderList method, BEFORE the items
    are processed. Allows setting of parameters (like title etc).

    Extracts the breadcrumb texts from the crumbs header and stores them
    (newline separated) in self.categoryName. Returns (data, items).
    """
    logFile.info("Performing Pre-Processing")
    items = []

    # isolate the breadcrumb header; only the last match is kept
    headerData = ""
    for match in common.DoRegexFindAll('<div id="crumbsWrap">[\W\w]+<div id="links', data):
        headerData = match

    # collect the crumb link texts into one newline-separated string
    self.categoryName = ""
    for crumb in common.DoRegexFindAll('\);">([^<]+)</a>[^/<]+', headerData):
        self.categoryName = "%s%s\n" % (self.categoryName, crumb)

    return (data, items)
def SearchSite(self):
    """Asks the user for a search term and returns matching programs.

    Accepts an url and returns a list with items of type CListItem;
    items have a name and url. Opens the base url first when the UG
    session cookies are missing.
    """
    items = []
    #check for cookie:
    logFile.info("Checking for NOS cookies.")
    if uriHandler.CookieCheck('UGSES') and uriHandler.CookieCheck('CheckUGCookie'):# and uriHandler.CookieCheck('quuid'):
        logFile.info("Cookies found. Continuing")
    else:
        logFile.info("No cookies found. Opening main site")
        temp = uriHandler.Open(self.baseUrl)

    keyboard = xbmc.Keyboard('')
    keyboard.doModal()
    if (keyboard.isConfirmed()):
        needle = keyboard.getText()
        if len(needle) < 4:
            dialog = xbmcgui.Dialog()
            dialog.ok("Uitzendinggemist", "Geen geldig zoekopdracht. Een zoekopdracht\nheeft minimaal 4 characters.")
            # NOTE(review): this returns None instead of a list — confirm
            # callers handle that (a "return items" may be intended).
            return
        #get only first one
        logFile.info("Searching NOS for needle: " + needle)
        data = uriHandler.Open("http://www.uitzendinggemist.nl/index.php/search", params="searchitem=&qs_uitzending=" + needle + "&titel=&dag=&net_zender=&omroep=&genre=")
        #resultSet = common.DoRegexFindAll('<a class="title" href="/index.php/search\?([^"]+)&sq=[^<]+">([^<]+)(<span[^>]+>)*([^<]+)(</span>)*</a></td>', data)
        resultSet = common.DoRegexFindAll('<a class="title" href="/index.php/search\?([^"]+)&sq=[^<]+">([^<]*)(<span[^>]+>)*([^<]*)(</span>)*([^<]*)</a></td>', data)
        for item in resultSet:
            logFile.debug(item)
            # glue the title fragments together, skipping tag fragments
            # (any part containing a '>' past position 0)
            name = ""
            for part in item[1:]:
                if not part.find(">") > 0:
                    name = "%s%s" % (name, part)
            tmp = common.clistItem(name, self.baseUrl + "/index.php/serie?" + common.StripAmp(item[0]))
            tmp.icon = self.folderIcon
            tmp.thumb = self.noImage
            tmp.description = name
            items.append(tmp)
        #because lists are downloaded according to date (else some programs will be missing), a sort on name is performed.
        items.sort(lambda x, y: cmp(x.name, y.name))
    else:
        logFile.info('user canceled search')
    return items
def UpdateVideoItem(self, item):
    """Accepts an item. It returns an updated item.
    Usually retrieves the MediaURL and the Thumb! It should return a
    completed item.

    First looks for the WMV asx player link and rewrites it to the
    streaming gateway url, then falls back to the FLV player link.
    """
    logFile.info('starting UpdateVideoItem for %s (%s)', item.name, self.channelName)
    item.thumb = self.CacheThumb(item.thumbUrl)

    # now the mediaurl is derived, e.g.:
    # http://www.garnierstreamingmedia.com/asx/openclip.asp?file=/sbs6/net5/juliastango_S02/juliastango_S02E07.wmv
    # http://asx.sbsnet.nl/net5/juliastango_S02/juliastango_S02E07.wmv
    data = uriHandler.Open(item.url, pb=False)

    # ASX
    urls = common.DoRegexFindAll(
        '<a class="wmv-player-holder" href="(http://asx.sbsnet.nl/sbs6/)([^"]+)"></a>',
        data)
    for url in urls:
        item.mediaurl = "http://www.garnierstreamingmedia.com/asx/openclip.asp?file=/sbs6/sbs6/%s" % (
            url[1])

    # FLV
    if item.mediaurl == "":
        urls = common.DoRegexFindAll(
            '<a class="flv-player-holder" href="/design/channel/sbs6/swf/mediaplayer.swf\?file=([^"]+)"></a>',
            data)
        for url in urls:
            item.mediaurl = url

    if item.mediaurl != "":
        logFile.debug("Media url was found: %s", item.mediaurl)
        item.complete = True
    else:
        logFile.debug("Media url was not found.")
    return item
def IsOutOfDate(self):
    """NOT USER EDITABLE
    Compare the maxVersion of the channel (so the maximum version of XOT
    for which the channel was tested) against the current version of
    XOT. If the current version is higher, don't load the channel.

    Versions look like "1.2.3" optionally followed by a/b + number
    (alpha/beta). Each is flattened into a fixed-width digit string so a
    plain string comparison orders them.
    """
    maxVersionSplit = common.DoRegexFindAll(
        '^(\d{1,3})\.(\d{1,3})\.(\d{1,3})(([abAB])(\d{0,2}))*$',
        self.maxXotVersion)[0]
    xotVersionSplit = common.DoRegexFindAll(
        '^(\d{1,3})\.(\d{1,3})\.(\d{1,3})(([abAB])(\d{0,2}))*$',
        config.version)[0]
    versions = (maxVersionSplit, xotVersionSplit)
    intVersions = []
    for version in versions:
        # zero-pad major/minor/patch to 3 digits each
        tmpVersion = "%s%s%s" % (version[0].rjust(3, '0'),
                                 version[1].rjust(3, '0'),
                                 version[2].rjust(3, '0'))
        # now the last part will be for the beta/alpha counting. Therefore we assume that if no beta/alpha is present
        # the last 3 digits are 999. If they are present the digits are between 0-998
        if version[4] != "":
            if version[4] == 'a':
                tmpVersion = "%s%s" % (tmpVersion, "1")
            elif version[4] == 'b':
                tmpVersion = "%s%s" % (tmpVersion, "2")
            tmpVersion = "%s%s" % (tmpVersion, version[5].rjust(3, '0'))
        else:
            # release builds sort after any alpha/beta of the same version
            tmpVersion = "%s99" % (tmpVersion)
        intVersions.append(tmpVersion)
    # NOTE: this compares the padded version *strings*; the zero-padding
    # makes lexicographic order match numeric order here.
    if intVersions[0] < intVersions[1]:
        logFile.warning(
            "%s has maxVersion %s and is to old for XOT version %s. (maxXot=%s, currentXot=%s)",
            self.channelName, self.maxXotVersion, config.version,
            intVersions[0], intVersions[1])
        return True
    else:
        return False
def ProcessExtendedPages(self, data):
    """Parses the "extended" (archive) episode pages.

    Extracts (title, url-suffix, date) tuples from the player links,
    creates episode items for them and appends the page navigation
    items for the remaining pages. Returns the list built so far when
    parsing fails.
    """
    # bind items before the try: the original initialised it inside,
    # so the fallback "return items" could hit an unbound name
    items = []
    try:
        logFile.info('starting ProcessExtendedPages')
        title = common.DoRegexFindAll('<b class="btitle">([^<]+)</b>', data)
        title = title[-1]
        results = common.DoRegexFindAll(
            '<a href="http://player.omroep.nl/(\?aflID=\d+)"[^>]*><[^>]+alt="bekijk uitzending: ([^(]+) \(([0-9-]*)\)" /></a>',
            data)
        for result in results:
            item = self.CreateEpisodeItem((result[2], result[0], result[1]))
            items.append(item)
        items = items + self.ProcessPageNavigation(data)
        return items
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed
        logFile.error("Error parsing extended pages", exc_info=True)
        return items
def PreProcessFolderList(self, data):
    """Accepts data from the ProcessFolderList method, BEFORE the items
    are processed. Allows setting of parameters (like title etc).

    Cuts the page data down to the first result box to prevent double
    pages. Returns a (data, items) tuple.
    """
    logFile.info("Performing Pre-Processing")
    items = []
    # keep only the part of the page starting at the result box
    data = common.DoRegexFindAll('<div class="resultBox">([\w\W]+)', data)[0]
    logFile.debug("Pre-Processing finished")
    return (data, items)
def ProcessFolderList(self, url):
    """Opens <url> and returns the list of episode/folder items on it.

    Dispatches to ProcessExtendedPages for archive pages and to
    ProcessNormalPage otherwise; when a link to older episodes is
    present, a folder item for it is inserted at the top of the list.
    Returns the list built so far when parsing fails.
    """
    logFile.info('starting ParseFolder for ' + url)
    items = []
    # get the data for most current episode
    try:
        # load first page as "pageBase"
        data = uriHandler.Open(url, pb=True)

        # see if it is an extended page or not:
        extended = common.DoRegexFindAll('<u>terug naar programma</u>', data)
        if extended != []:
            items = self.ProcessExtendedPages(data)
        else:
            items = self.ProcessNormalPage(data)

        # determine if an extended page item should be added
        pageExtUrl = common.DoRegexFindAll(
            '<a href="(/index.php/serie2\?serID=\d+&md5=[0-9a-f]+)"', data)
        if pageExtUrl != []:
            # add a folder item for it to the top of the list
            folderItem = common.clistItem(
                "Oudere afleveringen",
                self.baseUrl + common.StripAmp(pageExtUrl[0]),
                type='folder')
            folderItem.icon = self.folderIcon
            folderItem.thumb = self.noImage
            # NOTE: items[0] raises IndexError when the page yielded no
            # items; that is caught and logged below
            folderItem.description = "Oudere, gearchiveerde items van '%s'." % items[0].name
            items.insert(0, folderItem)
        return items
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed
        logFile.critical("Error Parsing with new methode", exc_info=True)
        return items
def PreProcessFolderList(self, data):
    """Accepts data from the ProcessFolderList method, BEFORE the items
    are processed. Allows setting of parameters (like title etc).

    On the very first folder (history length 1) the program title and
    the video-menu base path are read from the page and stored on the
    channel. Returns a (data, items) tuple.
    """
    items = []
    if len(self.folderHistory) == 1:
        # the first folder to be processed: remember title and menu path
        menuMatches = common.DoRegexFindAll(
            '<ul title="([^"]*)" rel="([^"]*)videomenu.xml"', data)
        self.progTitle, self.videoMenu = menuMatches[0]
    return (data, items)