def LoadUrl(url, cacheInt):
    """Fetch *url* as HTML via the cache, retrying past interstitial ad pages."""
    xml = XML.ElementFromString(HTTP.GetCached(url, cacheInt), True)
    # The cache may have captured the interstitial ad page; keep re-fetching
    # directly from the URL until the ad marker disappears.
    # (Is there a way to refresh the cache instead?)
    while xml.xpath('//a[@class="prestitialText2"]'):
        xml = XML.ElementFromURL(url, True)
    return xml
def load(self, date):
    """
    Fetch game data from mlb.com for *date* and generate a list of Game
    objects, attaching each game's media streams.
    """
    streams = self.loadStreams(date)
    iphone_xml = XML.ElementFromURL(Util.DateURL(date, C["URL"]["GAMES"]),
                                    cacheTime=C["GAME_CACHE_TTL"],
                                    isHTML=True)
    # for some reason switching to the soup parser (isHTML=True) made every
    # game appear twice. keep track of which games we've already listed.
    games_parsed = {}
    for xml in iphone_xml.xpath('game'):
        game = Game.fromXML(xml)
        if not game:
            continue
        try:
            pseudo_id = game.home_team.fullName() + game.away_team.fullName()
        except Exception:
            # team-name lookup failed; such games share the None key
            pseudo_id = None
        if pseudo_id in games_parsed:
            continue
        games_parsed[pseudo_id] = True
        if game.event_id:
            # .get() guards against games on the schedule that are missing
            # from the media-center page (a plain index raised KeyError).
            game.streams = streams.get(game.event_id, [])
            # game.streams.game = game
        else:
            game.streams = []
        self.append(game)
def __init__(self, sender):
    """
    Fetch a list of featured highlights from mlb.com, adding each to the
    menu.
    """
    ABCMenu.__init__(self, title2=sender.itemTitle, viewGroup="Details")
    entries = XML.ElementFromURL(C["URL"]["TOP_VIDEOS"]).xpath('item')
    # Cap the menu at the first 20 highlight entries.
    for entry in entries[:20]:
        content_id = entry.get("content_id")
        video_title = Util.XPathSelectOne(entry, "title")
        blurb = Util.XPathSelectOne(entry, "big_blurb")
        # presumably parseDuration yields seconds; the framework wants ms — TODO confirm
        millis = int(Util.parseDuration(Util.XPathSelectOne(entry, "duration"))) * 1000
        thumb_url = Util.XPathSelectOne(entry,
                                        "pictures/picture[@type='dam-raw-thumb']/url")
        video_url = Util.XPathSelectOne(entry, "url[@speed=1000]")
        self.Append(self.getVideoItem(content_id,
                                      url=video_url,
                                      title=video_title,
                                      summary=blurb,
                                      duration=millis,
                                      thumb=thumb_url))
def HandlePhotosRequest(pathNouns, count): dir_ = MediaContainer("art-default.jpg", "InfoList", "dinosaurcomics") global archived if archived == "new": archived = XML.ElementFromString(HTTP.GetCached(BASE_URL + "/archive.php", CACHE_TIME), True).xpath('//a[contains(@href, "www.qwantz.com/index.php?comic=")]') for item in archived[count*10:count*10+10]: try: title = item.text imgHTML = XML.ElementFromString(HTTP.GetCached(item.get("href"), CACHE_TIME), True).xpath('//img[contains(@class, "comic")]')[0] img = imgHTML.get("src") desc = imgHTML.get("title") ph = PhotoItem(img, title, desc, img) dir_.AppendItem(ph) except Exception, e: Log.Add("Exception caught when trying to fetch comic:") Log.Add("\t" + str(e)) Log.Add("\t" + item.get("href"))
def populateFromFeed(url, secondTitle=None, firstTitle=None):
    """Build a MediaContainer of VideoItems from the RSS feed at *url*.

    firstTitle defaults to "Channels"; secondTitle, when given, is also
    stripped from the front of each item title. Returns the container as XML.
    """
    if not firstTitle:
        firstTitle = "Channels"
    dir = MediaContainer('art-default.jpg', None, firstTitle, secondTitle)
    dir.SetViewGroup("InfoList")
    if USE_CACHE:
        feed = RSS.Parse(HTTP.GetCached(url, CACHE_INTERVAL))
    else:
        feed = RSS.Parse(url)
    added = 0
    for e in feed["items"]:
        try:
            id = e.enclosures[0]["href"]
            media_type = e.enclosures[0]["type"]
            # Skip flash enclosures, "vmv"(sic) types, and untyped items.
            if (media_type == "application/x-shockwave-flash"
                    or "vmv" in media_type or media_type == ""):
                continue
            try:
                description = XML.ElementFromString(e.description, True).text_content()
            except Exception:
                description = ""
            try:
                thumbnail = e["media_thumbnail"][0]["url"]
            except (KeyError, IndexError):
                thumbnail = ""
            try:
                content_url = e["media_content"][0]["url"]
            except (KeyError, IndexError):
                content_url = ""
            duration = ""
            title = e.title
            # Drop a leading "<secondTitle> " prefix from the item title.
            if secondTitle and title.find(secondTitle) == 0:
                title = title[len(secondTitle) + 1:]
            videoItem = VideoItem(id, title, description,
                                  duration + content_url, thumbnail)
            # TODO: Set the metadata!
            # videoItem.SetTelevisonMetadata(1,2,3)
            dir.AppendItem(videoItem)
            added += 1
            Log.Add("Read data for: " + title)
        except Exception:
            # best-effort: a malformed item should not abort the whole feed
            Log.Add("Skipping item due to an error.")
    Log.Add("Total Videos: " + str(added))
    return dir.ToXML()
def get_setting(name,channel=""):
    """Read a plugin preference value by *name*, merging the plugin's
    DefaultPrefs.json with any user-set values from the PMS preferences XML.

    NOTE(review): several names (cache.dir, download.enabled, cookies.dir,
    quality_youtube, cache.mode) are returned as hard-coded values below,
    unconditionally overriding whatever the user configured — confirm this
    is intentional. `channel` is accepted but never used.
    """
    try:
        prefs = []
        from PMS import Log
        from PMS import Plugin
        Log("bundlepath="+Plugin.__bundlePath)
        #/Users/jesus/Library/Application Support/Plex Media Server/Plug-ins/pelisalacarta.bundle
        # Load the plugin's declared preference defaults (a JSON list of
        # {"id": ..., "value": ...} dicts).
        path = "%s/Contents/DefaultPrefs.json" % Plugin.__bundlePath
        if os.path.exists(path):
            f = open(path, "r")
            string = f.read()
            f.close()
            from PMS import JSON
            prefs = JSON.ObjectFromString(string)
        from PMS import Prefs
        Log("prefspath="+Prefs.__prefsPath)
        #/Users/jesus/Library/Application Support/Plex Media Server/Plug-in Support/Preferences/com.plexapp.plugins.pelisalacarta.xml
        # Overlay any user-chosen values stored in the per-plugin prefs XML:
        # each child element's tag is matched against a default pref's id.
        path = Prefs.__prefsPath
        if os.path.exists(path):
            f = open(path, "r")
            from PMS import XML
            userPrefs = XML.ElementFromString(f.read())
            f.close()
            for userPref in userPrefs:
                for pref in prefs:
                    if pref["id"] == userPref.tag:
                        pref["value"] = userPref.text
        from PMS import Log
        # Look up the requested preference in the merged list.
        valor = ""
        for pref in prefs:
            Log("pref="+str(pref))
            if pref["id"]==name:
                valor = pref["value"]
    except:
        # NOTE(review): bare except — any failure silently yields "".
        valor=""
    # Hard-coded overrides (see docstring NOTE) take precedence over prefs.
    if name=="cache.dir":
        return ""
    if name=="download.enabled":
        return "false"
    if name=="cookies.dir":
        return os.getcwd()
    if name=="quality_youtube":
        return "8"
    elif name=="cache.mode":
        return "2"
    return valor
def browse(sender, orderby='dviews', query=None):
    """List the site's top-nav video categories as directory entries."""
    dir = MediaContainer(title2=sender.itemTitle)
    page = XML.ElementFromURL(VIDEOS_URL, isHTML=True)
    for anchor in page.xpath("//div[@id='topnav']/a"):
        name = anchor.text
        # Each category drills down into render_video_items with the
        # chosen sort order carried along in the query.
        entry = DirectoryItem(render_video_items, title=name)
        dir.Append(Function(entry,
                            title=sender.itemTitle + " - " + name,
                            query={'category': name, 'sortby': orderby}))
    return dir
def loadStreams(self, date):
    """
    Load stream data for a given day. (A stream, for this purpose, is any
    game-specific media listed on http://mlb.mlb.com/mediacenter/)
    """
    events = {}
    media_url = Util.DateURL(date, C["URL"]["MEDIA"])
    table = XML.ElementFromURL(media_url, True,
                               encoding='UTF-8').cssselect('.mmg_table tbody')[0]
    # Count the effective column total of the first row, honoring colspan.
    num_columns = sum(int(cell.get('colspan') or 1)
                      for cell in table.cssselect('tr:first-child td'))
    # Pick the configured column layout whose width matches this table,
    # then parse each event row against it.
    for column_types in C["MEDIA_COLUMNS"]:
        if len(column_types) != num_columns:
            continue
        for row in table.cssselect('tr'):
            event_id = row.get('id')
            cells = row.cssselect('td')
            # Rows without an id (or with too few cells) carry no event.
            if not event_id or len(cells) < len(column_types):
                continue
            found = (Stream.fromHTML(kind, cell)
                     for kind, cell in zip(column_types, cells))
            streams = [s for s in found if s]
            events[event_id] = list(GameStreamList(streams))
        break
    return events
def render_video_items(sender=None, title=None, query=None, page=1):
    """Render one page of video search results for *query*, appending a
    "more" entry that pages forward until the last results page.
    """
    # None sentinel replaces the shared mutable default `query={}`.
    if query is None:
        query = {}
    dir = MediaContainer(title2=title)
    url = SEARCH_URL + "?" + urlencode(query) + "&" + urlencode({'page': page})
    html = XML.ElementFromURL(url, isHTML=True)
    for item in html.xpath("//div[@class='resultcont']"):
        videoLink = item.xpath("h2/a")
        videoTitle = get_text(videoLink)
        videoUrl = get_attribute(videoLink, 'href')
        if videoTitle and videoUrl:
            videoDescription = get_text(item.xpath("p"))
            videoThumbnail = get_attribute(item.xpath("a/img"), 'src')
            dir.Append(VideoItem(resolve_video_url(videoUrl),
                                 title=videoTitle,
                                 summary=videoDescription,
                                 thumb=videoThumbnail))
    # Only offer the next page if this isn't already the last one.
    if not html.xpath("//a[@class='oldest current' and text()='Last']"):
        dir.Append(Function(DirectoryItem(render_video_items, title=MORE_TEXT),
                            title=title, query=query, page=page + 1))
    return dir
def getVideoItem(self, id, url=None, title=None, subtitle=None, summary=None, duration=None, thumb=None): """ Get the VideoItem for a highlight video, either by assembling the data we already have, or fetching more from mlb.com """ # (year, month, day, content_id) = (id[:4], id[4:6], id[6:8], id[8:]) # subtitle = None #"posted %s/%s/%s" % (month, day, year) xml = None if None in [url, title, subtitle, summary, duration, thumb]: xurl = C["URL"]["GAME_DETAIL"] % (id[-3], id[-2], id[-1], id) xml = XML.ElementFromURL( xurl, headers={"Referer": Util.getURLRoot(xurl)}) if url is None: # TODO this seems fragile. investigate another way. for scenario in [ "FLASH_1000K_640X360", "MLB_FLASH_1000K_PROGDNLD", "MLB_FLASH_1000K_STREAM_VPP", "FLASH_800K_640X360", "MLB_FLASH_800K_PROGDNLD", "MLB_FLASH_800K_STREAM_VPP", "FLASH_400K_600X338" ]: url = Util.XPathSelectOne( xml, 'url[@playback_scenario="' + scenario + '"]') if url is not None: break else: # couldn't find a URL return if duration is None: duration_string = Util.XPathSelectOne(xml, 'duration') if duration_string is not None: duration = int(Util.parseDuration(duration_string)) * 1000 if title is None: title = Util.XPathSelectOne(xml, 'headline') if subtitle is None: date = isodate.parse_datetime(Util.XPathSelectOne(xml, '//@date')) # Log(date.astimezone(datetime.datetime.now().tzinfo)) # subtitle = date.strftime("%a, %d %b %Y %H:%M:%S %Z") subtitle = date.strftime("%A, %B %d") if summary is None: summary = re.sub("^\s*(\d+\.){2}\d+\:", "", str(Util.XPathSelectOne(xml, 'big-blurb'))) if thumb is None: thumb = Util.XPathSelectOne( xml, 'thumbnailScenarios/thumbnailScenario[@type="3"]') if url[:7] == "rtmp://": # pass clip as an empty string to prevent an exception return RTMPVideoItem(url, clip="", title=title, subtitle=subtitle, summary=summary, duration=duration, thumb=thumb) else: return VideoItem(url, title, subtitle=subtitle, summary=summary, duration=duration, thumb=thumb)
def resolve_video_url(videoUrl):
    """Extract the direct DivX stream URL embedded in a video page."""
    html = XML.ElementFromURL(videoUrl, isHTML=True)
    embeds = html.xpath("//embed[@type='video/divx']")
    return get_attribute(embeds, 'src')