Example #1
import cgi
import re
import sys
import time
import urllib
import xml.dom.minidom

import tools  # local helper module bundled with the plugin (unescape, ordinal, defaultinfo, ...)

# Note: tv3_urls, base_url(), seasonepisode() and dateduration() are defined
# elsewhere in sources.py.


def agenda():
    """ Returns events from a google calendar URL. For instance you could use a
        private one like:
        
        https://www.google.com/calendar/feeds/[email address]/private-[stuff]/basic?[options]
        
        The URL is stored in calxmlurl.txt in the same folder as sources.py.

        The options are whatever suits; I use:

        orderby=starttime&sortorder=ascending&singleevents=true&futureevents=true&max-results=5

        See the following for hints on options:
         * https://developers.google.com/google-apps/calendar/v2/reference#Parameters
         * https://developers.google.com/gdata/docs/2.0/reference#Queries
    """

    try:
        results = ""
        URL = open("calxmlurl.txt").read()
        dom = xml.dom.minidom.parse(urllib.urlopen(URL))
        entries = dom.getElementsByTagName("entry")
        for e in dom.getElementsByTagName("entry"):
            # Parse out the event title
            event = e.getElementsByTagName("title")[0].lastChild.toxml()\
                        .encode('ascii','ignore')
            # The feed appears to double-escape entities, hence the double
            # unescape; re-escape the result for the HTML generated below.
            event = cgi.escape(
                        tools.unescape(tools.unescape(event)).encode('ascii'))
            if len(event) > 20:
                event = event[:17] + '...'

            # Parse out the summary; this contains the start and end date/time
            summary = e.getElementsByTagName("summary")[0].lastChild.toxml()\
                        .encode('ascii','ignore').split("\n")[0]
            date = re.findall(
                        r'When:.*?[ ]([0-9]{1,2}[ ].*?[0-9]{4}).*?', summary)[0]
            date = time.strptime(date, "%d %b %Y")
            date = "%i%s" % (date.tm_mday, tools.ordinal(date.tm_mday))
            times = re.findall(r'.*?([0-9]{2}:[0-9]{2}).*?', summary)

            # Handle "All day" events
            displaytime = "All day"
            if times:
                displaytime = times[0]

            # Generate some HTML
            results += "%s - <span class=\"dt\">%s, %s</span><br />" %\
                       (event, date, displaytime)
        return results
    except Exception:
        # Fall back to a placeholder on any failure (missing calxmlurl.txt,
        # network error, unexpected feed markup).
        pass
    return "???"


def add_item_atoz(soup, provider, count):
    """Scrape an item from an A-to-Z-style HTML page block."""
    baseurl = base_url(provider)
    info = tools.defaultinfo()
    info["Studio"] = provider
    # Initialised up front so the addlistitems() call at the end always has a
    # dict, even when the block yields no usable link.
    infoitems = {}
    if soup.find('h5'):
        link = soup.h5.find("a", attrs={"href": re.compile(baseurl)})
        if link:
            href = re.match(
                "%s/(.*?)/%s/([0-9]+)/%s/([0-9]+)/%s/([0-9]+)/" %
                (baseurl, tv3_urls["VIDEO1"], tv3_urls["VIDEO2"],
                 tv3_urls["VIDEO3"]),
                link['href'])
            if href and link.string:
                title = link.string.strip()
                if title != "":
                    info["TVShowTitle"] = title
                    image = soup.find("img", attrs={
                        "src": re.compile(tv3_urls["IMG_RE2"]), "title": True})
                    if image:
                        info.update(tools.imageinfo(image))
                    info.update(seasonepisode(soup.contents[4]))
                    info["Title"] = tools.itemtitle(info["TVShowTitle"],
                                                    info["PlotOutline"])
                    plot = soup.find("span", attrs={"class": "lite"})
                    if plot and plot.string:
                        cleanedplot = plot.string.strip()
                        if cleanedplot:
                            info["Plot"] = tools.unescape(cleanedplot)
                    info["Count"] = count
                    info["FileName"] = "%s?ch=TV3&id=%s&info=%s" % (
                        sys.argv[0],
                        "%s,%s,%s,%s" % (href.group(1), href.group(2),
                                         href.group(3), href.group(4)),
                        urllib.quote(str(info)))
                    infoitems[info["Title"]] = info
                    #tools.addlistitem(int(sys.argv[1]), info, tv3_urls["Fanart"], 0)
    tools.addlistitems(infoitems, tv3_urls["Fanart"], 0)


def add_item_div(soup, provider, count):
    """Scrape an item from a div-style HTML page block."""
    baseurl = base_url(provider)
    info = tools.defaultinfo()
    info["Studio"] = provider
    sys.stderr.write(baseurl)  # debug output of the provider's base URL
    link = soup.find("a", attrs={"href": re.compile(baseurl)})
    if link:
        href = re.match(
            "%s/(.*?)/%s/([0-9]+)/%s/([0-9]+)/%s/([0-9]+)/" %
            (baseurl, tv3_urls["VIDEO1"], tv3_urls["VIDEO2"],
             tv3_urls["VIDEO3"]),
            link['href'])
        if href and link.string:
            title = link.string.strip()
            if title != "":
                info["TVShowTitle"] = title
                image = soup.find("img", attrs={
                    "src": re.compile(tv3_urls["IMG_RE"]), "title": True})
                if image:
                    info.update(tools.imageinfo(image))
                se = soup.find("span", attrs={"class": "title"})
                if se:
                    info.update(seasonepisode(se))
                date = soup.find("span", attrs={"class": "dateAdded"})
                if date:
                    info.update(dateduration(date))
                info["Title"] = tools.itemtitle(info["TVShowTitle"],
                                                info["PlotOutline"])
                info["Count"] = count
                # Guard against a missing plot <div>, which would otherwise
                # raise an AttributeError on .string.
                plot = soup.find("div", attrs={"class": "left"})
                if plot and plot.string and plot.string.strip() != "":
                    info["Plot"] = tools.unescape(plot.string.strip())
                info["FileName"] = "%s?ch=TV3&id=%s&info=%s" % (
                    sys.argv[0],
                    "%s,%s,%s,%s" % (href.group(1), href.group(2),
                                     href.group(3), href.group(4)),
                    urllib.quote(str(info)))
                tools.addlistitem(int(sys.argv[1]), info, tv3_urls["Fanart"], 0)
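

# Hedged usage sketch (not from the original sources.py): one way a listing
# page could be fed to the scrapers above. "scrape_listing", "listingurl" and
# the "div.content" block selector are illustrative assumptions; a real page
# may need add_item_atoz() or a different selector instead.
def scrape_listing(listingurl, provider):
    from BeautifulSoup import BeautifulSoup  # BeautifulSoup 3, matching the soup API used above
    soup = BeautifulSoup(urllib.urlopen(listingurl).read())
    # Hand each per-show block to the div-style scraper.
    for count, block in enumerate(soup.findAll("div", attrs={"class": "content"})):
        add_item_div(block, provider, count)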