def INDEX(): #Create the top level Ziln menu with Channels and Search folders
 info = tools.defaultinfo(1)
 info["Title"] = localize(30053)
 info["Count"] = 1
 #info["Thumb"] = "DefaultVideoPlaylists.png"
 info["FileName"] = "%s?ch=Ziln&folder=channels" % sys.argv[0]
 tools.addlistitem(int(sys.argv[1]), info, ziln_urls["Fanart"], 1)
 info = tools.defaultinfo(1)
 info["Title"] = localize(30065)
 info["Count"] = 2
 info["Thumb"] = "DefaultVideoPlaylists.png"
 info["FileName"] = "%s?ch=Ziln&folder=search" % sys.argv[0]
 tools.addlistitem(int(sys.argv[1]), info, ziln_urls["Fanart"], 1)
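The tools module these examples lean on is not shown. As a rough sketch only, a tools.addlistitem-style helper matching the call sites above (handle, info dict, fanart, folder flag, optional total and URL) might wrap the standard xbmcgui/xbmcplugin calls like this; the real helper presumably filters the info keys and marks non-folder items playable:
# Hypothetical sketch -- the add-on's real tools module is not included in these examples.
import xbmcgui, xbmcplugin

def addlistitem(handle, info, fanart, isfolder, total=0, url=None):
 item = xbmcgui.ListItem(info.get("Title", ""), thumbnailImage=info.get("Thumb", ""))
 item.setInfo("video", info)              #pass the scraped metadata through
 item.setProperty("fanart_image", fanart) #per-item fanart, as used throughout these examples
 xbmcplugin.addDirectoryItem(handle, url or info["FileName"], item, isFolder=bool(isfolder), totalItems=total)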
Example #2
def INDEX_SHOWS(provider): #Create a second level list of TV Shows from a TV3 webpage
 #doc = tools.gethtmlpage("%s/Shows/tabid/64/Default.aspx" % ("http://www.tv3.co.nz")) #Get our HTML page with a list of video categories
 doc = tools.gethtmlpage("%s/Shows.aspx" % ("http://www.tv3.co.nz")) #Get our HTML page with a list of video categories
 if doc:
  html_divtag = BeautifulSoup(doc)
  linksdiv = html_divtag.find('div', attrs = {"id": "pw_8171"})
  if linksdiv:
   links = linksdiv.findAll('a')
   if len(links) > 0:
    count = 0
    for link in links:
     info = tools.defaultinfo(1)
     info["Title"] = link.string.strip()
     catid = link['href']
     if info["Title"] == "60 Minutes": #The URL on the next line has more videos
      info["FileName"] = "%s?ch=TV3&cat=%s&title=%s&catid=%s" % (sys.argv[0], "shows", urllib.quote(info["Title"]), urllib.quote(catid)) #"http://ondemand.tv3.co.nz/Default.aspx?TabId=80&cat=22"
     else:
      info["FileName"] = "%s?ch=TV3&cat=%s&title=%s&catid=%s" % (sys.argv[0], "shows", urllib.quote(info["Title"]), urllib.quote(catid))
     info["Count"] = count
     count += 1
     tools.addlistitem(int(sys.argv[1]), info, tv3_urls["Fanart"], 1)
   else:
    sys.stderr.write("Couldn't find any videos in list")
  else:
   sys.stderr.write("Couldn't find video list")
 else:
  sys.stderr.write("Couldn't get index webpage")
Example #3
def INDEX(provider): #Create a list of top level folders as scraped from TV3's website
 doc = tools.gethtmlpage("%s/tabid/56/default.aspx" % (base_url(provider))) #Get our HTML page with a list of video categories
 if doc:
  a_tag = SoupStrainer('a')
  html_atag = BeautifulSoup(doc, parseOnlyThese = a_tag)
  links = html_atag.findAll(attrs={"rel": "nofollow", "href": re.compile(tv3_urls["CAT_RE"])}) #, "title": True
  if len(links) > 0:
   count = 0
   for link in links:
    info = tools.defaultinfo(1)
    info["Title"] = link.string
    caturl = link['href']
    catid_match = re.search('%s([0-9]+)' % (tv3_urls["CAT_RE"]), caturl) #guard: the href may lack a numeric id
    catid = catid_match.group(1) if catid_match else None
    if info["Title"] == "Title (A - Z)":
     cat = "atoz"
    elif info["Title"] == "TV3 Shows":
     cat = "tv3"
    #elif info["Title"] == "C4TV Shows":
    elif info["Title"] == "FOUR Shows":
     cat = "c4tv"
    else:
     cat = "tv"
    if catid:
     info["Count"] = count
     count += 1
     info["FileName"] = "%s?ch=TV3&cat=%s&catid=%s" % (sys.argv[0], cat, catid)
     tools.addlistitem(int(sys.argv[1]), info, tv3_urls["Fanart"], 1)
  else:
   sys.stderr.write("Couldn't find any categories")
 else:
  sys.stderr.write("Couldn't get index webpage")
Example #4
def INDEX_FOLDER(folder): #Create second level folder for the hierarchy view, only showing items for the selected top level folder
 infopages = dict()
 infopages["0"]  = ("63", localize(30052), "tv3", localize(30056)) # Latest
 infopages["1"]  = ("61", localize(30052), "tv3", localize(30057)) # Most Watched
 infopages["2"]  = ("64", localize(30052), "tv3", localize(30058)) # Expiring soon
 infopages["3"]  = ("70", localize(30052), "atoz", "A - Z")
 infopages["4"]  = ("71", localize(30053), "tv3", "TV3")
 infopages["5"]  = ("72", localize(30053), "c4tv", "FOUR")
 infopages["6"]  = ("65", localize(30054), "tv3", localize(30059)) # Comedy
 infopages["7"]  = ("66", localize(30054), "tv3", localize(30060)) # Drama
 infopages["8"]  = ("67", localize(30054), "tv3", localize(30061)) # News/Current affairs
 infopages["9"]  = ("68", localize(30054), "tv3", localize(30062)) # Reality
 infopages["10"] = ("82", localize(30054), "tv3", localize(30063)) # Sports
 infopages["11"] = ("80", localize(30052), "tv3", localize(30064)) # All
 #infopages["12"] = ("74", "RSS", "tv3", "RSS Feeds")
 #infopages["13"] = ("81", "Categories", "tv3", "C4 Highlights")
 #infopages["13"] = ("73", "Categories", "tv3", "All (Small)")
 for index in infopages:
  if infopages[index][1] == folder:
   info = tools.defaultinfo(1)
   info["Title"] = infopages[index][3]
   info["Count"] = int(index)
   info["FileName"] = "%s?ch=TV3&cat=%s&catid=%s" % (sys.argv[0], infopages[index][2], infopages[index][0])
   tools.addlistitem(int(sys.argv[1]), info, tv3_urls["Fanart"], 1)
 if folder == "Shows":
  INDEX_SHOWS("tv3")
 elif folder == "Search":
  INDEX_SEARCH()
Example #5
def add_item_atoz(soup, provider, count): #Scrape items from an AtoZ-style HTML page
 baseurl = base_url(provider)
 info = tools.defaultinfo()
 info["Studio"] = provider
 if soup.find('h5'):
  link = soup.h5.find("a", attrs={"href": re.compile(baseurl)})
  if link:
   infoitems = {}
   href = re.match("%s/(.*?)/%s/([0-9]+)/%s/([0-9]+)/%s/([0-9]+)/" % (baseurl, tv3_urls["VIDEO1"], tv3_urls["VIDEO2"], tv3_urls["VIDEO3"]), link['href'])
   if href:
    if link.string:
     title = link.string.strip()
     if title != "":
      info["TVShowTitle"] = title
      image = soup.find("img", attrs={"src": re.compile(tv3_urls["IMG_RE2"]), "title": True})
      if image:
       info.update(tools.imageinfo(image))
      info.update(seasonepisode(soup.contents[4]))
      info["Title"] = tools.itemtitle(info["TVShowTitle"], info["PlotOutline"])
      plot = soup.find("span", attrs={"class": "lite"})
      if plot and plot.string: #the description span may be missing
       cleanedplot = plot.string.strip()
       if cleanedplot:
        info["Plot"] = tools.unescape(cleanedplot)
      info["Count"] = count
      info["FileName"] = "%s?ch=TV3&id=%s&info=%s" % (sys.argv[0], "%s,%s,%s,%s" % (href.group(1), href.group(2), href.group(3), href.group(4)), urllib.quote(str(info)))
      infoitems[info["Title"]] = info
      #tools.addlistitem(int(sys.argv[1]), info, tv3_urls["Fanart"], 0)
   tools.addlistitems(infoitems, tv3_urls["Fanart"], 0)
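The seasonepisode() and tools.itemtitle() helpers used by the TV3 scrapers are not part of these examples. Hypothetical sketches, assuming the page text carries wording like "Season 3, Episode 12" (the exact format is a guess):
# Hypothetical sketches -- the real helpers and the exact on-page wording are assumptions.
import re

def seasonepisode(tag):
 # Accept either a tag or a bare string node from BeautifulSoup.
 text = tag if isinstance(tag, basestring) else (tag.string or "")
 text = text.strip()
 info = {"PlotOutline": text}
 match = re.search(r"Season\s+(\d+),\s*Episode\s+(\d+)", text, re.I) #assumed wording
 if match:
  info["Season"] = int(match.group(1))
  info["Episode"] = int(match.group(2))
 return info

def itemtitle(showtitle, outline):
 # Combine show title and episode outline into a single display label.
 return "%s - %s" % (showtitle, outline) if outline else showtitle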
Example #6
def add_item_div(soup, provider, count): #Scrape items from a div-style HTML page
 baseurl = base_url(provider)
 info = tools.defaultinfo()
 info["Studio"] = provider
 sys.stderr.write(baseurl)
 link = soup.find("a", attrs={"href": re.compile(baseurl)})
 if link:
  href = re.match("%s/(.*?)/%s/([0-9]+)/%s/([0-9]+)/%s/([0-9]+)/" % (baseurl, tv3_urls["VIDEO1"], tv3_urls["VIDEO2"], tv3_urls["VIDEO3"]), link['href'])
  if href:
   if link.string:
    title = link.string.strip()
    if title != "":
     info["TVShowTitle"] = title
     image = soup.find("img", attrs={"src": re.compile(tv3_urls["IMG_RE"]), "title": True})
     if image:
      info.update(tools.imageinfo(image))
     se = soup.find("span", attrs={"class": "title"})
     if se:
      info.update(seasonepisode(se))
     date = soup.find("span", attrs={"class": "dateAdded"})
     if date:
      info.update(dateduration(date))
     info["Title"] = tools.itemtitle(info["TVShowTitle"], info["PlotOutline"])
     info["Count"] = count
     plot = soup.find("div", attrs={"class": "left"}).string
     if plot:
      if plot.strip() <> "":
       info["Plot"] = tools.unescape(plot.strip())
     info["FileName"] = "%s?ch=TV3&id=%s&info=%s" % (sys.argv[0], "%s,%s,%s,%s" % (href.group(1), href.group(2), href.group(3), href.group(4)), urllib.quote(str(info)))
     tools.addlistitem(int(sys.argv[1]), info, tv3_urls["Fanart"], 0)
Example #7
def add_item_show(soup, provider, count, title): #Scrape items from a show-style HTML page
 info = tools.defaultinfo()
 info["Studio"] = provider
 bold = soup.find('b')
 if bold:
  link = bold.find("a", attrs={"href": re.compile(tv3_urls["FEEDBURNER_RE"])})
  if link:
   urltype = "other"
  else:
   link = bold.find("a", attrs={"href": re.compile(base_url("tv3"))})
   if link:
    urltype = "tv3"
  if link:
   if link.string:
    plot = link.string.strip()
    if plot != "":
     info["PlotOutline"] = plot
     info["TVShowTitle"] = title
     image = soup.find("img", attrs={"src": re.compile(tv3_urls["IMG_RE"])})
     if image:
      info.update(tools.imageinfo(image))
     info.update(seasonepisode(link))
     info["Title"] = tools.itemtitle(info["TVShowTitle"], info["PlotOutline"])
     info["Count"] = count
     if urltype == "tv3":
      href = re.search("%s/(.*?)/%s/([0-9]+)/%s/([0-9]+)/%s/([0-9]+)/" % (base_url("tv3"), tv3_urls["VIDEO1"], tv3_urls["VIDEO2"], tv3_urls["VIDEO3"]), link['href'])
      if href:
       info["FileName"] = "%s?ch=TV3&id=%s&info=%s" % (sys.argv[0], "%s,%s,%s,%s" % (href.group(1), href.group(2), href.group(3), href.group(4)), urllib.quote(str(info)))
     elif urltype == "other":
      info["FileName"] = "%s?ch=TV3&id=%s&info=%s" % (sys.argv[0], urllib.quote(link["href"]), urllib.quote(str(info)))
     tools.addlistitem(int(sys.argv[1]), info, tv3_urls["Fanart"], 0)
Example #8
def SHOW_EPISODES(id): #List a TVNZ show's episodes and add an Extras folder when one exists
 getEpisodes(id, "%s/content/%s/ps3_xml_skin.xml" % (BASE_URL, id))
 link = tools.gethtmlpage("%s/content/%s_extras_group/ps3_xml_skin.xml" % (BASE_URL, id[:-15]), "ps3")
 if link:
  node = tools.getxmldocument(link)
  if node:
   info = tools.defaultinfo(1)
   info["FileName"] = "%s?ch=TVNZ&type=shows&id=%s_extras_group" % (sys.argv[0], id[:-15])
   info["Title"] = "Extras"
   tools.addlistitem(int(sys.argv[1]), info, FANART_URL, 1)
 return
Example #9
def PROGRAMMES(type, urlext): #Scrape Ziln channel, video or search result listings
 if type == "channel":
  folder = 1
  url = ziln_urls["ZILN"]
 elif type == "video":
  folder = 0
  #url = "%s/channel/%s" % (ziln_urls["ZILN"], urlext)
  url = "%s/assets/php/slider.php?channel=%s" % (ziln_urls["ZILN"], urlext)
 elif type == "search":
  folder = 0
  url = "%s/search?search_keyword=%s" % (ziln_urls["ZILN"], urlext.replace(" ", "+"))
 doc = tools.gethtmlpage(url)
 if doc:
  if type == "channel" or type == "search":
   div_tag = SoupStrainer('div')
   html_divtag = BeautifulSoup(doc, parseOnlyThese = div_tag)
   programmes = html_divtag.findAll(attrs={'class' : 'programmes'})
  elif type == "video":
   div_tag = SoupStrainer('body')
   html_divtag = BeautifulSoup(doc, parseOnlyThese = div_tag)
   programmes = html_divtag.findAll(attrs={'class' : 'slider slider-small'})
  if type == "search":
   type = "video"
  if len(programmes) > 0:
   for programme in programmes:
    list = programme.find('ul')
    if list:
     listitems = list.findAll('li')
     if len(listitems) > 0:
      count = 0
      for listitem in listitems:
       link = listitem.find('a', attrs={'href' : re.compile("^/%s/" % type)})
       if link and link.img: #listitem.find() may return None
        if re.search("assets/images/%ss/" % type, link.img["src"]):
         info = tools.defaultinfo(1)
         #info["Title"] = link.img["alt"]
         if listitem.p.string:
          info["Title"] = listitem.p.string.strip()
         else:
          info["Title"] = link.img["alt"]
         info["Thumb"] = "%s/%s" % (ziln_urls["ZILN"], link.img["src"])
         info["Count"] = count
         count += 1
         #channelurl = re.search("/%s/(.*)" % type, link["href"]).group(1)
         channelurl = re.search("assets/images/%ss/([0-9]*?)-mini.jpg" % type, link.img["src"]).group(1)
         #infourl = "&info=%s" % urllib.quote(str(info))
         info["FileName"] = "%s?ch=Ziln&%s=%s" % (sys.argv[0], type, urllib.quote(channelurl))
         tools.addlistitem(int(sys.argv[1]), info, ziln_urls["Fanart"], folder)
    else:
     sys.stderr.write("Search returned no results")
  else:
   sys.stderr.write("Couldn't find any programs")
 else:
  sys.stderr.write("Couldn't get page")
Example #10
def RESOLVE(index): #, info
 doc = tools.gethtmlpage("%s/playlist/null/%s" % (ziln_urls["ZILN"], index))
 if doc:
  soup = BeautifulStoneSoup(doc)
  #tools.message(soup.find('media:content')["url"])
  #minidom.parseString(doc).documentElement.getElementsByTagName("media:content")[0].attributes["url"].value
  info = tools.defaultinfo(0)
  info["Title"] = soup.find('item').title.contents[0]
  info["Thumb"] = soup.find('jwplayer:image').contents[0]
  info["Plot"] = soup.find('description').contents[0]
  uri = "%s%s" % (ziln_urls["ZILN"], soup.find('media:content')["url"])
  tools.addlistitem(int(sys.argv[1]), info, ziln_urls["Fanart"], 0, 1, uri)
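The commented-out minidom line above points at an alternative to BeautifulStoneSoup for pulling the stream URL out of the playlist XML; spelled out as a runnable sketch using the same element names:
# Sketch of the minidom route hinted at in the comment above.
from xml.dom import minidom

def resolve_url_with_minidom(doc, ziln_base):
 dom = minidom.parseString(doc)
 media = dom.documentElement.getElementsByTagName("media:content")[0]
 return "%s%s" % (ziln_base, media.attributes["url"].value)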
Example #11
def getShow(show): #Add a folder item for a TVNZ show from its navigation XML node
 se = re.search('/content/(.*)_(episodes|extras)_group/ps3_xml_skin.xml', show.attributes["href"].value)
 if se:
  info = tools.defaultinfo(1)
  info["FileName"] = "%s?ch=TVNZ&type=singleshow&id=%s_episodes_group" % (sys.argv[0], se.group(1))
  info["Title"] = show.attributes["title"].value
  #if "videos" in show.attributes.keys():
  # videos = int(show.attributes["videos"].value)
  #else:
  # videos = 0
  #channel = show.attributes["channel"].value
  #url = "%s?ch=TVNZ&type=singleshow&id=%s_episodes_group" % (sys.argv[0],show_id)
  tools.addlistitem(int(sys.argv[1]), info, FANART_URL, 1)
Example #12
def RESOLVE(channel, count): #Add the Shine TV live stream at the configured quality
 info = tools.defaultinfo(0)
 info["Title"] = 'Shine TV (Live Stream)'
 info["Thumb"] = os.path.join(addon.getAddonInfo('path'), "resources/images/%s.png" % channel)
 info["Plot"] = "Shine TV is a television network of the Rhema Broadcasting Group Inc - New Zealand's largest Christian media organisation. On-air since December 2002, Shine broadcasts 24 hours nationwide on the SKY digital and Freeview Satellite platforms, with regional channels in Canterbury, Nelson and Wellington."
 info["Date"] = date.today().strftime("%d.%m.%Y")
 quality = 'fast'
 if addon.getSetting('%s_quality' % channel) == "Low":
  quality = 'mobile'
 elif addon.getSetting('%s_quality' % channel) == "Medium":
  quality = 'slow'
 info["FileName"] = "%s%s" % ("mms://wnss1.streaming.net.nz/rbg-shinetv-", quality)
 tools.addlistitem(int(sys.argv[1]), info, "resources/images/%s.jpg" % channel, 0, count)
Example #13
def INDEX_FOLDERS(): #Create a list of top level folders for the hierarchy view
 folders = dict()
 folders["0"] = localize(30052) # "Categories"
 folders["1"] = localize(30053) # "Channels"
 folders["2"] = localize(30054) # "Genres"
 #folders["3"] = localize(30055) # "Shows"
 folders["4"] = localize(30065) # "Search"
 count = len(folders)
 for index in folders:
  info = tools.defaultinfo(1)
  info["Title"] = folders[index]
  info["Count"] = int(index)
  info["FileName"] = "%s?ch=TV3&folder=%s" % (sys.argv[0], folders[index])
  tools.addlistitem(int(sys.argv[1]), info, tv3_urls["Fanart"], 1, count)
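A side note on the loop above: plain dicts are unordered in Python 2, so the folder items may be emitted in arbitrary order and XBMC is left to order them by the Count field. A tiny self-contained illustration of iterating the string keys numerically instead (folder names taken from the localize() comments above):
# Illustration only -- iterate string-keyed indices in numeric order.
folders = {"0": "Categories", "1": "Channels", "2": "Genres", "4": "Search"}
for index in sorted(folders, key=int):
 print index, folders[index]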
Example #14
def getEpisode(ep): #Build the info dict for a TVNZ episode XML node
 info = tools.defaultinfo(0)
 
 title = ep.attributes["title"].value
 subtitle = ep.attributes["sub-title"].value
 if len(subtitle) == 0:
  titleparts = title.split(': ') # Some Extras have the Title and Subtitle put into the title attribute separated by ': '
  if len(titleparts) == 2:
   title = titleparts[0]
   subtitle = titleparts[1]

 season = 0
 episode = 1
 episodeparts = ep.attributes["episode"].value.split('|')
 if len(episodeparts) == 3:
  se = re.search('Series ([0-9]+), Episode ([0-9]+)', episodeparts[0].strip())
  if se:
   season = int(se.group(1))
   episode = int(se.group(2))
   # Adjust the subtitle to include the season and episode numbers.
   if len(subtitle):
    subtitle = str(season) + "x" + str(episode) + " " + subtitle
   else:
    subtitle = str(season) + "x" + str(episode)
  elif len(subtitle) == 0:
   subtitle = episodeparts[0].strip() # E.g. "Coming Up" or "Catch Up"
  info["Date"] = getDate(episodeparts[1].strip())
  info["Premiered"] = episodeparts[1].strip()
  info["Duration"] = getDuration(episodeparts[2].strip())

 info["TVShowTitle"] = title
 info["Title"] = subtitle
 info["Season"]  = season
 info["Episode"] = episode
 
 #channel = ep.attributes["channel"].value
 info["Thumb"] = ep.attributes["src"].value

 if len(info["Title"]):
  label = "%s - \"%s\"" % (info["TVShowTitle"], info["Title"],)
 else:
  label = info["TVShowTitle"]
 info["Title"] = label
 if ep.firstChild:
  info["Plot"] = ep.firstChild.data
 info["FileName"] = "%s?ch=TVNZ&type=video&id=%s&info=%s" % (sys.argv[0], ep.attributes["href"].value, urllib.quote(str(info)))
 return(info)
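getEpisode() relies on getDate() and getDuration() helpers that are not included here. Hypothetical sketches, assuming the TVNZ feed uses strings such as "Mon 12 Mar 2012" and "45:30"; the real formats may differ:
# Hypothetical sketches -- the assumed input formats are guesses, not the confirmed feed layout.
import re, time

def getDate(text):
 # e.g. "Mon 12 Mar 2012" -> "12.03.2012", the dd.mm.yyyy form used elsewhere in these examples
 try:
  return time.strftime("%d.%m.%Y", time.strptime(text, "%a %d %b %Y"))
 except ValueError:
  return text

def getDuration(text):
 # e.g. "45:30" -> "45" (whole minutes)
 match = re.match(r"(\d+)", text)
 return match.group(1) if match else "0"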
Example #15
def INDEX(): #Create the top level TVNZ menu from the PS3 navigation XML
 link = tools.gethtmlpage("%s/content/ps3_navigation/ps3_xml_skin.xml" % (BASE_URL), "ps3")
 if link:
  count = 0
  node = tools.getxmldocument(link)
  if node:
   for stat in node.getElementsByTagName('MenuItem'):
    type = stat.attributes["type"].value
    if type in ('shows', 'alphabetical'): #, 'distributor'
     m = re.search('/([0-9]+)/',stat.attributes["href"].value)
     if m:
      info = tools.defaultinfo(1)
      info["Title"] = stat.attributes["title"].value
      info["Count"] = count
      count += 1
      info["FileName"] = "%s?ch=TVNZ&type=%s&id=%s" % (sys.argv[0], type, m.group(1))
      tools.addlistitem(int(sys.argv[1]), info, FANART_URL, 1)
Example #16
def RESOLVE(channel, count): #Add the Parliament TV live stream at the configured quality and protocol
 info = tools.defaultinfo(0)
 info["Title"] = 'Parliament TV (Live Stream)'
 info["Thumb"] = os.path.join(addon.getAddonInfo('path'), "resources/images/%s.png" % channel)
 info["Plot"] = "Parliament TV provides live broadcasts from the House of Representatives. Question time is replayed each day at 6pm and 10pm."
 info["Date"] = date.today().strftime("%d.%m.%Y")
 quality = '384'
 if addon.getSetting('%s_stream' % channel) == "Apple Quicktime":
  quality = '512'
 if addon.getSetting('%s_quality' % channel) == "Low":
  quality = '56'
 elif addon.getSetting('%s_quality' % channel) == "Medium":
  quality = '128'
 info["FileName"] = "%s%s" % ("mms://wms-parliament.harmonycdn.net/parlserv-house", quality)
 if addon.getSetting('%s_stream' % channel) == "Apple Quicktime":
  info["FileName"] = "%s%s%s" % ("rtsp://Qts1.ptv.parliament.nz/ptv-", quality, ".sdp")
 tools.addlistitem(int(sys.argv[1]), info, "resources/images/%s.jpg" % channel, 0, count)
Example #17
def add_item_table(soup, provider, count, title): #Scrape items from a table-style HTML page
 info = tools.defaultinfo()
 info["Studio"] = provider
 link = soup.find('a')
 if link:
  if link.string:
   plot = link.string.strip()
   if plot != "":
    info["PlotOutline"] = plot
    info["TVShowTitle"] = title
    info.update(seasonepisode(link))
    info["Title"] = tools.itemtitle(info["TVShowTitle"], info["PlotOutline"])
    info["Count"] = count
    href = re.search("%s/(.*?)/%s/([0-9]+)/%s/([0-9]+)/%s/([0-9]+)/" % (base_url("tv3"), tv3_urls["VIDEO1"], tv3_urls["VIDEO2"], tv3_urls["VIDEO3"]), link['href'])
    if href:
     info["FileName"] = "%s?ch=TV3&id=%s&info=%s" % (sys.argv[0], "%s,%s,%s,%s" % (href.group(1), href.group(2), href.group(3), href.group(4)), urllib.quote(str(info)))
    tools.addlistitem(int(sys.argv[1]), info, tv3_urls["Fanart"], 0)
Example #18
def SHOW_LIST(id): #List TVNZ shows from a category's PS3 XML page
 link = tools.gethtmlpage("%s/content/%s/ps3_xml_skin.xml" % (BASE_URL, id), "ps3")
 node = tools.getxmldocument(link) if link else None #gethtmlpage() may have returned nothing
 if node:
  urls = list()
  count = 0
  infoitems = {}
  for show in node.getElementsByTagName('Show'):
   se = re.search('/content/(.*)_(episodes|extras)_group/ps3_xml_skin.xml', show.attributes["href"].value)
   if se:
    if se.group(2) == "episodes":
     #videos = int(show.attributes["videos"].value)
     #channel = show.attributes["channel"].value
     info = tools.defaultinfo(1)
     info["FileName"] = "%s?ch=TVNZ&type=singleshow&id=%s_episodes_group" % (sys.argv[0], se.group(1))
     info["Title"] = show.attributes["title"].value
     info["Count"] = count
     count += 1
     infoitems[info["Title"]] = info
  tools.addlistitems(int(sys.argv[1]), infoitems, FANART_URL, 1)
Example #19
def add_item_search(soup, provider, count): #Scrape items from a search results HTML page
 baseurl = base_url(provider)
 info = tools.defaultinfo()
 info["Studio"] = provider
 title = soup.find("div", attrs={"class": 'catTitle'})
 if title:
  info["TVShowTitle"] = title.a.string.strip()
  href = re.match("%s/(.*?)/%s/([0-9]+)/%s/([0-9]+)/%s/([0-9]+)/" % (baseurl, tv3_urls["VIDEO1"], tv3_urls["VIDEO2"], tv3_urls["VIDEO3"]), title.a['href'])
  image = soup.find("img")
  if image:
   info.update(tools.imageinfo(image))
  ep = soup.find("div", attrs={"class": 'epTitle'})
  if ep:
   if ep.a:
    info.update(seasonepisode(ep.a))
  date = soup.find("div", attrs={"class": 'epDate'})
#  if date:
#   sys.stderr.write(date.span[1].string.strip())
  info["Title"] = tools.itemtitle(info["TVShowTitle"], info["PlotOutline"])
  info["Count"] = count
  info["FileName"] = "%s?ch=TV3&id=%s&info=%s" % (sys.argv[0], "%s,%s,%s,%s" % (href.group(1), href.group(2), href.group(3), href.group(4)), urllib.quote(str(info)))
  tools.addlistitem(int(sys.argv[1]), info, tv3_urls["Fanart"], 0)
Example #20
def INDEX(): #Create the top level channel list (TV3, TVNZ, optional Ziln plus live streams)
 channels = dict()
 channels["0"] = "TV3"
 channels["1"] = "TVNZ"
 if addon.getSetting('Ziln_hide') == "false":
  channels["2"] = "Ziln"
 #channels["3"] = "iSKY"
# streamingchannels = dict()
# streamingchannels["0"] = "Shine"
# streamingchannels["1"] = "Parliament"
 count = len(channels) + 1 #+ len(streamingchannels)
 for index in channels:
  info = tools.defaultinfo(1)
  info["Title"] = channels[index]
  info["Thumb"] = os.path.join(addon.getAddonInfo('path'), "resources/images/%s.png" % channels[index])
  info["Count"] = int(index)
  info["FileName"] = "%s?ch=%s" % (sys.argv[0], channels[index])
  tools.addlistitem(int(sys.argv[1]), info, "resources/images/%s.jpg" % channels[index], 1, count)
 if addon.getSetting('Parliament_hide') == "false":
  import parliament
  parliament.RESOLVE("Parliament", count)
 if addon.getSetting('Shine_hide') == "false":
  import shine
  shine.RESOLVE("Shine", count)
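For context, entry points like the INDEX() and RESOLVE() functions above are normally driven from the plugin's invocation arguments: sys.argv[0] is the plugin URL, sys.argv[1] the directory handle and sys.argv[2] the query string. A minimal dispatcher sketch; the routing shown is an assumption based on the FileName URLs built in these examples:
# Sketch of a plugin entry point; module layout and routing are assumptions.
import sys, urlparse, xbmcplugin

params = dict(urlparse.parse_qsl(sys.argv[2].lstrip("?")))
channel = params.get("ch")

if channel is None:
 INDEX()                                  #top level channel list (Example #20)
elif channel == "Ziln" and "folder" in params:
 PROGRAMMES("channel", params["folder"])  #illustrative only -- real routing depends on the add-on
# ...further routing on the cat/catid/id parameters encoded in the FileName URLs above...

xbmcplugin.endOfDirectory(int(sys.argv[1]))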