def Play(self, stream_name, stream_id, subtitle):
        """Resolve a playable stream for a player.omroep.nl episode page.

        Fetches the episode page, extracts the player id, retrieves the
        security key, and builds the stream-info URL from the MD5 of
        'streamid|securitypart'.  Returns a ba play object, or None when
        the security key cannot be fetched.
        """
        data = ba.FetchUrl(stream_id, 3600)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        # The stream id is the first argument of the page's load_player('...') call.
        streamid = re.compile("load_player\('(.*?)'", re.DOTALL + re.IGNORECASE).search(str(soup)).group(1)
        # NOTE(review): an empty id only triggers a notification and execution
        # continues — confirm whether an early return was intended here.
        if streamid == "": mc.ShowDialogNotification("Geen stream beschikbaar...")

        data = ba.FetchUrl('http://player.omroep.nl/info/security', 0)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        try:
            key = soup.session.key.contents[0]
        except:
            mc.ShowDialogNotification("Kan de security key niet ophalen")
            return
        # The key is base64-encoded; the secret is the part after '|'.
        security = base64.b64decode(key)

        securitystr = str(security).split('|')[1]
        md5code = streamid + '|' + securitystr
        md5code = md5.md5(md5code).hexdigest()

        streamdataurl = 'http://player.omroep.nl/info/stream/aflevering/' + str(streamid) + '/' + str(md5code).upper()
        data = ba.FetchUrl(streamdataurl, 0).decode('utf-8')
        xmlSoup = BeautifulSoup(data)
        # Pick the WVC1 variant and strip all whitespace from the stream URL.
        streamurl = xmlSoup.find(attrs={"compressie_formaat" : "wvc1"})
        url_play = streamurl.streamurl.contents[0].replace(" ","").replace("\n","").replace("\t","")

        play = ba.CreatePlay()
        play.SetPath(url_play)
        if subtitle:
            play.SetSubtitle(self.GetSubtitle(security, streamid))
            play.SetSubtitle_type('sami')

        return play
    def Genre(self, genre, filter, page, totalpage):
        """List the video episodes for a genre (a date id) from the ARD ajax cache.

        Returns a list of ba episode objects; empty when the response is too
        short to contain any data, with a notification shown to the user.
        """
        id = self.genre[genre]
        url = self.url_base + '/ard/servlet/ajax-cache/3517242/view=list/datum='+id+'/senderId=208/zeit=1/index.html'
        data = ba.FetchUrl(url, 2400)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        genrelist = list()
        # BUG FIX: the original compared the response *string* against 20 (always
        # False in Python 2) and referenced the undefined name stream_name in the
        # message; check the payload length and report the genre instead.
        if len(data) < 20:
            mc.ShowDialogNotification("No episode found for " + str(genre))
            return genrelist

        for info in soup.findAll( 'div', {'class' : 'mt-media_item'}):
            # Only items carrying the video icon are playable episodes.
            if info.findAll( 'span', {'class' : 'mt-icon mt-icon_video'}):
                detail = info.find('a')
                title = detail.contents[0]
                airtime = info.find('span', {'class' : 'mt-airtime'})

                genreitem = ba.CreateEpisode()
                genreitem.SetName(title)
                genreitem.SetId(detail['href'].split('=')[1])
                genreitem.SetDate(airtime.contents[0][-9:])
                genreitem.SetPage(page)
                genreitem.SetTotalpage(totalpage)
                genrelist.append(genreitem)
        if len(genrelist) < 1:
            mc.ShowDialogNotification("No episode found for " + str(genre))
        return genrelist
    def Episode(self, stream_name, stream_id, page, totalpage):
        """List the latest ZDF episodes ('aktuellste') for the show *stream_id*.

        Returns a list of ba episode objects; empty when the response is too
        short to contain data.
        """
        url = self.url_base + '/ZDFmediathek/xmlservice/web/aktuellste?id='+stream_id+'&maxLength=50'
        data = ba.FetchUrl(url, 3600)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        if len(data) < 5:
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            episodelist = list()
            return episodelist

        teaser = soup.findAll('teaser')

        episodelist = list()
        for info in teaser:
            # Only <teaser> entries of type 'video' are playable.
            if info.type.contents[0] == 'video':
                # FIX: removed a duplicated title lookup present in the original.
                title = info.find('title')
                detail = info.find('detail')
                id = info.find('assetid')
                airtime = info.find('airtime')
                airtime = airtime.contents[0]
                # Thumbnail URL is derived from the asset id.
                thumb = self.url_base + '/ZDFmediathek/contentblob/'+ str(id.contents[0]) +'/timg276x155blob'

                episode = ba.CreateEpisode()
                episode.SetName(title.contents[0])
                episode.SetId(id.contents[0])
                episode.SetDescription(stream_name + ': ' + detail.contents[0])
                episode.SetThumbnails(thumb)
                episode.SetDate(airtime)
                episode.SetPage(page)
                episode.SetTotalpage(totalpage)
                episodelist.append(episode)

        return episodelist
    def Genre(self, genre, filter, page, totalpage):
        """Return the ZDF episodes broadcast on the date mapped to *genre*."""
        day = self.genre[genre]
        url = (self.url_base
               + '/ZDFmediathek/xmlservice/web/sendungVerpasst?startdate='
               + day + '&enddate=' + day + '&maxLength=50')
        data = ba.FetchUrl(url, 2400)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        genrelist = list()
        if len(soup) < 20:
            mc.ShowDialogNotification("No episode found for " + str(genre))
            return genrelist

        for teaser in soup.findAll('teaser'):
            # Only video teasers are playable; skip everything else.
            if teaser.type.contents[0] != 'video':
                continue
            name = teaser.find('title')
            asset = teaser.find('assetid')
            when = teaser.find('airtime').contents[0]

            item = ba.CreateEpisode()
            item.SetName(name.contents[0])
            item.SetId(asset.contents[0])
            item.SetDate(when[-5:])
            item.SetPage(page)
            item.SetTotalpage(totalpage)
            genrelist.append(item)

        if not genrelist:
            mc.ShowDialogNotification("No episode found for " + str(genre))
        return genrelist
def ShowEpisode(urlshow, title = ""):
    """Fill the podcast-episode list (control 52, window 14000) with the
    preview entries scraped from *urlshow*."""
    targetwindow = 14000
    targetcontrol = 52

    mc.ShowDialogWait()
    page = ba.FetchUrl(urlshow, 3600)
    soup = BeautifulSoup(page, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")

    target_list = mc.GetWindow(targetwindow).GetList(targetcontrol)
    items = mc.ListItems()

    for row in soup.findAll('tr', {'class' : 'podcast-episode'}):
        # Keep the previous title when a row has none of its own.
        try:
            title = row['preview-title']
        except:
            pass
        # Prefer a video preview; fall back to audio, else skip the row.
        try:
            link = row['video-preview-url']
        except:
            try:
                link = row['audio-preview-url']
            except:
                link = False
        if link:
            entry = mc.ListItem(mc.ListItem.MEDIA_UNKNOWN)
            entry.SetLabel(title.encode('utf-8', 'ignore'))
            entry.SetPath(str(link))
            items.append(entry)

    mc.HideDialogWait()
    target_list.SetItems(items)
Example #6
0
def getTagsForUrl(url):
    """Propose tags for the page at *url*.

    Downloads the page (best effort: failures yield an empty document),
    extracts the rendered text, asks getTagsProposalsForText for proposals,
    and merges in any tags already stored for the url, without duplicates.
    """
    try:
        content = resource.get(url).decodeBody().lower()
    except Exception:
        # FIX: narrowed from a bare except so SystemExit/KeyboardInterrupt
        # are no longer swallowed; fetch remains best-effort.
        content = ""

    soup = BeautifulSoup(content)
    texts = soup.findAll(text=True)

    def visible(element):
        # Drop text nodes that never render (script/style/head/title) and
        # HTML comments.
        if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
            return False
        if re.match('<!--.*-->', str(element)):
            return False
        return True

    visibleText = " ".join(filter(visible, texts))

    result = getTagsProposalsForText(visibleText)

    # Merge tags already stored for this url, skipping ones already proposed.
    entry = Entry.all().filter("url =", url).fetch(1)
    if len(entry) > 0:
        for t in entry[0].tags:
            name = Tag.get(t).name
            if name not in result:
                result.append(name)

    return result
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Scrape the episode list from the show page referenced by *stream_id*."""
        data = ba.FetchUrl(self.url_base + stream_id + "?ajax,sb/sb", 3600)

        episodelist = list()
        if data == "":
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return episodelist

        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        for item in soup.findAll("li"):
            # Only entries whose link carries an <em> (the date marker) count.
            if not item.a.findAll("em"):
                continue
            episode = ba.CreateEpisode()
            episode.SetName(stream_name)
            episode.SetId(item.a["href"])
            episode.SetDescription(item.a.span.contents[0].replace(" ", "").replace("\n", "").replace("\t", ""))
            episode.SetThumbnails(item.a.img["src"])
            episode.SetDate(item.a.em.contents[0][-10:])
            episode.SetPage(page)
            episode.SetTotalpage(totalpage)
            episodelist.append(episode)

        return episodelist
Example #8
0
def ConvertSami(samiurl):
    """Convert a SAMI subtitle document at *samiurl* to SRT and cache it on disk.

    Returns the path of the written .srt file (written even when the document
    contains no <sync> tags, in which case the file is empty).
    """
    def _srt_time(start):
        # SAMI 'start' is in milliseconds; split into H:M:S plus the ms residue.
        # NOTE(review): fields are not zero-padded (e.g. 0:1:5,250) — strict SRT
        # expects HH:MM:SS,mmm; output format kept as-is to preserve behavior.
        msec = str(start)[-3:]
        total = int(str(start)) // 1000
        hour = total // 3600
        minute = (total - hour * 3600) // 60
        sec = total - hour * 3600 - minute * 60
        return str(hour) + ':' + str(minute) + ':' + str(sec) + ',' + str(msec)

    data = FetchUrl(samiurl, 0)
    soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
    i = 1
    sync = ''
    temp = ''
    for info in soup.findAll("sync"):
        if info.find(attrs={"class" : "ENUSCC"}):
            # Cue start: sequence number, buffered text, opening timestamp.
            sync += str(i) + '\n'
            temp = info.find(attrs={"class" : "ENUSCC"}).contents[0]
            sync += _srt_time(info['start'])
            i += 1
        else:
            # Cue end: close the time range and emit the buffered text.
            sync += ' --> ' + _srt_time(info['start']) + '\n'
            sync += str(temp) + '\n' + '\n'

    # BUG FIX: the original rewrote the file on every <sync> node and left
    # subFilePath unbound (NameError on return) when the document had no
    # <sync> tags; write once, after the loop.
    subFilePath = mc.GetTempDir() + os.sep + 'subcache.srt'
    f = open(subFilePath, "w")
    f.write(sync)
    f.close()
    return subFilePath
    def SearchNzb(self, name):
        """Search nzbclub's RSS feed and fill list 51 of window 14445 with results,
        restoring the previously focused row where possible."""
        url = ('http://www.nzbclub.com/nzbfeed.aspx?ig=1&st=5&sp=1&ns=1&sn=1&q='
               + quote_plus(name) + '&de=' + self.retention)
        data = ba.FetchUrl(url, 3600)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        result_list = mc.GetWindow(14445).GetList(51)
        focus = int(result_list.GetFocusedItem())
        items = mc.ListItems()

        for entry in soup.findAll('item'):
            item = mc.ListItem(mc.ListItem.MEDIA_UNKNOWN)
            item.SetLabel(ba.ConvertASCII(entry.title.contents[0]))
            item.SetProperty('info', ba.ConvertASCII(entry.description.contents[0].replace("<br />", "\n")))
            item.SetProperty('date', str(entry.findAll('pubdate')[0].contents[0]))
            item.SetPath(str(entry.enclosure['url']))
            items.append(item)
        result_list.SetItems(items)

        # Re-apply the cursor position, clamped to the new list length.
        last = len(items) - 1
        if focus > 0 and focus < last:
            result_list.SetFocusedItem(focus)
        elif focus >= last:
            result_list.SetFocusedItem(last)
Example #10
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """List the catch-up episodes for an ITV programme."""
        url = "http://mercury.itv.com/api/html/dotcom/Episode/Programme/" + quote(stream_id)
        data = ba.FetchUrl(url, 3600)
        soup = BeautifulSoup(data)

        episodelist = list()
        if len(data) < 10:
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return episodelist

        # Each table row of the schedule body is one episode.
        for row in soup.find("tbody").findAll("tr"):
            when = row.find("td", {"class": "t_time"})
            length = row.find("td", {"class": "t_duration"})
            details = row.find("td", {"class": "t_details"})

            episode = ba.CreateEpisode()
            episode.SetName(stream_name)
            episode.SetId(self.url_base + details.a["href"])
            episode.SetDescription(length.contents[0] + " - " + details.span.contents[0])
            episode.SetThumbnails(details.a.img["src"])
            episode.SetDate(when.contents[2])
            episode.SetPage(page)
            episode.SetTotalpage(totalpage)
            episodelist.append(episode)
        return episodelist
    def GetCategory(self):
        """Fetch the server's category names and show them in list 51 of window 15000."""
        url = ('http://' + self.host + ':' + self.port
               + '/api?mode=queue&start=START&limit=LIMIT&output=xml&apikey=' + self.api)
        data = ba.FetchUrl(url, 360).decode('utf-8')
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        target = mc.GetWindow(15000).GetList(51)
        items = mc.ListItems()
        for category in soup.findAll('category'):
            entry = mc.ListItem(mc.ListItem.MEDIA_UNKNOWN)
            entry.SetLabel(str(category.contents[0].encode('utf-8')))
            items.append(entry)
        target.SetItems(items)
    def List(self):
        """Return a stream for every programme link on the overview page."""
        url = self.url_base + '/content/ta_ent_video_shows_group/ta_ent_programme_result_module_skin.xinc'
        soup = BeautifulSoup(ba.FetchUrl(url), convertEntities="xml", smartQuotesTo="xml")

        streamlist = list()
        for anchor in soup.findAll('a'):
            # Skip placeholder anchors that point nowhere.
            if anchor['href'] == '#':
                continue
            stream = ba.CreateStream()
            stream.SetName(anchor['title'])
            stream.SetId(anchor['href'])
            streamlist.append(stream)

        return streamlist
Example #13
0
    def Search(self, search):
        """Query the BBC iPlayer suggest service and return matching streams."""
        url = ('http://search.bbc.co.uk/suggest?scope=iplayer&format=xml&callback=xml.suggest&q='
               + quote_plus(search))
        data = ba.FetchUrl(url, 0)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        streamlist = list()
        for suggestion in soup.findAll('text'):
            label = suggestion.contents[0]
            stream = ba.CreateStream()
            stream.SetName(label)
            stream.SetId(ba.ConvertASCII(label))
            streamlist.append(stream)

        return streamlist
Example #14
0
    def Genre(self, genre, filter, page, totalpage):
        """Build the ITV schedule list for a day (*genre*), optionally narrowed
        to one channel by *filter*.

        Returns ba episode objects sorted by broadcast time, newest first.
        """
        url = "http://mercury.itv.com/api/html/dotcom/Schedule/"
        data = ba.FetchUrl(url, 3600)
        soup = BeautifulSoup(data)

        if len(data) < 10:
            # BUG FIX: the notification referenced the undefined name
            # stream_name; report the genre instead.
            mc.ShowDialogNotification("No episode found for " + str(genre))
            return list()

        day = soup.find("li", {"class": re.compile("^" + genre)})

        # Channels to include: the explicit filter, or every known channel.
        net = []
        if filter and filter != "None":
            net.append(filter)
        else:
            for id in self.filter:
                net.append(id)
        if "None" in net:
            net.remove("None")

        data = {}
        data_sorted = []
        for i in net:
            netdata = day.find("li", {"class": re.compile("^" + i)})
            for info in netdata.findAll(attrs={"class": re.compile("^whatsOnTime")}):
                if info.a:
                    title = info.find("span", {"class": "title"})
                    time = info.find("span", {"class": "time"})
                    # date:[name,id,filter]
                    data[time.contents[0]] = [title.contents[0], self.url_base + info.a["href"], i]
        # FIX: use sorted() rather than list.sort() on dict.keys(), which only
        # works on Python 2 (keys() is a view on Python 3); behavior unchanged.
        for i in sorted(data, reverse=True):
            data_sorted.append({"name": data[i][0], "id": data[i][1], "filter": data[i][2], "date": i})

        genrelist = list()
        for info_sorted in data_sorted:
            genreitem = ba.CreateEpisode()
            genreitem.SetName(info_sorted["name"])
            genreitem.SetId(info_sorted["id"])
            genreitem.SetDate(info_sorted["date"])
            genreitem.SetFilter(info_sorted["filter"])
            genreitem.SetPage(page)
            genreitem.SetTotalpage(totalpage)
            genrelist.append(genreitem)

        return genrelist
def get_page(sender, instance, **kwargs):
    """Signal handler: download instance.url and cache the page body, title,
    description and keywords on the instance (missing meta tags become empty
    strings).  Caller is expected to save the instance afterwards.
    """
    data = urllib.urlopen(instance.url)
    # Decode leniently; invalid byte sequences are dropped rather than raising.
    instance.page = unicode(data.read(), errors="ignore")
    soup = BeautifulSoup(instance.page)
    instance.title = soup.html.head.title.string
    desc = soup.find("meta", {"name": "description"})
    if desc:
        instance.description = desc["content"]
    else:
        instance.description = ""
    keywords = soup.find("meta", {"name": "keywords"})
    if keywords:
        instance.keywords = keywords["content"]
    else:
        instance.keywords = ""
Example #16
0
    def List(self):
        """Return streams for every show linked from the alphabetical index page."""
        data = ba.FetchUrl(self.url_base + "/alfabetisk")
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        active_tab = soup.findAll("div", {"class": "tab active"})[0]

        streamlist = list()
        for anchor in active_tab.findAll("a"):
            label = anchor.contents[0]
            # Very short anchor texts are skipped (same threshold as before).
            if len(label) > 4:
                stream = ba.CreateStream()
                stream.SetName(label)
                stream.SetId(anchor["href"])
                streamlist.append(stream)

        return streamlist
Example #17
0
    def Search(self, search):
        """POST a keyword search to ajax_zoek.php and parse the result list."""
        data = ba.FetchUrl(self.url_base + "/script/common/ajax_zoek.php", 0, False,
                           "keyword=" + quote(search))
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        streamlist = list()
        for hit in soup.findAll("li"):
            text = str(hit.span.contents[0])
            # The span text is '---' delimited: the id sits between the first
            # two markers, the display name follows (searched past a 4-char prefix).
            name = re.compile("---(.*?)$", re.DOTALL + re.IGNORECASE).search(text[4:]).group(1)
            show_id = re.compile("---(.*?)---", re.DOTALL + re.IGNORECASE).search(text).group(1)
            stream = ba.CreateStream()
            stream.SetName(name)
            stream.SetId(show_id)
            streamlist.append(stream)
        return streamlist
    def Search(self, search):
        """Search hulu's show browser and return a stream per result link."""
        url = self.url_base + '/browse/search?alphabet=All&family_friendly=0&closed_captioned=0&has_free=1&has_huluplus=0&has_hd=0&channel=All&subchannel=&network=All&display=Shows%20with%20full%20episodes%20only&decade=All&type=tv&view_as_thumbnail=false&block_num=0&keyword=' + quote_plus(search)
        raw = ba.FetchUrl(url)

        # The result markup is embedded in a JS call; extract and unescape it.
        fragment = re.compile('"show_list", "(.*?)"\)', re.DOTALL + re.IGNORECASE).search(str(raw)).group(1)
        fragment = fragment.replace('\\u003c', '<').replace('\\u003e', '>').replace('\\', '').replace('\\n', '').replace('\\t', '')
        soup = BeautifulSoup(fragment, convertEntities="xml", smartQuotesTo="xml")

        streamlist = list()
        for anchor in soup.findAll('a', {'onclick': True}):
            stream = ba.CreateStream()
            stream.SetName(anchor.contents[0])
            stream.SetId(anchor['href'])
            streamlist.append(stream)

        return streamlist
Example #19
0
    def Genre(self, genre, filter, page, totalpage):
        """List the last-7-days broadcasts for a genre (detail view, paged).

        *filter* optionally narrows to one broadcaster; *totalpage* is derived
        from the pagination block on the first call.  Returns ba episode
        objects carrying an upper-cased broadcaster code as filter.
        """
        url = self.url_base + '/7dagen/' + genre
        if filter != "": url = url + ',' + str(filter)
        url = url + '?weergave=detail&page=' + str(page)
        data = ba.FetchUrl(url, 3600)
        if data == "":
            mc.ShowDialogNotification("No genre found for " + str(genre))
            genrelist = list()
            return genrelist
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        if totalpage == "":
            try:
                # The second-to-last pager link holds the highest page number;
                # default to a single page when there is no pagination block.
                pagediv = soup.findAll( 'div', {'class' : 'pagination'})[0]
                apage = pagediv.findAll("a")
                totalpage = int(apage[len(apage)-2].contents[0])
            except:
                totalpage = 1

        div_show = soup.find( 'table', {'class' : 'broadcasts detail'})

        genrelist = list()
        for info in div_show.findAll("tr"):
            # Broadcaster taken from the logo's alt text; the Dutch public
            # channels are shortened to nl1/nl2/nl3.
            omroep = info.findAll(attrs={"class" : "broadcaster-logo"})[0]['alt']
            if omroep == "Nederland 1": omroep = "nl1"
            elif omroep == "Nederland 2": omroep = "nl2"
            elif omroep == "Nederland 3": omroep = "nl3"
            try:
                thumb = info.findAll(attrs={"class" : "thumbnail"})[0]['src']
            except:
                # Fall back to the placeholder image when no real thumbnail exists.
                thumb = info.findAll(attrs={"class" : "thumbnail placeholder"})[0]['src']
            path = self.url_base + info.find(attrs={"class" : "thumbnail_wrapper"})['href']
            date = info.find(attrs={"class" : "time"}).time.contents[0].replace(' ','').replace('\n','').replace('\t','')
            title = info.findAll(attrs={"class" : "series"})[0].contents[0]
            desc = info.find('div', {'class' : 'description'}).p.contents[0]

            genreitem = ba.CreateEpisode()
            genreitem.SetName(title)
            genreitem.SetId(path)
            genreitem.SetDescription(desc)
            genreitem.SetThumbnails(thumb)
            genreitem.SetDate(date)
            genreitem.SetFilter(str(omroep).upper())
            genreitem.SetPage(page)
            genreitem.SetTotalpage(totalpage)
            genrelist.append(genreitem)

        return genrelist
    def List(self):
        """Return streams for the shows on the home page, minus excluded names."""
        soup = BeautifulSoup(ba.FetchUrl(self.url_home), convertEntities="xml", smartQuotesTo="xml")

        alphabetical = soup.findAll('div', {'class' : 'mo-a alphabetical'})[0]
        wrapper = alphabetical.findAll('div', {'class' : 'wrapper'})[0]

        streamlist = list()
        for anchor in wrapper.findAll('a'):
            stream = ba.CreateStream()
            name = anchor.contents[0]
            # Skip names on the exclusion list.
            if name in self.exclude:
                continue
            stream.SetName(name)
            stream.SetId(self.url_base + anchor['href'])
            streamlist.append(stream)

        return streamlist
Example #21
0
    def List(self):
        """Collect every ZDF show from the A-Z service, one request per letter."""
        titles = []
        ids = []
        for letter in list('ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
            url = (self.url_base + '/ZDFmediathek/xmlservice/web/sendungenAbisZ?characterRangeStart='
                   + letter + '&detailLevel=2&characterRangeEnd=' + letter)
            soup = BeautifulSoup(ba.FetchUrl(url), convertEntities="xml", smartQuotesTo="xml")

            titles.extend(soup.findAll('title'))
            ids.extend(soup.findAll('assetid'))

        # Titles and asset ids arrive as parallel lists; pair them positionally.
        streamlist = list()
        for show_title, show_id in izip(titles, ids):
            stream = ba.CreateStream()
            stream.SetName(show_title.contents[0].replace('"', ''))
            stream.SetId(show_id.contents[0])
            streamlist.append(stream)

        return streamlist
Example #22
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """List the episodes of an ARD show (paged ajax-cache view).

        Determines the total page count from the pager on the first call and
        returns a list of ba episode objects.
        """
        url = self.url_base + '/ard/servlet/ajax-cache/3516962/view=list/documentId='+stream_id+'/goto='+str(page)+'/index.html'
        data = ba.FetchUrl(url, 3600)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        # BUG FIX: the original compared the response *string* against 20
        # (always False in Python 2); test the payload length instead.
        if len(data) < 20:
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return list()

        if totalpage == "":
            try:
                # The third pager span carries the total page count in its tail.
                pages = soup.find( 'li', {'class' : 'mt-paging ajax-paging-li'})
                pages = pages.findAll('span')[2]
                pages = pages.contents[0][-2:].replace(' ','')
                totalpage = int(pages)
            except:
                # No pager present: assume a single page.
                totalpage = 1

        episodelist = list()
        for info in soup.findAll( 'div', {'class' : 'mt-media_item'}):
            # Only items carrying the video icon are playable episodes.
            if info.findAll( 'span', {'class' : 'mt-icon mt-icon_video'}):
                detail = info.find('a')
                airtime = info.find('span', {'class' : 'mt-airtime'})
                thumb = info.find('img')

                episode = ba.CreateEpisode()
                episode.SetName(stream_name)
                episode.SetId(detail['href'].split('=')[1])
                episode.SetDescription(detail.contents[0])
                episode.SetThumbnails(self.url_base + thumb['data-src'])
                episode.SetDate(airtime.contents[0])
                episode.SetPage(page)
                episode.SetTotalpage(totalpage)
                episodelist.append(episode)

        return episodelist
Example #23
0
    def Search(self, search):
        """Search programmes; returns an empty list when no results are present."""
        data = ba.FetchUrl(self.url_base + '/programmas/search', 0, True,
                           'query=' + quote_plus(search))
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        results = soup.find("ul")

        streamlist = list()
        # When the response has no <ul>, results is None and findAll raises.
        try:
            results.findAll("a")
        except:
            return streamlist

        for anchor in results.findAll('a'):
            stream = ba.CreateStream()
            stream.SetName(anchor.contents[0])
            stream.SetId(anchor['href'].split('/')[2])
            streamlist.append(stream)

        return streamlist
    def Episode(self, stream_name, stream_id, page, totalpage):
        """List the episodes on a show page; *stream_id* is a full URL prefix.

        NOTE(review): the error paths return a bare ba.CreateEpisode() while
        the success path returns a list — confirm callers handle both shapes.
        """
        url = str(stream_id) + '/page=' + str(page)
        data = ba.FetchUrl(url, 3600)

        if data == "":
            mc.ShowDialogNotification("Geen afleveringen gevonden voor " + str(stream_name))
            return ba.CreateEpisode()

        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        if totalpage == "":
            try:
                # Total pages = number of paginator spans minus one; default 1.
                pages = soup.findAll( 'div', {'class' : 'paginator'})[0]
                pages = pages.findAll('span')
                totalpage = len(pages) - 1
            except:
                totalpage = 1

        div_main = soup.findAll('div', {'class' : 'mo-c double'})[0]
        div_show = div_main.findAll('div', {'class' : 'wrapper'})[0]

        info = div_show.findAll('div', {'class' : 'thumb'})
        airtime = div_show.findAll('div', {'class' : 'airtime'})

        if len(info) < 1:
            mc.ShowDialogNotification("Geen afleveringen gevonden voor " + str(stream_name))
            return ba.CreateEpisode()

        episodelist = list()
        # Thumbs and airtimes are parallel lists; pair them positionally.
        for info_i, airtime_i in izip(info, airtime):
            episode = ba.CreateEpisode()
            episode.SetName(stream_name)
            episode.SetId(self.url_base + info_i.a['href'])
            episode.SetThumbnails(self.url_base + info_i.find('img')['src'])
            episode.SetDate(airtime_i.a.span.contents[0])
            episode.SetPage(page)
            episode.SetTotalpage(totalpage)
            episodelist.append(episode)

        return episodelist
def ShowNet():
    """Fill list 51 of window 14000 with the free entries of the iTunes
    top-podcasts RSS feed for the configured country and genre, and update
    the header label (control 10102) with the current selection."""
    mc.ShowDialogWait()
    targetcontrol = 51
    targetwindow = 14000

    countryid = config.GetValue("country")
    genreid = config.GetValue("genre")
    # Default to the US store when no country has been configured.
    if countryid == '': countryid = 'US'

    url = 'http://itunes.apple.com/'+countryid+'/rss/toppodcasts/limit=50/genre='+genreid+'/xml'

    list = mc.GetWindow(targetwindow).GetList(targetcontrol)
    list_items = mc.ListItems()

    data = ba.FetchUrl(url, 3600)
    soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

    for info in soup.findAll('entry'):
        title = info.find('im:name').contents[0]
        link = info.link['href']
        thumb = info.find('im:image', {'height' : '170'}).contents[0]
        price = info.find('im:price')['amount']
        artist = info.find('im:artist').contents[0]
        # Entries without a summary get an empty description.
        try: summary = info.summary.contents[0]
        except: summary = ''

        # Only list free podcasts (price amount of '0').
        if price == '0':
            list_item = mc.ListItem(mc.ListItem.MEDIA_UNKNOWN)
            list_item.SetLabel(title.encode('utf-8', 'ignore'))
            list_item.SetThumbnail(str(thumb))
            list_item.SetPath(str(link))
            list_item.SetDescription(summary.encode('utf-8', 'ignore'))
            list_item.SetArtist(artist.encode('utf-8', 'ignore'))
            list_items.append(list_item)

    Label = mc.GetWindow(targetwindow).GetLabel(10102)
    Label.SetLabel('[B]Country:[/B] '+country[countryid]+'[CR][B]Genre:[/B] '+genre[genreid])

    mc.HideDialogWait()
    list.SetItems(list_items)
    def GetQuery(self):
        """Poll the SABnzbd queue API and refresh window 14444: the pause
        toggle (control 10198) and the per-slot download list (control 51)
        with progress, speed and time-left properties."""
        url = 'http://'+ self.host + ':' + self.port + '/api?mode=queue&start=START&limit=LIMIT&output=xml&apikey=' + self.api

        try:
            data = ba.FetchUrl(url)
        except:
            # Server unreachable: leave the window untouched.
            return
        
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        # Speed may be absent; the other queue-level fields are assumed present.
        try: speed = soup.findAll('speed')[0].contents[0]
        except: speed = ''
        timeleft = soup.findAll('timeleft')[0].contents[0]
        pause_int = soup.findAll('pause_int')[0].contents[0]
        paused = soup.findAll('paused')[0].contents[0]
        slot = soup.findAll('slot')
        
        # '0' means no timed pause is set.
        if pause_int == '0': pause_int = 'Unlimited'
        else: pause_int = pause_int + ' s'
        window = mc.GetWindow(14444)
        if str(paused) == "True":
            window.GetToggleButton(10198).SetSelected(True)
        else:
            window.GetToggleButton(10198).SetSelected(False)
            
        if slot:
            list = window.GetList(51)
            focus = int(list.GetFocusedItem())
            print focus

            list_items = mc.ListItems()
            for info in soup.findAll('slot'):
                # Round the percentage to the nearest 10 for the progress icon name.
                percentage = int(info.percentage.contents[0])
                percentage = int(round(percentage/10.0)*10.0)
                list_item = mc.ListItem(mc.ListItem.MEDIA_UNKNOWN)
                list_item.SetLabel(ba.ConvertASCII(info.filename.contents[0]))
                list_item.SetPath(str(info.nzo_id.contents[0]))
                list_item.SetProperty('queued', str(info.status.contents[0]))
                list_item.SetProperty('info', str(info.mb.contents[0]) + 'Mb - ' + str(info.timeleft.contents[0]))
                list_item.SetProperty('status', 'download-' + str(percentage) + '.png')
                list_item.SetProperty('speed', str(speed) + 'bps')
                list_item.SetProperty('timeleft', str(timeleft))
                list_item.SetProperty('paused', str(paused))
                list_item.SetProperty('pause_int', str(pause_int))
                list_item.SetProperty('percentage', str(info.percentage.contents[0]) + '%')
                list_items.append(list_item)
            list.SetItems(list_items)
            print len(list_items)
            # Restore the cursor position, clamped to the new list length.
            max = len(list_items) - 1
            if focus > 0 and focus < max:
                list.SetFocusedItem(focus)
            elif focus >= max:
                list.SetFocusedItem(max)
        else:
            # Empty queue: show the window's empty state instead.
            self.EmptySearch(14444)
Example #27
0
    def Play(self, stream_name, stream_id, subtitle):
        """Resolve the WMV stream (and optional caption file) for a ZDF item."""
        url = 'http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?ak=web&id='+stream_id
        soup = BeautifulSoup(ba.FetchUrl(url), convertEntities="xml", smartQuotesTo="xml")

        fmt = soup.find('formitaet', {'basetype':'wmv3_wma9_asf_mms_asx_http'})
        stream_url = fmt.url.contents[0]

        # Captions are optional; fall back to an empty string when missing.
        caption = soup.find('caption')
        try:
            sub = caption.url.contents[0]
        except:
            sub = ''

        play = ba.CreatePlay()
        play.SetPath(stream_url)
        if subtitle and sub:
            play.SetSubtitle(str(sub))
            play.SetSubtitle_type('flashxml')

        return play
Example #28
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """List the episodes embedded in a quicksearch-results JS fragment.

        Returns a list of ba episode objects; empty on an empty response.
        """
        url = self.url_base + '/quicksearch?page=' + str(page) + '&series_id=' + stream_id
        data = ba.FetchUrl(url, 3600)

        if data == "":
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            episodelist = list()
            return episodelist

        # The result markup is embedded in a JS call; extract and unescape it.
        data = re.compile('"quicksearch-results", "(.*?)"\)', re.DOTALL + re.IGNORECASE).search(str(data)).group(1)
        data = data.replace('\\"','"').replace('\\n','').replace('\\t','')
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        if totalpage == "":
            try:
                # The second-to-last pager link holds the highest page number.
                pages = soup.findAll( 'div', {'class' : 'pagination'})[0]
                pages = pages.findAll('a')
                totalpage = int(pages[len(pages)-2].contents[0])
            except:
                totalpage = 1

        episodelist = list()
        # BUG FIX: this loop body was indented with tabs while the rest of the
        # method uses spaces (a TabError on Python 3); reindented with spaces.
        for info in soup.findAll('li'):
            # Skip <li> entries without an episode link.
            try:
                id = info.h3.a['href']
            except:
                id = False
            if id:
                episode = ba.CreateEpisode()
                episode.SetName(stream_name)
                episode.SetId(self.url_base + id)
                episode.SetDescription(info.span.contents[0].replace(' ','') + ' - '+ info.p.contents[0])
                episode.SetThumbnails(info.a.img['src'])
                episode.SetDate(info.h3.a.contents[0])
                episode.SetPage(page)
                episode.SetTotalpage(totalpage)
                episodelist.append(episode)

        return episodelist
    def Episode(self, stream_name, stream_id, page, totalpage):
        """List full episodes from a show page's 'slidefullepisodes' carousel."""
        url = self.url_base + stream_id
        data = ba.FetchUrl(url, 3600)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        episodelist = list()
        # BUG FIX: the original compared the response *string* against 20
        # (always False in Python 2); test the payload length instead.
        if len(data) < 20:
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return episodelist

        try:
            episodes = soup.findAll( 'div', {'id' : 'slidefullepisodes'})[0]
        except:
            # Carousel missing: no full episodes on this page.
            return episodelist

        for info in episodes.findAll('li'):
            detail = info.findAll('a')[2]
            title = info.findAll('a')[1]
            thumb = info.findAll('img')[1]
            episodenr = info.find('strong')
            # The numeric id is the last '-'-separated token of the link.
            link = title['href'].split('-')
            linkid = link.pop()

            episode = ba.CreateEpisode()
            episode.SetName(title['title'])
            episode.SetId(linkid)
            episode.SetDescription(detail['title'])
            episode.SetThumbnails(thumb['src'])
            episode.SetDate(episodenr.contents[0])
            episode.SetPage(page)
            episode.SetTotalpage(totalpage)
            episodelist.append(episode)

        return episodelist
Example #30
0
 def crawl(self, albumArtist, album, releaseDate):
     """Search Beatport for *album* by *albumArtist* and scrape the release page.

     Returns a tuple (album, label, catalog number, release date, art URL)
     when a release matching *releaseDate* is found; implicitly returns
     None otherwise.
     """
     releaseFound = False
     searchPage = urllib.urlopen("{0}/search?query={1}&facets[]=fieldType:release".format(self.domain, urllib.quote(albumArtist + " " + album))).read()
     searchHtml = BeautifulSoup(searchPage)
     #releaseLinks = searchHtml.findAll('a', { 'name' : 'unit_title' })
     releaseLinks = []
     releases = searchHtml.findAll('li', { 'name' : re.compile('tiles-list_release_[0-9]+') })
     
     for release in releases:
         thisTitle = release.find('a', { 'name' : 'unit_title' })
         thisAlbum = thisTitle.string
         thisUrl = thisTitle['href']
         thisDate = release.find('span', { 'class' : 'itemRenderer-minor' }).contents[1].replace(" | ", "").strip()
         print thisDate
         # First search result whose date matches wins.
         if releaseDate and releaseDate == thisDate:
             releaseUrl = thisUrl
             beatportAlbum = thisAlbum
             releaseFound = True
             break
             
     if releaseFound:
         # open the release page
         releasePage = urllib.urlopen("{0}{1}".format(self.domain, releaseUrl))
         releaseHtml = BeautifulSoup(releasePage)
         
         # now that we are here, we can obtain some of the release info
         releaseInfoLabels = releaseHtml.findAll('td', { 'class' : 'meta-data-label' })
         
         # Each label cell is followed by its value cell: the first three are
         # assumed to be release date, label, and catalog number, in that
         # order — TODO confirm against the live page layout.
         beatportReleaseDate = releaseInfoLabels[0].nextSibling.string
         beatportLabel = releaseInfoLabels[1].nextSibling.a.string
         beatportCatalogNumber = releaseInfoLabels[2].nextSibling.string
         beatportAlbumArtUrl = releaseHtml.find('img', { 'class' : 'tile-image' })['src']
             
         return (self.decodeHtml(beatportAlbum), 
                 self.decodeHtml(beatportLabel), 
                 self.decodeHtml(beatportCatalogNumber), 
                 self.decodeHtml(beatportReleaseDate), 
                 beatportAlbumArtUrl)