Exemplo n.º 1
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Fetch the episode list for a stream via the site's ajax endpoint."""
        data = ba.FetchUrl(self.url_base + stream_id + "?ajax,sb/sb", 3600)

        episodelist = list()
        if data == "":
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return episodelist

        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        for item in soup.findAll("li"):
            # Only entries carrying an <em> (these hold the air date) are episodes.
            if not item.a.findAll("em"):
                continue
            description = item.a.span.contents[0]
            for ch in (" ", "\n", "\t"):
                description = description.replace(ch, "")
            episode = ba.CreateEpisode()
            episode.SetName(stream_name)
            episode.SetId(item.a["href"])
            episode.SetDescription(description)
            episode.SetThumbnails(item.a.img["src"])
            episode.SetDate(item.a.em.contents[0][-10:])
            episode.SetPage(page)
            episode.SetTotalpage(totalpage)
            episodelist.append(episode)

        return episodelist
Exemplo n.º 2
0
    def Genre(self, genre, filter, page, totalpage):
        """List the videos broadcast on the date mapped to *genre*."""
        day = self.genre[genre]
        url = (self.url_base + '/ZDFmediathek/xmlservice/web/sendungVerpasst?startdate='
               + day + '&enddate=' + day + '&maxLength=50')
        soup = BeautifulSoup(ba.FetchUrl(url, 2400), convertEntities="xml", smartQuotesTo="xml")

        genrelist = list()
        if len(soup) < 20:
            mc.ShowDialogNotification("No episode found for " + str(genre))
            return genrelist

        for item in soup.findAll('teaser'):
            # Skip teasers that are not playable videos.
            if item.type.contents[0] != 'video':
                continue
            genreitem = ba.CreateEpisode()
            genreitem.SetName(item.find('title').contents[0])
            genreitem.SetId(item.find('assetid').contents[0])
            # Keep only the trailing HH:MM of the air time.
            genreitem.SetDate(item.find('airtime').contents[0][-5:])
            genreitem.SetPage(page)
            genreitem.SetTotalpage(totalpage)
            genrelist.append(genreitem)

        if not genrelist:
            mc.ShowDialogNotification("No episode found for " + str(genre))
        return genrelist
Exemplo n.º 3
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Fetch the newest episodes of a show from the ZDF XML service."""
        url = self.url_base + '/ZDFmediathek/xmlservice/web/aktuellste?id=' + stream_id + '&maxLength=50'
        data = ba.FetchUrl(url, 3600)

        episodelist = list()
        # Check the reply before parsing (the original parsed first and
        # checked afterwards, wasting work on an empty response).
        if len(data) < 5:
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return episodelist

        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        for info in soup.findAll('teaser'):
            # Only teasers of type 'video' are playable episodes.
            if info.type.contents[0] != 'video':
                continue
            title = info.find('title')  # the original fetched this twice
            detail = info.find('detail')
            asset_id = info.find('assetid')
            airtime = info.find('airtime').contents[0]
            thumb = self.url_base + '/ZDFmediathek/contentblob/' + str(asset_id.contents[0]) + '/timg276x155blob'

            episode = ba.CreateEpisode()
            episode.SetName(title.contents[0])
            episode.SetId(asset_id.contents[0])
            episode.SetDescription(stream_name + ': ' + detail.contents[0])
            episode.SetThumbnails(thumb)
            episode.SetDate(airtime)
            episode.SetPage(page)
            episode.SetTotalpage(totalpage)
            episodelist.append(episode)

        return episodelist
Exemplo n.º 4
0
def getTagsForUrl(url):
    """Propose tags for *url*.

    Extracts the visible text of the page, runs the tag-proposal engine
    over it, then merges in any tags already stored for this URL.
    """
    try:
        content = resource.get(url).decodeBody().lower()
    except Exception:
        # Fetch/decode failure: fall back to an empty document rather
        # than aborting (the original used a bare 'except:').
        content = ""

    soup = BeautifulSoup(content)
    texts = soup.findAll(text=True)

    def visible(element):
        # Drop text nodes that a browser would not render.
        if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
            return False
        elif re.match('<!--.*-->', str(element)):
            return False
        return True

    visibleText = " ".join(filter(visible, texts))

    result = getTagsProposalsForText(visibleText)

    # Merge previously stored tags for this URL, avoiding duplicates.
    entry = Entry.all().filter("url =", url).fetch(1)
    if len(entry) > 0:
        for t in entry[0].tags:
            name = Tag.get(t).name
            if name not in result:
                result.append(name)

    return result
    def SearchNzb(self, name):
        """Search nzbclub for *name* and fill the results list in window 14445."""
        url = ('http://www.nzbclub.com/nzbfeed.aspx?ig=1&st=5&sp=1&ns=1&sn=1&q='
               + quote_plus(name) + '&de=' + self.retention)
        soup = BeautifulSoup(ba.FetchUrl(url, 3600), convertEntities="xml", smartQuotesTo="xml")
        target = mc.GetWindow(14445).GetList(51)
        focus = int(target.GetFocusedItem())
        items = mc.ListItems()

        for entry in soup.findAll('item'):
            item = mc.ListItem(mc.ListItem.MEDIA_UNKNOWN)
            item.SetLabel(ba.ConvertASCII(entry.title.contents[0]))
            item.SetProperty('info', ba.ConvertASCII(entry.description.contents[0].replace("<br />", "\n")))
            item.SetProperty('date', str(entry.findAll('pubdate')[0].contents[0]))
            item.SetPath(str(entry.enclosure['url']))
            items.append(item)
        target.SetItems(items)

        # Restore the previous focus position, clamped to the new list length.
        last = len(items) - 1
        if focus > 0 and focus < last:
            target.SetFocusedItem(focus)
        elif focus >= last:
            target.SetFocusedItem(last)
def ShowEpisode(urlshow, title = ""):
    """Populate window 14000 / list 52 with the episodes of an iTunes podcast."""
    target_window = 14000
    target_control = 52

    mc.ShowDialogWait()
    data = ba.FetchUrl(urlshow, 3600)
    soup = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")

    target = mc.GetWindow(target_window).GetList(target_control)
    items = mc.ListItems()

    for row in soup.findAll('tr', {'class' : 'podcast-episode'}):
        try:
            title = row['preview-title']
        except:
            pass
        # Prefer the video preview, fall back to audio, else skip the row.
        try:
            link = row['video-preview-url']
        except:
            try:
                link = row['audio-preview-url']
            except:
                link = False
        if link:
            item = mc.ListItem(mc.ListItem.MEDIA_UNKNOWN)
            item.SetLabel(title.encode('utf-8', 'ignore'))
            item.SetPath(str(link))
            items.append(item)

    mc.HideDialogWait()
    target.SetItems(items)
Exemplo n.º 7
0
def ConvertSami(samiurl):
    """Convert the SAMI subtitle file at *samiurl* to SRT.

    Returns the path of the cached .srt file in the temp directory.
    """

    def _srt_time(start):
        # SAMI 'start' is milliseconds; render as H:M:S,mmm (py2 int division).
        msec = str(start)[-3:]
        total = int(str(start)) / 1000
        hour = total / 3600
        minute = (total - hour * 3600) / 60
        sec = total - hour * 3600 - minute * 60
        return str(hour) + ':' + str(minute) + ':' + str(sec) + ',' + str(msec)

    data = FetchUrl(samiurl, 0)
    soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
    i = 1
    sync = ''
    temp = ''
    for info in soup.findAll("sync"):
        if info.find(attrs={"class": "ENUSCC"}):
            # Cue start: emit the index, remember the text, emit start time.
            sync += str(i) + '\n'
            temp = info.find(attrs={"class": "ENUSCC"}).contents[0]
            sync += _srt_time(info['start'])
            i += 1
        else:
            # Cue end: emit the closing timestamp plus the stored text.
            sync += ' --> ' + _srt_time(info['start']) + '\n'
            sync += str(temp) + '\n' + '\n'

    # Write the file once after converting the whole document (the
    # original rewrote it on every loop iteration, and crashed with a
    # NameError when no <sync> tags were present).
    subFilePath = mc.GetTempDir() + os.sep + 'subcache.srt'
    f = open(subFilePath, "w")
    f.write(sync)
    f.close()
    return subFilePath
Exemplo n.º 8
0
    def Genre(self, genre, filter, page, totalpage):
        """List ARD videos broadcast on the day mapped to *genre*."""
        day = self.genre[genre]
        url = self.url_base + '/ard/servlet/ajax-cache/3517242/view=list/datum=' + day + '/senderId=208/zeit=1/index.html'
        data = ba.FetchUrl(url, 2400)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        genrelist = list()
        # The original wrote 'if data < 20:' (str vs int — always False in
        # Python 2, so the guard was dead) and referenced an undefined
        # 'stream_name' in the message; both fixed here.
        if len(data) < 20:
            mc.ShowDialogNotification("No episode found for " + str(genre))
            return genrelist

        for info in soup.findAll('div', {'class' : 'mt-media_item'}):
            # Only rows flagged with the video icon are playable.
            if info.findAll('span', {'class' : 'mt-icon mt-icon_video'}):
                detail = info.find('a')
                airtime = info.find('span', {'class' : 'mt-airtime'})

                genreitem = ba.CreateEpisode()
                genreitem.SetName(detail.contents[0])
                genreitem.SetId(detail['href'].split('=')[1])
                genreitem.SetDate(airtime.contents[0][-9:])
                genreitem.SetPage(page)
                genreitem.SetTotalpage(totalpage)
                genrelist.append(genreitem)
        if len(genrelist) < 1:
            mc.ShowDialogNotification("No episode found for " + str(genre))
        return genrelist
Exemplo n.º 9
0
    def Episode(self, show_name, show_id, page, totalpage):
        """Search the YouTube GData feed for *show_id* and return episodes.

        Builds five parallel lists (title/path/thumb/desc/published) from
        the feed and zips them together; this assumes the feed yields the
        fields in matching order -- TODO confirm that holds for every reply.
        """
        url = self.url_base + '/feeds/api/videos?q=' + quote_plus(show_id) + '&start-index=1&max-results=20&format=5&orderby=viewCount'
        data = ba.FetchUrl(url, 0)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        title = []
        path = []
        thumb = []
        desc = []
        pup = []
        for info in soup.findAll('title'):
            tmp_name = info.contents[0]
            title.append(tmp_name)

        for info in soup.findAll('media:content', {'isdefault' : 'true'}):
            # Request the fmt=22 (HD) variant of the default media stream.
            tmp_id = info['url']  + '&fmt=22'
            path.append(tmp_id)

        for info in soup.findAll('media:thumbnail', {'height' : '240'}):
            tmp_thumb = info['url']
            thumb.append(tmp_thumb)
        
        for info in soup.findAll('content'):
            tmp_desc = info.contents[0]
            desc.append(tmp_desc)
        
        for info in soup.findAll('published'):
            # Keep only the date part (YYYY-MM-DD) of the timestamp.
            tmp_pub = info.contents[0][0:10]
            pup.append(tmp_pub)


        # The first <title> is the feed's own title, not an episode.
        title.pop(0)

        episodelist = list()
        for title_i,path_i,thumb_i,desc_i,pup_i in izip(title,path,thumb,desc,pup):
            episode = ba.CreateEpisode()
            episode.SetName(title_i)
            episode.SetId(path_i)
            episode.SetDescription(desc_i)
            episode.SetThumbnails(thumb_i)
            episode.SetDate(pup_i)
            episode.SetPage(page)
            episode.SetTotalpage(totalpage)
            episodelist.append(episode)

        return episodelist
Exemplo n.º 10
0
    def List(self):
        """Build the A-Z show list from the ZDF 'sendungenAbisZ' service."""
        titles = []
        asset_ids = []
        for letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
            url = (self.url_base + '/ZDFmediathek/xmlservice/web/sendungenAbisZ?characterRangeStart='
                   + letter + '&detailLevel=2&characterRangeEnd=' + letter)
            data = ba.FetchUrl(url)
            soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
            titles.extend(soup.findAll('title'))
            asset_ids.extend(soup.findAll('assetid'))

        streamlist = list()
        for title_tag, id_tag in izip(titles, asset_ids):
            stream = ba.CreateStream()
            stream.SetName(title_tag.contents[0].replace('"', ''))
            stream.SetId(id_tag.contents[0])
            streamlist.append(stream)

        return streamlist
Exemplo n.º 11
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Fetch one page of a show's episodes; *stream_id* is the page URL."""
        url = str(stream_id) + '/page=' + str(page)
        data = ba.FetchUrl(url, 3600)

        if data == "":
            mc.ShowDialogNotification("Geen afleveringen gevonden voor " + str(stream_name))
            return ba.CreateEpisode()

        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        # Derive the page count from the paginator spans once (totalpage
        # arrives as "" on the first call).
        if totalpage == "":
            try:
                pages = soup.findAll( 'div', {'class' : 'paginator'})[0]
                pages = pages.findAll('span')
                totalpage = len(pages) - 1
            except:
                totalpage = 1

        div_main = soup.findAll('div', {'class' : 'mo-c double'})[0]
        div_show = div_main.findAll('div', {'class' : 'wrapper'})[0]

        # 'thumb' and 'airtime' divs are parallel lists for the same rows.
        info = div_show.findAll('div', {'class' : 'thumb'})
        airtime = div_show.findAll('div', {'class' : 'airtime'})

        if len(info) < 1:
            mc.ShowDialogNotification("Geen afleveringen gevonden voor " + str(stream_name))
            return ba.CreateEpisode()

        episodelist = list()
        for info_i, airtime_i in izip(info, airtime):
            episode = ba.CreateEpisode()
            episode.SetName(stream_name)
            episode.SetId(self.url_base + info_i.a['href'])
            episode.SetThumbnails(self.url_base + info_i.find('img')['src'])
            episode.SetDate(airtime_i.a.span.contents[0])
            episode.SetPage(page)
            episode.SetTotalpage(totalpage)
            episodelist.append(episode)

        return episodelist
    def GetCategory(self):
        """Fill list 51 of window 15000 with the SABnzbd queue categories."""
        url = ('http://' + self.host + ':' + self.port
               + '/api?mode=queue&start=START&limit=LIMIT&output=xml&apikey=' + self.api)
        data = ba.FetchUrl(url, 360).decode('utf-8')
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        target = mc.GetWindow(15000).GetList(51)
        items = mc.ListItems()
        for category in soup.findAll('category'):
            entry = mc.ListItem(mc.ListItem.MEDIA_UNKNOWN)
            entry.SetLabel(str(category.contents[0].encode('utf-8')))
            items.append(entry)
        target.SetItems(items)
Exemplo n.º 13
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Page through the site's quicksearch results for a series."""
        url = self.url_base + '/quicksearch?page=' + str(page) + '&series_id=' + stream_id
        data = ba.FetchUrl(url, 3600)

        if data == "":
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            episodelist = list()
            return episodelist

        # The results are embedded in a JS call; extract and unescape them.
        data = re.compile('"quicksearch-results", "(.*?)"\)', re.DOTALL + re.IGNORECASE).search(str(data)).group(1)
        data = data.replace('\\"','"').replace('\\n','').replace('\\t','')
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        if totalpage == "":
            try:
                pages = soup.findAll( 'div', {'class' : 'pagination'})[0]
                pages = pages.findAll('a')
                # The last link is 'next'; the one before it is the page count.
                totalpage = int(pages[len(pages)-2].contents[0])
            except:
                totalpage = 1

        episodelist = list()
        # NOTE: the original loop body was indented with tabs inside a
        # space-indented method; re-indented consistently with spaces.
        for info in soup.findAll('li'):
            try:
                id = info.h3.a['href']
            except:
                id = False
            if id:
                episode = ba.CreateEpisode()
                episode.SetName(stream_name)
                episode.SetId(self.url_base + id)
                episode.SetDescription(info.span.contents[0].replace(' ','') + ' - '+ info.p.contents[0])
                episode.SetThumbnails(info.a.img['src'])
                episode.SetDate(info.h3.a.contents[0])
                episode.SetPage(page)
                episode.SetTotalpage(totalpage)
                episodelist.append(episode)

        return episodelist
Exemplo n.º 14
0
 def crawl(self, albumArtist, album, releaseDate):
     """Search Beatport for *album* by *albumArtist*.

     When a release whose date equals *releaseDate* is found, returns a
     tuple (album, label, catalog number, release date, album art URL);
     otherwise returns None implicitly -- callers must handle that.
     """
     releaseFound = False
     searchPage = urllib.urlopen("{0}/search?query={1}&facets[]=fieldType:release".format(self.domain, urllib.quote(albumArtist + " " + album))).read()
     searchHtml = BeautifulSoup(searchPage)
     #releaseLinks = searchHtml.findAll('a', { 'name' : 'unit_title' })
     releaseLinks = []
     releases = searchHtml.findAll('li', { 'name' : re.compile('tiles-list_release_[0-9]+') })

     for release in releases:
         thisTitle = release.find('a', { 'name' : 'unit_title' })
         thisAlbum = thisTitle.string
         thisUrl = thisTitle['href']
         # The date is the second child of the minor-info span, wrapped in " | ".
         thisDate = release.find('span', { 'class' : 'itemRenderer-minor' }).contents[1].replace(" | ", "").strip()
         print thisDate
         if releaseDate and releaseDate == thisDate:
             releaseUrl = thisUrl
             beatportAlbum = thisAlbum
             releaseFound = True
             break

     if releaseFound:
         # open the release page
         releasePage = urllib.urlopen("{0}{1}".format(self.domain, releaseUrl))
         releaseHtml = BeautifulSoup(releasePage)

         # now that we are here, we can obtain some of the release info
         # NOTE(review): assumes the meta-data table lists date, label and
         # catalog number in this fixed order -- confirm against the page.
         releaseInfoLabels = releaseHtml.findAll('td', { 'class' : 'meta-data-label' })

         beatportReleaseDate = releaseInfoLabels[0].nextSibling.string
         beatportLabel = releaseInfoLabels[1].nextSibling.a.string
         beatportCatalogNumber = releaseInfoLabels[2].nextSibling.string
         beatportAlbumArtUrl = releaseHtml.find('img', { 'class' : 'tile-image' })['src']

         return (self.decodeHtml(beatportAlbum), 
                 self.decodeHtml(beatportLabel), 
                 self.decodeHtml(beatportCatalogNumber), 
                 self.decodeHtml(beatportReleaseDate), 
                 beatportAlbumArtUrl)
Exemplo n.º 15
0
    def List(self):
        """Return one stream per programme link on the shows overview page."""
        url = self.url_base + '/content/ta_ent_video_shows_group/ta_ent_programme_result_module_skin.xinc'
        soup = BeautifulSoup(ba.FetchUrl(url), convertEntities="xml", smartQuotesTo="xml")

        streamlist = list()
        for anchor in soup.findAll('a'):
            # '#' anchors are placeholders, not programmes.
            if anchor['href'] == '#':
                continue
            stream = ba.CreateStream()
            stream.SetName(anchor['title'])
            stream.SetId(anchor['href'])
            streamlist.append(stream)

        return streamlist
Exemplo n.º 16
0
    def Search(self, search):
        """Query the BBC iPlayer suggest service and return matching streams."""
        url = ('http://search.bbc.co.uk/suggest?scope=iplayer&format=xml'
               '&callback=xml.suggest&q=' + quote_plus(search))
        data = ba.FetchUrl(url, 0)

        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        streamlist = list()
        for suggestion in soup.findAll('text'):
            name = suggestion.contents[0]
            stream = ba.CreateStream()
            stream.SetName(name)
            stream.SetId(ba.ConvertASCII(name))
            streamlist.append(stream)

        return streamlist
Exemplo n.º 17
0
    def Search(self, search):
        """POST the keyword to the site's ajax search and parse the results."""
        url = self.url_base + "/script/common/ajax_zoek.php"
        data = ba.FetchUrl(url, 0, False, "keyword=" + quote(search))

        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        streamlist = list()
        for hit in soup.findAll("li"):
            # Each <span> holds '????---id---title'; pull out both parts.
            raw = str(hit.span.contents[0])
            title = re.compile("---(.*?)$", re.DOTALL + re.IGNORECASE).search(raw[4:]).group(1)
            stream_id = re.compile("---(.*?)---", re.DOTALL + re.IGNORECASE).search(raw).group(1)
            stream = ba.CreateStream()
            stream.SetName(title)
            stream.SetId(stream_id)
            streamlist.append(stream)
        return streamlist
Exemplo n.º 18
0
    def List(self):
        """List the shows from the alphabetical index page."""
        data = ba.FetchUrl(self.url_base + "/alfabetisk")
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        active_tab = soup.findAll("div", {"class": "tab active"})[0]

        streamlist = list()
        for link in active_tab.findAll("a"):
            # Very short link texts are letter anchors, not show titles.
            if len(link.contents[0]) > 4:
                stream = ba.CreateStream()
                stream.SetName(link.contents[0])
                stream.SetId(link["href"])
                streamlist.append(stream)

        return streamlist
Exemplo n.º 19
0
    def Search(self, search):
        """Search Hulu for shows that have free full episodes."""
        url = (self.url_base + '/browse/search?alphabet=All&family_friendly=0'
               '&closed_captioned=0&has_free=1&has_huluplus=0&has_hd=0'
               '&channel=All&subchannel=&network=All'
               '&display=Shows%20with%20full%20episodes%20only&decade=All'
               '&type=tv&view_as_thumbnail=false&block_num=0&keyword=' + quote_plus(search))
        data = ba.FetchUrl(url)

        # The result markup is embedded in a JS call; extract and unescape it.
        data = re.compile('"show_list", "(.*?)"\)', re.DOTALL + re.IGNORECASE).search(str(data)).group(1)
        data = data.replace('\\u003c','<').replace('\\u003e','>').replace('\\','').replace('\\n','').replace('\\t','')
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        streamlist = list()
        for link in soup.findAll('a', {'onclick':True}):
            stream = ba.CreateStream()
            stream.SetName(link.contents[0])
            stream.SetId(link['href'])
            streamlist.append(stream)

        return streamlist
Exemplo n.º 20
0
    def Genre(self, genre, filter, page, totalpage):
        """List last-7-days broadcasts for *genre*, optionally narrowed by
        *filter* (a channel), one detail page at a time."""
        url = self.url_base + '/7dagen/' + genre
        if filter != "": url = url + ',' + str(filter)
        url = url + '?weergave=detail&page=' + str(page)
        data = ba.FetchUrl(url, 3600)
        if data == "":
            mc.ShowDialogNotification("No genre found for " + str(genre))
            genrelist = list()
            return genrelist
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        # Derive the page count once from the pagination links
        # (the second-to-last link holds the highest page number).
        if totalpage == "":
            try:
                pagediv = soup.findAll( 'div', {'class' : 'pagination'})[0]
                apage = pagediv.findAll("a")
                totalpage = int(apage[len(apage)-2].contents[0])
            except:
                totalpage = 1

        div_show = soup.find( 'table', {'class' : 'broadcasts detail'})

        genrelist = list()
        for info in div_show.findAll("tr"):
            # The broadcaster logo's alt text doubles as the filter value.
            omroep = info.findAll(attrs={"class" : "broadcaster-logo"})[0]['alt']
            if omroep == "Nederland 1": omroep = "nl1"
            elif omroep == "Nederland 2": omroep = "nl2"
            elif omroep == "Nederland 3": omroep = "nl3"
            # Rows without a real thumbnail carry the placeholder class.
            try:
                thumb = info.findAll(attrs={"class" : "thumbnail"})[0]['src']
            except:
                thumb = info.findAll(attrs={"class" : "thumbnail placeholder"})[0]['src']
            path = self.url_base + info.find(attrs={"class" : "thumbnail_wrapper"})['href']
            # Strip layout whitespace from the broadcast time.
            date = info.find(attrs={"class" : "time"}).time.contents[0].replace(' ','').replace('\n','').replace('\t','')
            title = info.findAll(attrs={"class" : "series"})[0].contents[0]
            desc = info.find('div', {'class' : 'description'}).p.contents[0]

            genreitem = ba.CreateEpisode()
            genreitem.SetName(title)
            genreitem.SetId(path)
            genreitem.SetDescription(desc)
            genreitem.SetThumbnails(thumb)
            genreitem.SetDate(date)
            genreitem.SetFilter(str(omroep).upper())
            genreitem.SetPage(page)
            genreitem.SetTotalpage(totalpage)
            genrelist.append(genreitem)

        return genrelist
    def GetQuery(self):
        """Poll the SABnzbd queue API and refresh the list in window 14444."""
        url = 'http://'+ self.host + ':' + self.port + '/api?mode=queue&start=START&limit=LIMIT&output=xml&apikey=' + self.api

        try:
            data = ba.FetchUrl(url)
        except:
            # Daemon unreachable: leave the UI untouched.
            return
        
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        # Overall queue state; <speed> may be absent when the queue is idle.
        try: speed = soup.findAll('speed')[0].contents[0]
        except: speed = ''
        timeleft = soup.findAll('timeleft')[0].contents[0]
        pause_int = soup.findAll('pause_int')[0].contents[0]
        paused = soup.findAll('paused')[0].contents[0]
        slot = soup.findAll('slot')
        
        if pause_int == '0': pause_int = 'Unlimited'
        else: pause_int = pause_int + ' s'
        window = mc.GetWindow(14444)
        # Mirror the daemon's paused state on the pause toggle button.
        if str(paused) == "True":
            window.GetToggleButton(10198).SetSelected(True)
        else:
            window.GetToggleButton(10198).SetSelected(False)
            
        if slot:
            list = window.GetList(51)
            focus = int(list.GetFocusedItem())
            print focus

            list_items = mc.ListItems()
            for info in soup.findAll('slot'):
                # Round the percentage to the nearest 10 to pick the
                # matching progress icon (download-NN.png).
                percentage = int(info.percentage.contents[0])
                percentage = int(round(percentage/10.0)*10.0)
                list_item = mc.ListItem(mc.ListItem.MEDIA_UNKNOWN)
                list_item.SetLabel(ba.ConvertASCII(info.filename.contents[0]))
                list_item.SetPath(str(info.nzo_id.contents[0]))
                list_item.SetProperty('queued', str(info.status.contents[0]))
                list_item.SetProperty('info', str(info.mb.contents[0]) + 'Mb - ' + str(info.timeleft.contents[0]))
                list_item.SetProperty('status', 'download-' + str(percentage) + '.png')
                list_item.SetProperty('speed', str(speed) + 'bps')
                list_item.SetProperty('timeleft', str(timeleft))
                list_item.SetProperty('paused', str(paused))
                list_item.SetProperty('pause_int', str(pause_int))
                list_items.append(list_item)
            list.SetItems(list_items)
            print len(list_items)
            # Keep the focus near its previous position after the refresh.
            max = len(list_items) - 1
            if focus > 0 and focus < max:
                list.SetFocusedItem(focus)
            elif focus >= max:
                list.SetFocusedItem(max)
        else:
            self.EmptySearch(14444)
Exemplo n.º 22
0
    def List(self):
        """List the shows from the alphabetical block on the home page."""
        soup = BeautifulSoup(ba.FetchUrl(self.url_home), convertEntities="xml", smartQuotesTo="xml")

        alphabetical = soup.findAll('div', {'class' : 'mo-a alphabetical'})[0]
        wrapper = alphabetical.findAll('div', {'class' : 'wrapper'})[0]

        streamlist = list()
        for link in wrapper.findAll('a'):
            show_name = link.contents[0]
            # Skip names on the exclusion list.
            if show_name in self.exclude:
                continue
            stream = ba.CreateStream()
            stream.SetName(show_name)
            stream.SetId(self.url_base + link['href'])
            streamlist.append(stream)

        return streamlist
Exemplo n.º 23
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Page through an ARD show's episode list."""
        url = self.url_base + '/ard/servlet/ajax-cache/3516962/view=list/documentId=' + stream_id + '/goto=' + str(page) + '/index.html'
        data = ba.FetchUrl(url, 3600)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        episodelist = list()
        # The original wrote 'if data < 20:' (str vs int — always False in
        # Python 2, so the guard was dead); compare the reply length instead.
        if len(data) < 20:
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return episodelist

        if totalpage == "":
            try:
                paging = soup.find('li', {'class' : 'mt-paging ajax-paging-li'})
                # The third span reads e.g. '1 / 12'; take the trailing number.
                count = paging.findAll('span')[2].contents[0][-2:].replace(' ', '')
                totalpage = int(count)
            except:
                totalpage = 1

        for info in soup.findAll('div', {'class' : 'mt-media_item'}):
            # Only rows flagged with the video icon are playable.
            if info.findAll('span', {'class' : 'mt-icon mt-icon_video'}):
                detail = info.find('a')
                airtime = info.find('span', {'class' : 'mt-airtime'})
                thumb = info.find('img')

                episode = ba.CreateEpisode()
                episode.SetName(stream_name)
                episode.SetId(detail['href'].split('=')[1])
                episode.SetDescription(detail.contents[0])
                episode.SetThumbnails(self.url_base + thumb['data-src'])
                episode.SetDate(airtime.contents[0])
                episode.SetPage(page)
                episode.SetTotalpage(totalpage)
                episodelist.append(episode)

        return episodelist
def ShowNet():
    """Fill window 14000 / list 51 with the free top podcasts from the
    iTunes RSS feed for the configured country and genre."""
    mc.ShowDialogWait()
    targetcontrol = 51
    targetwindow = 14000

    countryid = config.GetValue("country")
    genreid = config.GetValue("genre")
    if countryid == '': countryid = 'US'
    
    url = 'http://itunes.apple.com/'+countryid+'/rss/toppodcasts/limit=50/genre='+genreid+'/xml'

    list = mc.GetWindow(targetwindow).GetList(targetcontrol)
    list_items = mc.ListItems()

    data = ba.FetchUrl(url, 3600)
    soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

    for info in soup.findAll('entry'):
        title = info.find('im:name').contents[0]
        link = info.link['href']
        thumb = info.find('im:image', {'height' : '170'}).contents[0]
        price = info.find('im:price')['amount']
        artist = info.find('im:artist').contents[0]
        # Some entries have no summary.
        try: summary = info.summary.contents[0]
        except: summary = ''

        # Only list free podcasts.
        if price == '0':
            list_item = mc.ListItem(mc.ListItem.MEDIA_UNKNOWN)
            list_item.SetLabel(title.encode('utf-8', 'ignore'))
            list_item.SetThumbnail(str(thumb))
            list_item.SetPath(str(link))
            list_item.SetDescription(summary.encode('utf-8', 'ignore'))
            list_item.SetArtist(artist.encode('utf-8', 'ignore'))
            list_items.append(list_item)

    # 'country' and 'genre' are module-level tables mapping ids to
    # display names -- presumably defined elsewhere in this module.
    Label = mc.GetWindow(targetwindow).GetLabel(10102)
    Label.SetLabel('[B]Country:[/B] '+country[countryid]+'[CR][B]Genre:[/B] '+genre[genreid])
    
    mc.HideDialogWait()
    list.SetItems(list_items)
Exemplo n.º 25
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """List the full episodes advertised on a show's page."""
        url = self.url_base + stream_id
        data = ba.FetchUrl(url, 3600)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        episodelist = list()
        # The original wrote 'if data < 20:' (str vs int — always False in
        # Python 2, so the guard was dead); compare the reply length instead.
        if len(data) < 20:
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return episodelist

        try:
            episodes = soup.findAll('div', {'id' : 'slidefullepisodes'})[0]
        except:
            # No "full episodes" carousel on this page.
            return episodelist

        for info in episodes.findAll('li'):
            detail = info.findAll('a')[2]
            title = info.findAll('a')[1]
            thumb = info.findAll('img')[1]
            episodenr = info.find('strong')
            # The numeric id is the last '-'-separated part of the link.
            linkid = title['href'].split('-').pop()

            episode = ba.CreateEpisode()
            episode.SetName(title['title'])
            episode.SetId(linkid)
            episode.SetDescription(detail['title'])
            episode.SetThumbnails(thumb['src'])
            episode.SetDate(episodenr.contents[0])
            episode.SetPage(page)
            episode.SetTotalpage(totalpage)
            episodelist.append(episode)

        return episodelist
Exemplo n.º 26
0
    def Genre(self, genre, filter, page, totalpage):
        """List TMF videos for the selected genre tab."""
        url = "http://www.tmf.nl/ajax/?genreTabs=" + self.genrelist[genre]
        data = ba.FetchUrl(url, 3600, True)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        genrelist = list()
        if len(data) < 1:
            mc.ShowDialogNotification("No data found for " + str(genre))
            # The original returned the undefined name 'episodelist' here,
            # which raised a NameError instead of an empty result.
            return genrelist

        for info in soup.findAll("div", {"class": "title"}):
            link = info.a["href"]
            # The video id sits between 'video/' and the next '/'.
            id = re.compile("video\/(.*?)\/", re.DOTALL + re.IGNORECASE).search(link).group(1)
            genreitem = ba.CreateEpisode()
            genreitem.SetName(info.a["title"])
            genreitem.SetId(id)
            genreitem.SetPage(page)
            genreitem.SetTotalpage(totalpage)
            genrelist.append(genreitem)

        if len(genrelist) < 1:
            mc.ShowDialogNotification("No data found for " + str(genre))

        return genrelist
Exemplo n.º 27
0
def ConvertFlashXML(path):
    """Download a flash timed-text XML subtitle and convert it to an SRT file.

    Args:
        path: url of the flash XML subtitle document.

    Returns:
        Filesystem path of the generated 'subcache.srt' in the temp dir.
    """
    data = FetchUrl(path)
    soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
    offset = 1  # shift every cue one second forward, as the original did
    sync = ''
    for index, info in enumerate(soup.findAll("p")):
        # Sequence number of this subtitle entry.
        sync += str(index + 1) + '\n'

        stamps = []
        for raw in (str(info['begin']), str(info['end'])):
            # Source timestamps look like "<seconds>.<fraction>".
            parts = raw.split('.')
            total = int(parts[0]) + offset
            minutes, seconds = divmod(total, 60)
            hours, minutes = divmod(minutes, 60)
            # BUG FIX: zero-pad hours/minutes/seconds — the SRT format
            # requires "HH:MM:SS,mmm"; unpadded values (e.g. "0:1:5")
            # are rejected by strict players.
            stamps.append('%02d:%02d:%02d,%s' % (hours, minutes, seconds, parts[1]))
        sync += stamps[0] + ' --> ' + stamps[1] + '\n'

        sync += ConvertASCII(info.renderContents()).replace('\n', '').replace('\t', '').replace('<br />', '\n')
        sync += '\n' + '\n'

    subFilePath = mc.GetTempDir() + os.sep + 'subcache.srt'
    # 'with' guarantees the handle is closed even if the write fails.
    with open(subFilePath, "w") as f:
        f.write(sync)
    return subFilePath
Exemplo n.º 28
0
    def Play(self, stream_name, stream_id, subtitle):
        """Build an RTMP play item for a BBC iPlayer episode.

        Args:
            stream_name: display name of the stream (unused here).
            stream_id:   iPlayer episode page url containing the episode id.
            subtitle:    subtitle flag (currently unused by this code path).

        Returns:
            ba.CreatePlay item configured with RTMP connection details.
        """
        # The episode id is embedded in the url as ".../episode/<id>/".
        id = re.compile('episode\/(.*?)\/', re.DOTALL + re.IGNORECASE).search(str(stream_id)).group(1)
        url = self.url_base + '/iplayer/episode/' + id + '/'
        data = ba.FetchUrl(stream_id)
        pid = re.compile('ep.setVersionPid\("(.*?)"\)', re.DOTALL + re.IGNORECASE).search(str(data)).group(1)

        # Ask the media selector for all available streams and pick the one
        # with the highest advertised bitrate.
        surl = 'http://www.bbc.co.uk/mediaselector/4/mtis/stream/' + pid
        data = ba.FetchUrl(surl)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        bitrates = []
        for info in soup.findAll('media', {'bitrate': True}):
            bitrates.append(int(info['bitrate']))
        bitrates.sort()
        # Renamed from 'max', which shadowed the builtin of the same name.
        best_bitrate = str(bitrates[-1])

        media = soup.find('media', {'bitrate': best_bitrate})
        # Prefer the akamai CDN; fall back to limelight when absent.
        connection = media.find('connection', {'supplier': 'akamai'})
        if not connection:
            connection = media.find('connection', {'supplier': 'limelight'})

        identifier = connection['identifier']
        server = connection['server']
        supplier = connection['supplier']
        # The auth attribute's capitalisation varies between suppliers.
        try:
            auth = connection['authString']
        except KeyError:
            auth = connection['authstring']

        # Not every connection element carries an application attribute;
        # 'live' is the documented fallback used previously.
        try:
            application = connection['application']
        except KeyError:
            application = 'live'

        swfplayer = 'http://www.bbc.co.uk/emp/10player.swf'

        play = ba.CreatePlay()
        play.SetRTMPPath(identifier)
        if supplier == "akamai":
            play.SetRTMPDomain('rtmp://' + server + '/' + application)
            play.SetRTMPAuth('rtmp://' + server + '/' + application + '?' + auth)
        elif supplier == "limelight":
            play.SetRTMPDomain('rtmp://' + server)
            play.SetRTMPAuth('rtmp://' + server + '/' + application + '?' + auth)
        play.SetRTMPSwf(swfplayer)

        return play
Exemplo n.º 29
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """List Hulu episodes for one season of a show.

        Args:
            stream_name: display name of the show.
            stream_id:   url of the show's page on hulu.com.
            page:        1-based season page to fetch (newest season first).
            totalpage:   total number of season pages (recomputed here).

        Returns:
            list of ba.CreateEpisode items; empty list on failure.
        """
        episodelist = list()
        data = ba.FetchUrl(stream_id, 3600)

        if data == "":
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            # BUG FIX: previously returned a single ba.CreateEpisode() here;
            # callers iterate the result, so return an empty list instead
            # (consistent with the other Episode implementations).
            return episodelist

        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        # One 'srh' table row per season on the show page.
        totalpage = len(soup.findAll('tr', 'srh'))

        try:
            episode_url = re.compile('VideoExpander.subheadingClicked\((.*?)\)"', re.DOTALL + re.IGNORECASE).search(str(data)).group(1)
        except AttributeError:
            # re.search returned None: the expander markup is missing.
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return episodelist

        season_number = re.compile('season_number=(.*?)\&', re.DOTALL + re.IGNORECASE).search(str(episode_url)).group(1)
        show_id = re.compile('show_id=(.*?)\&', re.DOTALL + re.IGNORECASE).search(str(episode_url)).group(1)

        # Seasons are listed newest-first; derive the season number shown on
        # each page by counting down from the latest one.
        seasons = []
        for i in range(0, totalpage):
            seasons.append(str(int(season_number) - i))
        intpage = int(page) - 1

        url = "http://www.hulu.com/videos/season_expander?order=desc&page=1&season_number=" + str(seasons[intpage]) + "&show_id=" + str(show_id) + "&sort=season&video_type=episode"

        data = ba.FetchUrl(url)
        # The episode table is embedded as an escaped javascript string.
        data = re.compile('srh-bottom-' + seasons[intpage] + '", "(.*?)"\);', re.DOTALL + re.IGNORECASE).search(str(data)).group(1)
        data = data.replace('\\u003c', '<').replace('\\u003e', '>').replace('\\', '')
        soup = BeautifulSoup(data)

        name = []
        link = []
        number = []
        thumb = []
        for cell in soup.findAll('td', {'class': 'c0'}):
            number.append(cell.contents[0])

        available = 0
        total_rows = 0
        for cell in soup.findAll('td', {'class': 'c1'}):
            name.append(cell.a.contents[0])
            link.append(cell.a['href'])
            try:
                thumb.append(self.GetThumb(re.compile('/watch/(.*?)/', re.DOTALL + re.IGNORECASE).search(str(cell.a['href'])).group(1)))
            except Exception:
                # No id in the link or thumbnail lookup failed; keep the
                # lists aligned with an empty placeholder.
                thumb.append('')
            total_rows += 1
            # NOTE(review): rows carrying a 'vex-h' div appear to mark
            # unavailable episodes — only the leading run without it is
            # emitted below; confirm against the site markup.
            if len(cell.findAll('div', 'vex-h')) == 0:
                available += 1

        # When some rows are unavailable, this is the last useful page.
        if available != total_rows:
            totalpage = page

        for x in range(0, available):
            episode = ba.CreateEpisode()
            episode.SetName(stream_name)
            episode.SetId(link[x])
            episode.SetDescription('Episode: ' + number[x] + ' - ' + name[x])
            episode.SetThumbnails(thumb[x])
            episode.SetDate('Season: ' + seasons[intpage])
            episode.SetPage(page)
            episode.SetTotalpage(totalpage)
            episodelist.append(episode)

        return episodelist