예제 #1
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Fetch a show page, follow its RSS link, and return the episodes."""
        page_url = self.url_base + stream_id
        html = tools.urlopen(self.app, page_url, {'cache': 3600})

        if html == "":
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return []

        # The show page links to its own RSS feed; pull that relative path out.
        feed_path = re.search('</a> <a href="(.*?)">RSS</a>', html).group(1)

        feed_xml = tools.urlopen(self.app, self.url_base + feed_path, {'cache': 3600})
        soup = BeautifulStoneSoup(feed_xml, convertEntities="xml", smartQuotesTo="xml")

        episodes = []
        for item in soup.findAll('item'):
            ep = CreateEpisode()
            ep.name = item.title.contents[0]
            ep.id = item.link.contents[0]
            ep.description = item.description.contents[0]
            ep.thumbnails = item.thumbnailimage.contents[0]
            ep.date = item.pubdate.contents[0]
            ep.page = page
            ep.totalpage = totalpage
            episodes.append(ep)
        return episodes
    def Play(self, stream_name, stream_id, subtitle):
        """Resolve a stream page to a playable WVC1 stream.

        Extracts the episode id from the page, obtains a short-lived security
        key, derives the md5 token the stream API requires, and returns a
        CreatePlay item (with an optional SAMI subtitle).  Returns None when
        no stream or security key is available.
        """
        data = tools.urlopen(self.app, stream_id, {'cache': 3600})
        streamid = re.compile('data-episode-id="(.*?)"', re.DOTALL + re.IGNORECASE).search(str(data)).group(1)

        if streamid == "":
            mc.ShowDialogNotification("Geen stream beschikbaar...")
            # BUG FIX: previously fell through and queried the API with an
            # empty episode id.
            return

        data = tools.urlopen(self.app, 'http://pi.omroep.nl/info/security', {'cache': 0})
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        try:
            key = soup.session.key.contents[0]
        except (AttributeError, IndexError):
            mc.ShowDialogNotification("Kan de security key niet ophalen")
            return
        security = base64.b64decode(key)

        # The token is md5("<episode id>|<second half of the decoded key>").
        securitystr = str(security).split('|')[1]
        md5code = md5.md5(streamid + '|' + securitystr).hexdigest()

        streamdataurl = 'http://pi.omroep.nl/info/stream/aflevering/' + str(streamid) + '/' + str(md5code).upper()
        data = tools.urlopen(self.app, streamdataurl, {'cache': 0}).decode('utf-8')
        xmlSoup = BeautifulSoup(data)
        streamurl = xmlSoup.find(attrs={"compressie_formaat": "wvc1"})
        # Strip the whitespace XML pretty-printing leaves inside the URL.
        url_play = streamurl.streamurl.contents[0].replace(" ", "").replace("\n", "").replace("\t", "")

        play = CreatePlay()
        play.path = url_play
        if subtitle:
            play.subtitle = self.GetSubtitle(security, streamid)
            play.subtitle_type = 'sami'

        return play
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Return the episodes listed in the show's RSS feed."""
        show_url = self.url_base + stream_id
        html = tools.urlopen(self.app, show_url, {'cache': 3600})

        if html == "":
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return []

        # Follow the RSS link embedded in the show page.
        rss_path = re.search('</a> <a href="(.*?)">RSS</a>', html).group(1)
        feed = tools.urlopen(self.app, self.url_base + rss_path, {'cache': 3600})
        soup = BeautifulStoneSoup(feed, convertEntities="xml", smartQuotesTo="xml")

        result = []
        for entry in soup.findAll('item'):
            ep = CreateEpisode()
            ep.name = entry.title.contents[0]
            ep.id = entry.link.contents[0]
            ep.description = entry.description.contents[0]
            ep.thumbnails = entry.thumbnailimage.contents[0]
            ep.date = entry.pubdate.contents[0]
            ep.page = page
            ep.totalpage = totalpage
            result.append(ep)
        return result
    def Episode(self, stream_name, stream_id, page, totalpage):
        """List one page of episodes for a Hulu show via its JSON API.

        Computes the total page count on the first call (totalpage == "")
        and returns a list of CreateEpisode items (empty on failure).
        """
        data = tools.urlopen(self.app, stream_id, {'cache':3600})

        if not data:
            return []

        show_id = re.compile('show\/(.*?)\?size\=', re.DOTALL + re.IGNORECASE).search(str(data)).group(1)

        url  = self.url_base + "/api/2.0/videos.json?free_only="+self.free+"&include_seasons=true&order=asc&shorter_cache=true&show_id="+show_id+"&sort=original_premiere_date&video_type%5B%5D=episode&video_type%5B%5D=game&items_per_page=" + str(self.pageSize) + "&position=" + str(self.pageSize * (page - 1)) + "&_user_pgid=1&_content_pgid=67&_device_id=1"

        data = tools.urlopen(self.app, url)
        json_data = json.loads(data)

        if totalpage == "":
            total = int(json_data['total_count'])
            if total > self.pageSize:
                # BUG FIX: int/int division truncated before math.ceil ran,
                # undercounting the number of pages; divide as float first.
                totalpage = int(math.ceil(total / float(self.pageSize)))
            else:
                totalpage = 1

        episodelist = list()
        for item in json_data['data']:
            episode             =   CreateEpisode()
            episode.name        =   stream_name
            episode.id          =   self.url_base + '/watch/'+str(item['video']['id'])
            episode.description =   'Episode: ' + str(item['video']['episode_number']) + ' - '  + str(item['video']['title'])
            episode.thumbnails  =   'http://ib1.huluim.com/video/'+str(item['video']['content_id'])+'?size=220x124'
            episode.date        =   'Season: ' + str(item['video']['season_number'])
            episode.page        =   page
            episode.totalpage   =   totalpage
            episodelist.append(episode)

        return episodelist
예제 #5
0
    def GetPath(self, stream_id):
        """Resolve a stream id to a playable path.

        Composite ids have the form 'url|processor_url'; the processor
        description is fetched and is either a 'v2' key/value recipe or a
        plain (url, regex) pair.  Plain ids are returned unchanged.  Returns
        "" when no path could be extracted, or None for an empty recipe.
        """
        if len(stream_id.split('|')) > 1:
            urlpart = stream_id.split('|')
            url = urlpart[1] + '?url=' + urlpart[0]
            data = self.ParseProcessor(url)
            keys = data.keys()

            if len(data) < 2:
                return

            # Detect which processor format we received.
            # BUG FIX: the format flag was left undefined when neither probe
            # matched, raising NameError below; default to 0 (unknown).
            fmt = 0
            try:
                if data[0] == 'v2':
                    fmt = 1
            except (KeyError, TypeError):
                pass

            try:
                if 'http' in data[0]:
                    fmt = 2
            except (KeyError, TypeError):
                pass

            if fmt == 1:
                # v2 recipe: optional url/cookie/regex/postdata fields.
                id_url = ''
                id_cookie = ''
                id_regex = ''
                id_postdata = ''

                if 's_url=' in keys: id_url = data['s_url=']
                if 's_cookie=' in keys: id_cookie = data['s_cookie=']
                if 'regex=' in keys: id_regex = data['regex=']
                if 's_postdata=' in keys: id_postdata = data['s_postdata=']
                # Fall back to the original media url when none was given.
                if not id_url: id_url = urlpart[0]

                params = {'cookie': str(id_cookie), 'post': str(id_postdata)}
                data = tools.urlopen(self.app, str(id_url), params)

                try:
                    path = re.compile(str(id_regex), re.DOTALL + re.IGNORECASE).search(str(data)).group(1)
                except (AttributeError, re.error):
                    path = ""

            elif fmt == 2:
                # Simple pair: fetch data[0] and scrape with regex data[1].
                id_url = data[0]
                id_regex = data[1]
                data = tools.urlopen(self.app, str(id_url))

                try:
                    path = re.compile(str(id_regex), re.DOTALL + re.IGNORECASE).search(str(data)).group(1)
                except (AttributeError, re.error):
                    path = ""

            else:
                path = ""
        else:
            path = stream_id

        return path
예제 #6
0
 def retreive(self, time=3600):
     """Download the RTL channel JSON and return the entries newest-first."""
     url = "http://bartsidee.nl/boxee/tmp/rtl.json"
     # The default refresh interval uses a slightly longer server cache.
     if time == 3600:
         raw = tools.urlopen(self.app, url, {'cache': 5400})
     else:
         raw = tools.urlopen(self.app, url)
     items = json.loads(raw)
     items.reverse()
     return items
예제 #7
0
    def Play(self, stream_name, stream_id, subtitle):
        """Build a play item, proxying RTMP streams through flowplayer."""
        data = tools.urlopen(self.app, str(stream_id), {"cache": 3600})

        clip = re.search('file \: "(.*?)"', data).group(1)
        streamer = re.search('streamer \: "(.*?)"', data)
        domain = streamer.group(1) if streamer else False

        play = CreatePlay()
        if domain:
            # RTMP stream: route through the flowplayer proxy page.
            proxy = ("http://www.bartsidee.nl/flowplayer/player.php?url="
                     + str(domain)
                     + "&clip="
                     + str(clip).replace(".flv", ""))
            play.content_type = "video/x-flv"
            play.path = quote_plus(proxy)
            play.domain = "bartsidee.nl"
        else:
            # Progressive file: play it directly.
            play.path = clip

        return play
예제 #8
0
    def List(self):
        """Scrape the front page and return the available streams."""
        html = tools.urlopen(self.app, self.base_url)
        soup = BeautifulSoup(html,
                             convertEntities=BeautifulSoup.HTML_ENTITIES,
                             smartQuotesTo="xml")
        content = soup.findAll('div', {'id': 'mainContent'})[0]

        # Titles and links live in separate anchor classes, in matching order.
        names = [a.contents[0] for a in content.findAll('a', {'class': 's'})]
        links = [a['href'] for a in content.findAll('a', {'class': 'b'})]

        streamlist = list()
        for name, link in izip(names, links):
            stream = CreateList()
            stream.name = name
            stream.id = link
            streamlist.append(stream)

        return streamlist
    def Play(self, stream_name, stream_id, subtitle):
        """Pick the highest-bitrate stream and wrap it in a flowplayer URL."""
        video_id = re.search('video\/(.*?)-', str(stream_id)).group(1)

        playlist_url = 'http://eredivisielive.nl/content/playlist/website/%s_ere_lr.xml' % (video_id,)
        data = tools.urlopen(self.app, playlist_url)
        soup = BeautifulStoneSoup(data, convertEntities=BeautifulSoup.XML_ENTITIES, smartQuotesTo="xml")

        domain = soup.findAll('videodock:streamer')[0].contents[0]

        # Map bitrate -> file url so we can select the best quality.
        files = {}
        for item in soup.findAll('media:content'):
            files[int(item['bitrate'])] = item['url']
        best = max(files)

        url = 'http://www.bartsidee.nl/flowplayer/player.php?url=' + str(domain) + '&clip=mp4:' + str(files[best])
        play = CreatePlay()
        play.content_type = 'video/x-flv'
        play.path = quote_plus(url)
        play.domain = 'bartsidee.nl'

        return play
예제 #10
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Scrape one page of a WordPress video gallery."""
        url = str(stream_id) + "/page/" + str(page)
        html = tools.urlopen(self.app, url, {"cache": 3600})
        soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")

        gallery = soup.findAll("div", {"id": "videogallery"})[0]

        # Page count comes from the wp-pagenavi widget when present.
        try:
            nav = soup.findAll("div", {"class": "wp-pagenavi"})[0]
            totalpage = len(nav.findAll(attrs={"class": re.compile("^page")})) + 1
        except:
            totalpage = 1

        episodes = list()
        for item in gallery.findAll("div", {"class": "videoitem"}):
            thumb_div = item.findAll("div", {"class": "thumbnail"})[0]
            # Thumbnail url is embedded in an inline CSS background-image.
            thumb = re.search("background-image\: url\((.*?)\)", thumb_div.div["style"]).group(1)

            ep = CreateEpisode()
            ep.name = thumb_div.a["title"]
            ep.id = thumb_div.a["href"]
            ep.description = " ".join(item.p.a.contents[0].split())
            ep.thumbnails = thumb
            ep.page = page
            ep.totalpage = totalpage
            episodes.append(ep)

        return episodes
예제 #11
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """List iPlayer episodes whose title starts with stream_id."""
        url = (self.url_base
               + '/iplayer/widget/startswith/site/bigscreen/media_set/pc-bigscreen/json/1/bigscreen_layout/sd/service_type/tv/template/index/starts_with/'
               + quote(stream_id))
        data = tools.urlopen(self.app, url, {'cache': 3600})

        if len(data) < 10:
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return list()

        episodes = list()
        for entry in json.loads(data)['data']:
            # The thumbnail id is embedded in the episode url.
            pid = re.compile('episode\/(.*?)\/', re.DOTALL + re.IGNORECASE).search(entry['url']).group(1)

            ep = CreateEpisode()
            ep.name = entry['s']
            ep.id = self.url_base + entry['url']
            ep.thumbnails = 'http://node1.bbcimg.co.uk/iplayer/images/episode/' + pid + '_288_162.jpg'
            ep.date = entry['t']
            ep.page = page
            ep.totalpage = totalpage
            episodes.append(ep)
        return episodes
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Scrape one overview page of free episodes for a show."""
        url = self.url_base + str(stream_id) + 'pagina/' + str(page) + '/'
        html = tools.urlopen(self.app, url, {'cache': 3600})
        soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")

        overview = soup.findAll('div', {'id': 'video-overview'})[0]

        # Derive the page count from the pagination links when available.
        try:
            pager = soup.findAll('div', {'id': 'pagination-pages'})[0]
            totalpage = len(pager.findAll('a')) + 1
        except:
            totalpage = 1

        episodes = list()
        for item in overview.findAll('li'):
            # Skip paid content.
            if item.findAll('span', {'class': 'video-payment-noprice-button'}):
                continue

            ep = CreateEpisode()
            ep.name = item.findAll('span', {'class': 'title'})[0].contents[0]
            ep.id = item.a['href']
            ep.thumbnails = item.a.img['src']
            ep.date = item.findAll('span', {'class': 'date'})[0].contents[0]
            ep.page = page
            ep.totalpage = totalpage
            episodes.append(ep)

        return episodes
예제 #13
0
File: itv.py — Project: geoffmomin/Scope-TV
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Scrape the ITV episode table for a programme."""
        url = 'http://mercury.itv.com/api/html/dotcom/Episode/Programme/' + quote(stream_id)
        data = tools.urlopen(self.app, url, {'cache': 3600})

        if len(data) < 10:
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return []

        soup = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")
        table = soup.find('tbody')

        episodes = list()
        for row in table.findAll('tr'):
            time = row.find('td', {'class': 't_time'})
            duration = row.find('td', {'class': 't_duration'})
            details = row.find('td', {'class': 't_details'})

            ep = CreateEpisode()
            ep.name = stream_name
            ep.id = self.url_base + details.a['href']
            ep.description = duration.contents[0] + ' - ' + details.span.contents[0]
            ep.thumbnails = details.a.img['src']
            ep.date = time.contents[2]
            ep.page = page
            ep.totalpage = totalpage
            episodes.append(ep)
        return episodes
예제 #14
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Scrape a tag page for its video list."""
        url = self.url_base + '/tag/' + str(stream_id) + '?page=' + str(page)
        html = tools.urlopen(self.app, url, {'cache': 3600})
        soup = BeautifulSoup(html,
                             convertEntities=BeautifulSoup.HTML_ENTITIES,
                             smartQuotesTo="xml")

        container = soup.findAll('div', {'class': 'videoContainer'})[0]

        # A pager-next link in the pager list implies more pages exist.
        try:
            pager = soup.findAll('span', {'class': 'pager-list'})[0]
            totalpage = len(pager.findAll(attrs={'class': re.compile("^pager-next")})) + 1
        except:
            totalpage = 1

        episodes = list()
        for item in container.findAll('li'):
            ep = CreateEpisode()
            ep.name = item.h5.a.contents[0]
            ep.id = item.a['href']
            ep.thumbnails = item.a.img['src']
            ep.page = page
            ep.totalpage = totalpage
            episodes.append(ep)

        return episodes
    def Episode(self, stream_name, stream_id, page, totalpage):
        """List the most recent videos for a ZDF programme.

        Queries the 'aktuellste' XML service and keeps video teasers only.
        """
        url  = self.url_base + '/ZDFmediathek/xmlservice/web/aktuellste?id='+stream_id+'&maxLength=50'
        data = tools.urlopen(self.app, url, {'cache':3600})
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        if len(data) < 5:
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return []

        episodelist = list()
        for info in soup.findAll('teaser'):
            # Teasers also cover non-video content; keep videos only.
            if info.type.contents[0] != 'video':
                continue

            title   = info.find('title')  # BUG FIX: was looked up twice
            detail  = info.find('detail')
            id      = info.find('assetid')
            airtime = info.find('airtime').contents[0]
            thumb   = self.url_base + '/ZDFmediathek/contentblob/'+ str(id.contents[0]) +'/timg276x155blob'

            episode                 = CreateEpisode()
            episode.name            = title.contents[0]
            episode.id              = id.contents[0]
            episode.description     = stream_name + ': ' + encodeUTF8(detail.contents[0])
            episode.thumbnails      = thumb
            episode.date            = airtime
            episode.page            = page
            episode.totalpage       = totalpage
            episodelist.append(episode)

        return episodelist
예제 #16
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Return the episodes shown on one gallery page of the site."""
        target = str(stream_id) + '/page/' + str(page)
        markup = tools.urlopen(self.app, target, {'cache': 3600})
        soup = BeautifulSoup(markup, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")

        main = soup.findAll('div', {'id': 'videogallery'})[0]

        # wp-pagenavi exposes one element per extra page.
        try:
            navi = soup.findAll('div', {'class': 'wp-pagenavi'})[0]
            totalpage = len(navi.findAll(attrs={'class': re.compile("^page")})) + 1
        except:
            totalpage = 1

        found = list()
        for block in main.findAll('div', {'class': 'videoitem'}):
            holder = block.findAll('div', {'class': 'thumbnail'})[0]
            # The preview image sits in an inline CSS background declaration.
            preview = re.search('background-image\: url\((.*?)\)', holder.div['style']).group(1)

            ep = CreateEpisode()
            ep.name = holder.a['title']
            ep.id = holder.a['href']
            ep.description = ' '.join(block.p.a.contents[0].split())
            ep.thumbnails = preview
            ep.page = page
            ep.totalpage = totalpage
            found.append(ep)

        return found
예제 #17
0
    def ParsePlaylist(self, url, max=False):
        """Parse a '='-delimited playlist into a list of record dicts.

        Records are separated by blank lines or '#' lines; each record line
        is 'key=value', where the value may itself contain '=' characters.
        *max* limits the number of parsed lines (False/0 means effectively
        unlimited).
        """
        raw  = tools.urlopen(self.app, url)
        output = cStringIO.StringIO()
        output.write(raw)
        output.seek(0, 0)

        data = csv.reader(output, delimiter="=", quoting=csv.QUOTE_NONE, quotechar='|')
        if max != 0: number = max
        else: number = 10000

        item = {}
        datalist = []
        for i, line in enumerate(data):
            if i >= number:
                break
            if line == [] or line == ['#']:
                # Record separator: flush the record built so far.
                if item: datalist.append(item)
                item = {}
            elif len(line) >= 2:
                # Re-join any '=' that belonged to the value itself.
                # BUG FIX: lines with more than 3 fields previously dropped
                # everything after the second field.
                item[line[0]] = '='.join(line[1:])
        # BUG FIX: flush the final record when the file has no trailing
        # separator line.
        if item: datalist.append(item)
        output.close()
        return datalist
    def Genre(self, genre, filter, page, totalpage):
        """List the ZDF broadcasts for the day mapped to *genre*."""
        day = self.genre[genre]
        url = (self.url_base + '/ZDFmediathek/xmlservice/web/sendungVerpasst?startdate='
               + day + '&enddate=' + day + '&maxLength=50')

        data = tools.urlopen(self.app, url, {'cache': 2400})
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        genrelist = list()
        if len(data) < 20:
            mc.ShowDialogNotification("No episode found for " + str(genre))
            return []

        for teaser in soup.findAll('teaser'):
            # Only video teasers are playable entries.
            if teaser.type.contents[0] != 'video':
                continue

            title = teaser.find('title')
            asset = teaser.find('assetid')
            airtime = teaser.find('airtime').contents[0]

            entry = CreateEpisode()
            entry.name = title.contents[0]
            entry.id = asset.contents[0]
            entry.date = airtime[-5:]  # keep only HH:MM
            entry.page = page
            entry.totalpage = totalpage
            genrelist.append(entry)

        if len(genrelist) < 1:
            mc.ShowDialogNotification("No episode found for " + str(genre))

        return genrelist
예제 #19
0
    def Genre(self, genre, filter, page, totalpage):
        """List ARD broadcasts for the day selected by *genre*."""
        id = self.genre[genre]
        url = self.url_base + '/ard/servlet/ajax-cache/3517242/view=list/datum=' + id + '/senderId=208/zeit=1/index.html'
        data = tools.urlopen(self.app, url, {'cache': 2400})
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        genrelist = list()
        # BUG FIX: the guard compared the response string itself to 20
        # (always False under Python 2 str/int ordering) and referenced an
        # undefined name 'stream_name' in its body.
        if len(data) < 20:
            mc.ShowDialogNotification("No episode found for " + str(genre))
            return genrelist

        for info in soup.findAll('div', {'class': 'mt-media_item'}):
            # Only items carrying a video icon are playable.
            if info.findAll('span', {'class': 'mt-icon mt-icon_video'}):
                detail = info.find('a')
                title = detail.contents[0]
                airtime = info.find('span', {'class': 'mt-airtime'})

                genreitem = CreateEpisode()
                genreitem.name = title
                genreitem.id = detail['href'].split('=')[1]
                genreitem.date = airtime.contents[0][-9:]
                genreitem.page = page
                genreitem.totalpage = totalpage
                genrelist.append(genreitem)
        if len(genrelist) < 1:
            mc.ShowDialogNotification("No episode found for " + str(genre))
        return genrelist
예제 #20
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """List the chapters of a book-navigation block as episodes."""
        html = tools.urlopen(self.app, stream_id, {"cache": 3600})

        if html == "":
            mc.ShowDialogNotification("Geen afleveringen gevonden voor " + str(stream_name))
            return [CreateEpisode()]

        soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")

        if totalpage == "":
            totalpage = 1

        content = soup.findAll("div", {"class": "content"})[0]
        thumb = content.findAll("img", {"align": "left"})[0]
        nav = soup.findAll("div", {"class": "book-navigation"})[0]

        episodes = list()
        for link in nav.findAll("a"):
            # Link text looks like '<name> Episode <n>'; split once and reuse.
            parts = link.contents[0].split("Episode")
            ep = CreateEpisode()
            ep.name = parts[0]
            ep.id = self.url_base + link["href"]
            ep.thumbnails = thumb["src"]
            ep.date = "Episode" + parts[1]
            ep.page = page
            ep.totalpage = totalpage
            episodes.append(ep)

        return episodes
예제 #21
0
    def Play(self, stream_name, stream_id, subtitle):
        """Resolve a BrightCove video page to a flowplayer proxy play item."""
        data = tools.urlopen(self.app, str(stream_id), {"cache": 3600})

        # The player parameters are embedded as escaped form fields.
        contentId = (
            re.compile('videoPlayer\\\\" value=\\\\"(.*?)\\\\"', re.DOTALL + re.MULTILINE).search(str(data)).group(1)
        )
        playerKey = (
            re.compile('playerKey\\\\" value=\\\\"(.*?)\\\\"', re.DOTALL + re.MULTILINE).search(str(data)).group(1)
        )
        seed = "61773bc7479ab4e69a5214f17fd4afd21fe1987a"

        amfHelper = BrightCoveHelper(playerKey, contentId, str(stream_id), seed)

        # Collect available streams keyed by bitrate.
        streams = {}
        for stream, bitrate in amfHelper.GetStreamInfo():
            streams[bitrate] = {"uri": stream}

        # Highest bitrate wins.
        quality = max(int(key) for key in streams.iterkeys())
        rtmp = streams[int(quality)]["uri"]
        domain, file = rtmp.split("/&")

        url = "http://www.bartsidee.nl/flowplayer/player.php?url=" + str(domain) + "&clip=" + str(file)
        play = CreatePlay()
        play.content_type = "video/x-flv"
        play.path = quote_plus(url)
        play.domain = "bartsidee.nl"

        return play
    def Episode(self, show_name, show_id, page, totalpage):
        """Search YouTube for *show_id* and return the most-viewed videos."""
        url = (self.url_base + '/feeds/api/videos?q=' + quote_plus(show_id)
               + '&start-index=1&max-results=20&format=5&orderby=viewCount')
        data = tools.urlopen(self.app, url)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        titles = [t.string for t in soup.findAll('title')]
        ids = [re.search('v\/(.*?)\?', m['url']).group(1)
               for m in soup.findAll('media:content', {'isdefault': 'true'})]
        thumbs = [t['url'] for t in soup.findAll('media:thumbnail', {'height': '360'})]
        descs = [c.string for c in soup.findAll('content')]
        dates = [p.string[0:10] for p in soup.findAll('published')]

        # The first <title> belongs to the feed itself, not a video.
        titles.pop(0)

        episodes = list()
        for name, vid, thumb, desc, date in izip(titles, ids, thumbs, descs, dates):
            ep = CreateEpisode()
            ep.name = name
            ep.id = vid
            ep.description = desc
            ep.thumbnails = thumb
            ep.date = date
            ep.page = page
            ep.totalpage = totalpage
            episodes.append(ep)

        return episodes
예제 #23
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Parse the ajax sidebar listing of a show into episodes."""
        url = self.url_base + stream_id + '?ajax,sb/sb'
        data = tools.urlopen(self.app, url, {'cache': 3600})

        if data == "":
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return []

        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        episodes = list()
        for item in soup.findAll('li'):
            # Entries without an <em> (which carries the date) are not episodes.
            if not item.a.findAll('em'):
                continue
            ep = CreateEpisode()
            ep.name = stream_name
            ep.id = item.a['href']
            ep.description = item.a.span.contents[0].replace(' ', '').replace('\n', '').replace('\t', '')
            ep.thumbnails = item.a.img['src']
            ep.date = item.a.em.contents[0][-10:]
            ep.page = page
            ep.totalpage = totalpage
            episodes.append(ep)

        return episodes
예제 #24
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Return the chapter links of a book-navigation page as episodes."""
        markup = tools.urlopen(self.app, stream_id, {'cache': 3600})

        if markup == "":
            mc.ShowDialogNotification("Geen afleveringen gevonden voor " + str(stream_name))
            return [CreateEpisode()]

        soup = BeautifulSoup(markup,
                             convertEntities=BeautifulSoup.HTML_ENTITIES,
                             smartQuotesTo="xml")

        if totalpage == "":
            totalpage = 1

        body = soup.findAll('div', {'class': 'content'})[0]
        # One shared thumbnail is used for every chapter.
        poster = body.findAll('img', {'align': 'left'})[0]
        toc = soup.findAll('div', {'class': 'book-navigation'})[0]

        result = list()
        for anchor in toc.findAll('a'):
            label = anchor.contents[0].split('Episode')
            ep = CreateEpisode()
            ep.name = label[0]
            ep.id = self.url_base + anchor['href']
            ep.thumbnails = poster['src']
            ep.date = 'Episode' + label[1]
            ep.page = page
            ep.totalpage = totalpage
            result.append(ep)

        return result
예제 #25
0
    def List(self):
        """Collect the A-Z programme index and return it sorted by title."""
        letters = [chr(c) for c in range(ord('A'), ord('Z') + 1)]
        titles = []
        assets = []
        for letter in letters:
            url = (self.url_base
                   + '/ZDFmediathek/xmlservice/web/sendungenAbisZ?characterRangeStart='
                   + letter + '&detailLevel=2&characterRangeEnd=' + letter)
            data = tools.urlopen(self.app, url)
            soup = BeautifulSoup(data,
                                 convertEntities="xml",
                                 smartQuotesTo="xml")

            titles.extend(soup.findAll('title'))
            assets.extend(soup.findAll('assetid'))

        # De-duplicate by asset id and strip stray quotes from the titles.
        unique = dict([(a.contents[0], t.contents[0].replace('"', ''))
                       for a, t in izip(assets, titles)])
        ordered = sorted(unique.items(), key=lambda pair: pair[1])

        streamlist = list()
        for asset_id, title in ordered:
            stream = CreateList()
            stream.name = title
            stream.id = asset_id
            streamlist.append(stream)

        return streamlist
예제 #26
0
File: een.py — Project: geoffmomin/Scope-TV
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Return the videos listed on one page of a tag overview."""
        target = self.url_base + '/tag/' + str(stream_id) + '?page=' + str(page)
        markup = tools.urlopen(self.app, target, {'cache': 3600})
        soup = BeautifulSoup(markup, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")

        box = soup.findAll('div', {'class': 'videoContainer'})[0]

        # The presence of a pager-next link means more pages follow.
        try:
            pager = soup.findAll('span', {'class': 'pager-list'})[0]
            nexts = pager.findAll(attrs={'class': re.compile("^pager-next")})
            totalpage = len(nexts) + 1
        except:
            totalpage = 1

        found = list()
        for row in box.findAll('li'):
            ep = CreateEpisode()
            ep.name = row.h5.a.contents[0]
            ep.id = row.a['href']
            ep.thumbnails = row.a.img['src']
            ep.page = page
            ep.totalpage = totalpage
            found.append(ep)

        return found
예제 #27
0
    def Genre(self, genre, filter, page, totalpage):
        """List the missed-broadcast videos for the day mapped to *genre*."""
        day = self.genre[genre]
        url = (self.url_base + '/ZDFmediathek/xmlservice/web/sendungVerpasst?startdate='
               + day + '&enddate=' + day + '&maxLength=50')

        data = tools.urlopen(self.app, url, {'cache': 2400})
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        genrelist = list()
        if len(data) < 20:
            mc.ShowDialogNotification("No episode found for " + str(genre))
            return []

        for teaser in soup.findAll('teaser'):
            # Skip everything that is not a playable video teaser.
            if teaser.type.contents[0] != 'video':
                continue

            title = teaser.find('title')
            asset = teaser.find('assetid')
            airtime = teaser.find('airtime').contents[0]

            entry = CreateEpisode()
            entry.name = title.contents[0]
            entry.id = asset.contents[0]
            entry.date = airtime[-5:]  # keep only HH:MM
            entry.page = page
            entry.totalpage = totalpage
            genrelist.append(entry)

        if len(genrelist) < 1:
            mc.ShowDialogNotification("No episode found for " + str(genre))

        return genrelist
예제 #28
0
    def Play(self, stream_name, stream_id, subtitle):
        """Resolve the media file behind *stream_id* into a CreatePlay object."""
        page_data = tools.urlopen(self.app, str(stream_id), {'cache': 3600})

        media = re.compile('file \: "(.*?)"').search(page_data).group(1)
        try:
            streamer = re.compile('streamer \: "(.*?)"').search(page_data).group(1)
        except:
            streamer = False

        play = CreatePlay()
        if streamer:
            # RTMP stream: route playback through the external flowplayer bridge.
            bridge = ('http://www.bartsidee.nl/flowplayer/index.html?net='
                      + str(streamer) + '&id=' + str(media).replace('.flv', ''))
            play.content_type = 'video/x-flv'
            play.path = quote_plus(bridge)
            play.domain = 'bartsidee.nl'
            play.jsactions = quote_plus('http://bartsidee.nl/boxee/apps/js/flow.js')
        else:
            # Plain progressive file: play it directly.
            play.path = media

        return play
    def Genre(self, genre, filter, page, totalpage):
        """List ARD broadcasts for the date mapped to *genre*.

        Returns CreateEpisode items; shows a notification and returns an
        empty list when the service yields no data.
        """
        id = self.genre[genre]
        url = self.url_base + '/ard/servlet/ajax-cache/3517242/view=list/datum='+id+'/senderId=208/zeit=1/index.html'

        data = tools.urlopen(self.app, url, {'cache':2400})
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        genrelist = list()
        # BUGFIX: was `if data < 20:` (string-to-int compare, never the intent)
        # and the message referenced the undefined name `stream_name` (NameError).
        if len(data) < 20:
            mc.ShowDialogNotification("No episode found for " + str(genre))
            return genrelist

        for info in soup.findAll( 'div', {'class' : 'mt-media_item'}):
            # BUGFIX: the pattern used the invalid escape `\m`; anchor the class
            # name with a real word boundary instead.
            if info.findAll( 'span', {'class' : re.compile(r".*\bmt-icon_video\b.*")} ):
                detail = info.find('a')
                title = detail.contents[0]
                airtime = info.find('span', {'class' : 'mt-airtime'})

                genreitem           =   CreateEpisode()
                genreitem.name      =   title
                genreitem.id        =   detail['href']
                genreitem.date      =   airtime.contents[0][-9:]
                genreitem.page      =   page
                genreitem.totalpage =   totalpage
                genrelist.append(genreitem)
        if len(genrelist) < 1:
            mc.ShowDialogNotification("No episode found for " + str(genre))
        return genrelist
예제 #30
0
    def Play(self, stream_name, stream_id, subtitle):
        """Pick the highest-bitrate Brightcove rendition and build a play object."""
        page = tools.urlopen(self.app, str(stream_id), {'cache': 3600})
        videoPlayer = re.compile('videoPlayer\\\\" value=\\\\"(.*?)\\\\"', re.DOTALL + re.MULTILINE).search(str(page)).group(1)

        const = '0559229564fa55a266eeeac4b89a5b9f75568382'
        playerID = 1150434888001
        publisherID = 585049245001

        clip = get_clip_info(const, playerID, videoPlayer, publisherID)

        # Map encoding rate -> stream info so we can select by quality.
        streams = {}
        for rendition in clip['renditions']:
            streams[rendition["encodingRate"]] = {"uri": rendition["defaultURL"]}

        # Highest encoding rate wins.
        rates = sorted(int(rate) for rate in streams.iterkeys())
        best = rates[-1]

        rtmp = streams[int(best)]["uri"]
        domain, file = rtmp.split('/&')

        play = CreatePlay()
        play.content_type = 'video/x-flv'
        play.rtmpurl = file
        play.rtmpdomain = domain

        return play
예제 #31
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Scrape one gallery page of episodes for a show."""
        page_url = str(stream_id) + '/page/' + str(page)
        html = tools.urlopen(self.app, page_url, {'cache': 3600})
        soup = BeautifulSoup(html,
                             convertEntities=BeautifulSoup.HTML_ENTITIES,
                             smartQuotesTo="xml")

        gallery = soup.findAll('div', {'id': 'videogallery'})[0]

        # Page count comes from the wp-pagenavi widget when present.
        try:
            nav = soup.findAll('div', {'class': 'wp-pagenavi'})[0]
            totalpage = len(nav.findAll(attrs={'class': re.compile("^page")})) + 1
        except:
            totalpage = 1

        thumb_re = re.compile('background-image\: url\((.*?)\)')

        episodelist = list()
        for item in gallery.findAll('div', {'class': 'videoitem'}):
            thumb_div = item.findAll('div', {'class': 'thumbnail'})[0]

            episode = CreateEpisode()
            episode.name = thumb_div.a['title']
            episode.id = thumb_div.a['href']
            episode.description = ' '.join(item.p.a.contents[0].split())
            # Thumbnail URL is embedded in the inline background-image style.
            episode.thumbnails = thumb_re.search(thumb_div.div['style']).group(1)
            episode.page = page
            episode.totalpage = totalpage
            episodelist.append(episode)

        return episodelist
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Parse an XML feed of broadcasts and return deduplicated episodes."""
        data = tools.urlopen(self.app, stream_id, {'cache': 600})
        doc = etree.fromstring(data)
        try:
            items = doc.xpath('//item')
        except:
            items = []

        if len(items) < 1:
            mc.ShowDialogNotification("Geen afleveringen gevonden voor " + str(stream_name))
            return []

        # Feed order is oldest-first; present newest entries first.
        items.reverse()

        episodelist = list()
        seen_dates = []
        for item in items:
            date = item.findtext('broadcastdatetime')
            kind = item.findtext('classname')

            # Keep only real broadcasts and collapse duplicate air dates.
            if date in seen_dates or kind not in ["aflevering", "uitzending"]:
                continue

            episode = CreateEpisode()
            episode.name = stream_name + " - " + item.findtext('episodetitel')
            episode.id = item.findtext('movie')
            episode.thumbnails = item.findtext('thumbnail')
            episode.date = self.getDate(item.findtext('broadcastdatetime').split('T')[0])
            episode.description = item.findtext('samenvattingkort').encode('utf-8')
            episode.page = page
            episode.totalpage = totalpage
            episodelist.append(episode)
            seen_dates.append(date)

        return episodelist
예제 #33
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Query the iPlayer 'starts with' JSON widget for a programme's episodes."""
        url = (self.url_base
               + "/iplayer/widget/startswith/site/bigscreen/media_set/pc-bigscreen/json/1/bigscreen_layout/sd/service_type/tv/template/index/starts_with/"
               + quote(stream_id))
        data = tools.urlopen(self.app, url, {"cache": 3600})

        # A tiny response means the lookup returned nothing useful.
        if len(data) < 10:
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return list()

        episode_re = re.compile("episode\/(.*?)\/", re.DOTALL + re.IGNORECASE)

        episodelist = list()
        for entry in json.loads(data)["data"]:
            episode = CreateEpisode()
            episode.name = entry["s"]
            episode.id = self.url_base + entry["url"]
            # Rebuild the thumbnail URL from the episode id inside the link.
            episode.thumbnails = ("http://node1.bbcimg.co.uk/iplayer/images/episode/"
                                  + episode_re.search(entry["url"]).group(1)
                                  + "_288_162.jpg")
            episode.date = entry["t"]
            episode.page = page
            episode.totalpage = totalpage
            episodelist.append(episode)
        return episodelist
예제 #34
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Scrape the ITV mercury episode table for a programme."""
        url = 'http://mercury.itv.com/api/html/dotcom/Episode/Programme/' + quote(stream_id)
        data = tools.urlopen(self.app, url, {'cache': 3600})
        soup = BeautifulSoup(data,
                             convertEntities=BeautifulSoup.HTML_ENTITIES,
                             smartQuotesTo="xml")

        if len(data) < 10:
            mc.ShowDialogNotification("No episode found for " +
                                      str(stream_name))
            return []
        table = soup.find('tbody')

        episodelist = list()
        for row in table.findAll('tr'):
            time_cell = row.find('td', {'class': 't_time'})
            duration_cell = row.find('td', {'class': 't_duration'})
            details_cell = row.find('td', {'class': 't_details'})

            episode = CreateEpisode()
            episode.name = stream_name
            episode.id = self.url_base + details_cell.a['href']
            episode.description = (duration_cell.contents[0] + ' - '
                                   + details_cell.span.contents[0])
            episode.thumbnails = details_cell.a.img['src']
            episode.date = time_cell.contents[2]
            episode.page = page
            episode.totalpage = totalpage
            episodelist.append(episode)
        return episodelist
예제 #35
0
    def Play(self, stream_name, stream_id, subtitle):
        """Resolve a tv8play id to its Viasat RTMP stream."""
        play = CreatePlay()

        product_id = re.compile('tv8play.se\/play\/(.*?)\/').search(str(stream_id)).group(1)

        data = tools.urlopen(self.app, 'http://viastream.viasat.tv/PlayProduct/' + product_id)
        soup = BeautifulStoneSoup(data,
                                  convertEntities="xml",
                                  smartQuotesTo="xml")

        video_tag = soup.findAll('video')[0]
        # repr() then strip the unicode-literal wrapping to get a plain string.
        video = ('%r' % video_tag.url.contents[0]).replace("u'", "").replace("'", "")

        # First four path components form the RTMP domain, the rest the play path.
        parts = video.split("/")
        play.rtmpdomain = "/".join(parts[:4])
        play.rtmpurl = "/".join(parts[4:])
        play.rtmpauth = ''

        return play
예제 #36
0
    def Genre(self, genre, filter, page, totalpage):
        """List iPlayer episodes for a category via the bigscreen JSON widget."""
        url = (
            self.url_base
            + "/iplayer/widget/listview/site/bigscreen/media_set/pc-bigscreen/json/1/bigscreen_layout/sd/service_type/tv/category/"
            + quote(self.genre_links[genre])
            + "/perpage/100/block_type/episode"
        )
        data = tools.urlopen(self.app, url, {"cache": 3600})

        if len(data) < 10:
            # BUGFIX: this branch referenced the undefined name `stream_name`
            # (a NameError at runtime); report the genre instead.
            mc.ShowDialogNotification("No episode found for " + str(genre))
            return list()

        json_data = json.loads(data)

        genrelist = list()
        for info in json_data["data"]:
            genreitem = CreateEpisode()
            genreitem.name = info["s"]
            genreitem.id = self.url_base + info["url"]
            genreitem.date = info["t"]
            genreitem.page = page
            genreitem.totalpage = totalpage
            genrelist.append(genreitem)

        return genrelist
    def Genre(self, genre, filter, page, totalpage):
        """Page through Hulu's shows API for a genre and return show entries."""
        position = self.pageSize * (page - 1)
        url = (self.url_base + '/api/2.0/shows.json?asset_scope=tv&genre=' + genre.replace(" ", "+")
               + '&order=desc&sort=view_count_week&video_type=tv&items_per_page=' + str(self.pageSize)
               + '&position=' + str(position)
               + '&_user_pgid=1&_content_pgid=67&_device_id=1&free_only=' + self.free)

        data = tools.urlopen(self.app, url, {'cache': 3600})

        if data == "":
            mc.ShowDialogNotification("No genre found for " + str(genre))
            return []

        json_data = json.loads(data)

        # Work out the page count once, on the first request.
        if totalpage == "":
            total = int(json_data['total_count'])
            if total > self.pageSize:
                totalpage = math.ceil(total / self.pageSize)
            else:
                totalpage = 1

        genrelist = list()
        for item in json_data['data']:
            show = item['show']
            entry = CreateEpisode()
            entry.episode = "True"
            entry.name = '[UPPERCASE]' + show['name'] + '[/UPPERCASE] ' + show['description']
            entry.id = self.url_base + '/' + str(show['canonical_name'])
            entry.page = page
            entry.totalpage = totalpage
            genrelist.append(entry)

        return genrelist
예제 #38
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Fetch an ARD ajax-cache listing (two endpoints) and scrape episodes."""
        base = self.url_base + '/ard/servlet/ajax-cache/'
        tail = '/view=list/documentId=' + stream_id + '/goto=' + str(page) + '/index.html'
        data = tools.urlopen(self.app, base + '3516962' + tail, {'cache': 3600})

        # The listing lives under one of two cache ids; fall back to the second.
        if len(data) < 20:
            data = tools.urlopen(self.app, base + '3516992' + tail, {'cache': 3600})
            if len(data) < 20:
                mc.ShowDialogNotification("No episode found for " + str(stream_name))
                return list()

        soup = BeautifulSoup(data,
                             convertEntities=BeautifulSoup.HTML_ENTITIES,
                             smartQuotesTo="xml")

        # Read the total page count from the pager on the first request only.
        if totalpage == "":
            try:
                pager_item = soup.find('li', {'class': 'mt-paging ajax-paging-li'})
                label = pager_item.findAll('span')[2]
                totalpage = int(label.contents[0][-2:].replace(' ', ''))
            except:
                totalpage = 1

        episodelist = list()
        for media in soup.findAll('div', {'class': 'mt-media_item'}):
            # Only items carrying the video icon are playable episodes.
            if not media.findAll('span', {'class': 'mt-icon mt-icon_video'}):
                continue
            link = media.find('a')
            airtime = media.find('span', {'class': 'mt-airtime'})
            thumb = media.find('img')

            episode = CreateEpisode()
            episode.name = stream_name
            episode.id = link['href'].split('=')[1]
            episode.description = link.contents[0]
            episode.thumbnails = self.url_base + thumb['data-src']
            episode.date = airtime.contents[0]
            episode.page = page
            episode.totalpage = totalpage
            episodelist.append(episode)

        return episodelist
예제 #39
0
    def Genre(self, genre, filter, page, totalpage):
        """Build the ITV schedule listing for the day named by *genre*.

        *filter* optionally restricts the listing to one network; otherwise
        all configured networks are merged, sorted by time descending.
        """
        url = 'http://mercury.itv.com/api/html/dotcom/Schedule/'
        data = tools.urlopen(self.app, url, {'cache': 3600})
        soup = BeautifulSoup(data,
                             convertEntities=BeautifulSoup.HTML_ENTITIES,
                             smartQuotesTo="xml")

        if len(data) < 10:
            # BUGFIX: this branch referenced the undefined name `stream_name`
            # (a NameError at runtime); report the genre instead.
            mc.ShowDialogNotification("No episode found for " + str(genre))
            return []

        day = soup.find('li', {'class': re.compile("^" + genre)})

        # Collect the networks to include.
        net = []
        if filter and filter != 'None':
            net.append(filter)
        else:
            for id in self.filter:
                net.append(id)
        if 'None' in net: net.remove('None')

        # Keyed by broadcast time -> [name, id, filter]
        data = {}
        data_sorted = []
        for i in net:
            netdata = day.find('li', {'class': re.compile("^" + i)})
            for info in netdata.findAll(
                    attrs={'class': re.compile("^whatsOnTime")}):
                if info.a:
                    title = info.find('span', {'class': 'title'})
                    time = info.find('span', {'class': 'time'})
                    data[time.contents[0]] = [
                        title.contents[0], self.url_base + info.a['href'], i
                    ]
        date = data.keys()
        date.sort(reverse=True)
        for i in date:
            data_sorted.append({
                'name': data[i][0],
                'id': data[i][1],
                'filter': data[i][2],
                'date': i
            })

        genrelist = list()
        for info_sorted in data_sorted:
            genreitem = CreateEpisode()
            genreitem.name = info_sorted['name']
            genreitem.id = info_sorted['id']
            genreitem.date = info_sorted['date']
            genreitem.filter = info_sorted['filter']
            genreitem.page = page
            genreitem.totalpage = totalpage
            genrelist.append(genreitem)

        return genrelist
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Fetch an ARD ajax-cache listing (two endpoints) and scrape its video items."""
        url = self.url_base + '/ard/servlet/ajax-cache/3516962/view=list/documentId='+stream_id+'/goto='+str(page)+'/index.html'
        url2 = self.url_base + '/ard/servlet/ajax-cache/3516992/view=list/documentId='+stream_id+'/goto='+str(page)+'/index.html'
        data = tools.urlopen(self.app, url, {'cache':3600})

        # The listing lives under one of two cache ids; try the second on failure.
        if len(data) < 20:
            data = tools.urlopen(self.app, url2, {'cache':3600} )
            if len(data) < 20:
                mc.ShowDialogNotification("No episode found for " + str(stream_name))
                episodelist = list()
                return episodelist

        soup = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")

        # Derive the page count from the pager on the first request only.
        if totalpage == "":
            try:
                pages = soup.find( 'li', {'class' : 'mt-paging ajax-paging-li'})
                pages = pages.findAll('span')[2]
                pages = pages.contents[0][-2:].replace(' ','')
                totalpage = int(pages)
            except:
                totalpage = 1

        episodelist = list()
        for info in soup.findAll( 'div', {'class' : 'mt-media_item'}):
            # BUGFIX: the pattern used the invalid escape `\m`; anchor the class
            # name with a real word boundary instead.
            if info.findAll( 'span', {'class' : re.compile(r".*\bmt-icon_video\b.*")}):

                detail = info.find('a')
                airtime = info.find('span', {'class' : 'mt-airtime'})
                thumb = info.find('img')

                episode             =   CreateEpisode()
                episode.name        =   stream_name
                episode.id          =   detail['href']
                episode.description =   detail.contents[0]
                if thumb:
                    episode.thumbnails  =   self.url_base + thumb['src']
                episode.date        =   airtime.contents[0]
                episode.page        =   page
                episode.totalpage   =   totalpage
                episodelist.append(episode)

        return episodelist
    def YouTube(self, id):

        url = 'http://www.youtube.com/get_video_info?video_id=%s' % id
        html = tools.urlopen(self.app, url)

        try:
            results    = re.compile('(%26itag%3D)').search(html).groups()[0]
            searchdata = html
        except: results = ''

        if not results:
            url  = 'http://www.youtube.com/watch?v=%s' % id
            html = tools.urlopen(self.app, url)
            searchdata = re.compile('flashvars="([^"]+)').search(html).groups()[0]

        formats = {
            '17': 'mp4',
            '18': 'mp4',
            '22': 'mp4',
            '37': 'mp4',
            '38': 'video',
            '43': 'webm',
            '45': 'webm',
            '34': 'flv',
            '5' : 'flv',
            '0' : 'flv',
            }
        result = None
        ext = None

        searchdata = unquote_plus(searchdata)
        for key in formats.keys():
            if result:
                continue

            try:    playurl = re.compile('[=,]url=([^&]+)[^,]+&itag=%s' % key).search(searchdata).groups()[0]
            except: playurl = ''

            print playurl

            if playurl:
                result = unquote_plus(playurl)
                ext    = formats[key]

        return result, ext
예제 #42
0
파일: hulu.py 프로젝트: geoffmomin/Scope-TV
 def GetThumb(self, id):
     """Return the thumbnail URL for a Hulu video id, or '' when not found."""
     data = tools.urlopen(self.app, "http://www.hulu.com/videos/info/" + str(id), {'xhr': True})
     # The JSON blob sometimes has a space after the colon; try both spellings.
     for pattern in ('"thumbnail_url":"(.*?)"', '"thumbnail_url": "(.*?)"'):
         match = re.compile(pattern, re.DOTALL + re.IGNORECASE).search(str(data))
         if match:
             return match.group(1)
     return str('')
    def getGenres(self):
        """Return the list of Hulu TV genre names, most-viewed first."""
        url = self.url_base + "/api/2.0/genres.json?sort=view_count_week&type=tv&items_per_page=32&position=0&_user_pgid=1&_content_pgid=67&_device_id=1"
        payload = json.loads(tools.urlopen(self.app, url, {'cache': 36000}))
        # Each entry wraps the genre record under the 'genre' key.
        return [entry["genre"]["name"] for entry in payload['data']]
예제 #44
0
 def GetThumb(self, id):
     """Return the thumbnail URL for a Hulu video id, or '' when not found."""
     url = "http://www.hulu.com/videos/info/" + str(id)
     data = tools.urlopen(self.app, url, {'xhr': True})
     try:
         # Compact JSON spelling: "thumbnail_url":"..."
         return re.compile('"thumbnail_url":"(.*?)"', re.DOTALL +
                           re.IGNORECASE).search(str(data)).group(1)
     except:
         try:
             # Pretty-printed spelling with a space after the colon.
             return re.compile('"thumbnail_url": "(.*?)"', re.DOTALL +
                               re.IGNORECASE).search(str(data)).group(1)
         except:
             return str('')
예제 #45
0
파일: itv.py 프로젝트: geoffmomin/Scope-TV
    def Search(self, search):
        """Search ITV programmes and return one list entry per result."""
        data = tools.urlopen(
            self.app,
            'http://mercury.itv.com/api/json/dotcom/Programme/Search/' + quote(search))
        results = json.loads(data)['Result']

        streamlist = list()
        for result in results:
            programme = result['Details'][0]['Programme']
            stream = CreateList()
            stream.name = programme['Programme']['Title']
            stream.id = programme['MostRecentEpisodeId']
            streamlist.append(stream)

        return streamlist
예제 #46
0
def extract_words(args):
    """Collect the titles of internal wiki links on *args.url*.

    Returns the list of link titles whose href matches `/wiki/<word>`
    (lowercase letters only).
    """
    url = args.url
    response = urlopen(url)
    html = response.read()
    words = []

    soup = BeautifulSoup(html)
    for link in soup.find_all('a'):
        if u'href' in link.attrs and u'title' in link.attrs:
            href = link['href']
            title = link['title']
            if re.match("""\/wiki\/([a-z]+)""", href):
                words.append(title)
    # BUGFIX: the computed list was built but never returned.
    return words
예제 #47
0
    def Play(self, stream_name, stream_id, subtitle):
        """Resolve an episode page to a playable WVC1 stream.

        Fetches the episode page, extracts the player id, signs a request
        using the pi.omroep.nl security key and selects the stream variant
        whose compressie_formaat is 'wvc1'.
        """
        data = tools.urlopen(self.app, stream_id, {'cache': 3600})
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        # The stream id is the first argument of load_player('<id>' ...) in the page.
        streamid = re.compile("load_player\('(.*?)'", re.DOTALL +
                              re.IGNORECASE).search(str(soup)).group(1)
        if streamid == "":
            mc.ShowDialogNotification("Geen stream beschikbaar...")

        # Fetch the security token uncached (cache: 0) on every call.
        data = tools.urlopen(self.app, 'http://pi.omroep.nl/info/security',
                             {'cache': 0})
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        try:
            key = soup.session.key.contents[0]
        except:
            mc.ShowDialogNotification("Kan de security key niet ophalen")
            return
        security = base64.b64decode(key)

        # The decoded key is '<prefix>|<secret>'; hash '<streamid>|<secret>'.
        securitystr = str(security).split('|')[1]
        md5code = streamid + '|' + securitystr
        md5code = md5.md5(md5code).hexdigest()

        # The stream-info endpoint expects the uppercased hex digest.
        streamdataurl = 'http://pi.omroep.nl/info/stream/aflevering/' + str(
            streamid) + '/' + str(md5code).upper()
        data = tools.urlopen(self.app, streamdataurl, {
            'cache': 0
        }).decode('utf-8')
        xmlSoup = BeautifulSoup(data)
        # Select the WVC1-encoded variant and strip embedded whitespace.
        streamurl = xmlSoup.find(attrs={"compressie_formaat": "wvc1"})
        url_play = streamurl.streamurl.contents[0].replace(" ", "").replace(
            "\n", "").replace("\t", "")

        play = CreatePlay()
        play.path = url_play
        if subtitle:
            play.subtitle = self.GetSubtitle(security, streamid)
            play.subtitle_type = 'sami'

        return play
    def List(self):
        """Return the ARD 'sendung verpasst' programme index as list entries."""
        url = self.url_base + '/ard/servlet/ajax-cache/3551682/view=module/index.html'
        raw = tools.urlopen(self.app, url)
        # The page embeds a JS array literal; re-wrap the captured body as JSON.
        body = re.compile('sendungVerpasstListe = \[(.*?)\]',
                          re.DOTALL + re.IGNORECASE).search(str(raw)).group(1)
        entries = json.loads('[' + body + ']')

        streamlist = list()
        for entry in entries:
            stream = CreateList()
            stream.name = entry['titel'].replace('&amp;', '&')
            stream.id = entry['link'].split('=')[1]
            streamlist.append(stream)

        return streamlist
예제 #49
0
    def Search(self, search):
        """Look up ITV programmes matching *search* and return list items."""
        url = 'http://mercury.itv.com/api/json/dotcom/Programme/Search/' + quote(search)
        json_data = json.loads(tools.urlopen(self.app, url))

        streamlist = list()
        for hit in json_data['Result']:
            details = hit['Details'][0]['Programme']
            entry = CreateList()
            entry.name = details['Programme']['Title']
            entry.id = details['MostRecentEpisodeId']
            streamlist.append(entry)

        return streamlist
예제 #50
0
    def List(self):
        """Build the programme list from ARD's embedded JS array."""
        data = tools.urlopen(
            self.app,
            self.url_base + '/ard/servlet/ajax-cache/3551682/view=module/index.html')
        # Extract the sendungVerpasstListe array body and parse it as JSON.
        match = re.compile('sendungVerpasstListe = \[(.*?)\]',
                           re.DOTALL + re.IGNORECASE).search(str(data))
        json_data = json.loads('[' + match.group(1) + ']')

        streamlist = list()
        for info in json_data:
            item = CreateList()
            item.name = info['titel'].replace('&amp;', '&')
            item.id = info['link'].split('=')[1]
            streamlist.append(item)

        return streamlist
예제 #51
0
    def Search(self, search):
        """Query the BBC iPlayer suggest service and return matching titles."""
        url = ('http://search.bbc.co.uk/suggest?scope=iplayer&format=xml&callback=xml.suggest&q='
               + quote_plus(search))
        data = tools.urlopen(self.app, url)
        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        streamlist = list()
        for suggestion in soup.findAll('text'):
            title = suggestion.contents[0]
            stream = CreateList()
            stream.name = title
            stream.id = ConvertASCII(title)
            streamlist.append(stream)

        return streamlist
예제 #52
0
    def Episode(self, stream_name, stream_id, page, totalpage):
        """Scrape Hulu's quicksearch episode listing for a series."""
        url = (self.url_base + '/quicksearch/episodes?page=' + str(page)
               + '&series_id=' + stream_id)
        data = tools.urlopen(self.app, url, {'cache': 3600})

        if data == "":
            mc.ShowDialogNotification("No episode found for " + str(stream_name))
            return list()

        # The response is a JS call; pull out and unescape the embedded HTML.
        data = re.compile('"episode_results", "(.*?)"\)',
                          re.DOTALL + re.IGNORECASE).search(str(data)).group(1)
        data = data.replace('\\"', '"').replace('\\n', '').replace('\\t', '')
        soup = BeautifulSoup(data,
                             convertEntities=BeautifulSoup.HTML_ENTITIES,
                             smartQuotesTo="xml")

        # Read the page count from the pagination links on the first request.
        if totalpage == "":
            try:
                links = soup.findAll('div', {'class': 'pagination'})[0].findAll('a')
                totalpage = int(links[len(links) - 2].contents[0])
            except:
                totalpage = 1

        episodelist = list()
        for row in soup.findAll('li'):
            try:
                href = row.h3.a['href']
            except:
                href = False
            if not href:
                continue

            episode = CreateEpisode()
            episode.name = stream_name
            episode.id = self.url_base + href
            episode.description = (row.span.contents[0].replace(' ', '')
                                   + ' - ' + row.p.contents[0])
            # Lazily-loaded thumbnails sit in data-src; fall back to src.
            try:
                episode.thumbnails = row.a.img['data-src']
            except:
                episode.thumbnails = row.a.img['src']
            episode.date = row.h3.a.contents[0]
            episode.page = page
            episode.totalpage = totalpage
            episodelist.append(episode)

        return episodelist
예제 #53
0
    def process(self, url):
        """Scrape grid items from *url*; return [{'label','thumb','id'}, ...]."""
        html = tools.urlopen(self.app, url, {'cache': 3600})
        soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES,
                             smartQuotesTo="xml")

        cells = soup.findAll('div', {'class': 'i iGrid'})
        if not cells:
            return []

        results = []
        for cell in cells:
            heading = cell.div.h2.a
            results.append({
                'label': heading.contents[0],
                'thumb': self.url_base + cell.a.img['src'],
                'id': self.url_base + heading['href'],
            })

        return results
예제 #54
0
    def Search(self, search):
        """POST a keyword search and parse the '---' delimited spans into items."""
        url = self.url_base + '/script/common/ajax_zoek.php'
        data = tools.urlopen(self.app, url, {'post': 'keyword=' + quote(search)})

        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")

        streamlist = list()
        for row in soup.findAll('li'):
            span_text = str(row.span.contents[0])
            # The span encodes id and name separated by '---' markers.
            name = re.compile('---(.*?)$', re.DOTALL + re.IGNORECASE).search(span_text[4:]).group(1)
            stream_id = re.compile('---(.*?)---', re.DOTALL + re.IGNORECASE).search(span_text).group(1)

            stream = CreateList()
            stream.name = name
            stream.id = stream_id
            streamlist.append(stream)
        return streamlist
예제 #55
0
    def Play(self, stream_name, stream_id, subtitle):
        """Build a play object for an ARD document id.

        Scrapes mediaCollection.addMediaStream(...) calls from the player
        page, preferring RTMP streams over WMV; rtmpt streams are routed
        through the external flowplayer bridge.
        """
        url = self.url_base + '/ard/servlet/content/3517136?documentId=' + stream_id
        data = tools.urlopen(self.app, url)
        req = re.compile('mediaCollection.addMediaStream\((.*?)"\);')

        # Partition the captured argument strings into RTMP and WMV candidates.
        rtmp = []
        wmv = []
        for match in re.findall(req, str(data)):
            if 'rtmp' in match:
                # NOTE(review): offsets 7 / 11 strip fixed argument prefixes in
                # the scraped JS — verify against the current page markup.
                url = match[7:]
                rtmp.append(url)
            else:
                url = match[11:]
                wmv.append(url)

        rtmp_count = len(rtmp)
        wmv_count = len(wmv)
        if rtmp_count > 0:
            # Use the last listed RTMP entry.
            rtmpurl = rtmp[rtmp_count - 1]
            # Entry is '<domain>", "<playpath>' — split on the JS separator.
            rtmplist = rtmpurl.split('", "')

            playPath = rtmplist[1]
            rtmpURL = rtmplist[0]
            authPath = ''

            if 'rtmpt' in rtmpurl:
                # Tunnelled RTMP: hand off to the flowplayer web bridge.
                url = 'http://www.bartsidee.nl/flowplayer/index.html?net=' + str(
                    rtmpURL) + '&id=' + str(playPath)
                play = CreatePlay()
                play.content_type = 'video/x-flv'
                play.path = quote_plus(url)
                play.domain = 'bartsidee.nl'
                play.jsactions = quote_plus(
                    'http://bartsidee.nl/boxee/apps/js/flow.js')
            else:
                # Plain RTMP: play directly.
                play = CreatePlay()
                play.content_type = 'video/x-flv'
                play.rtmpurl = playPath
                play.rtmpdomain = rtmpURL
                play.rtmpauth = authPath

        else:
            # No RTMP available: fall back to the last WMV URL.
            wmvurl = wmv[wmv_count - 1]
            play = CreatePlay()
            play.path = wmvurl

        return play
예제 #56
0
    def Genre(self, genre, filter, page, totalpage):
        """List free episodes for *genre*, optionally narrowed by *filter*.

        Builds the browse URL, skips clip collections, and returns
        CreateEpisode entries labelled '[UPPERCASE]title[/UPPERCASE] info'.
        """
        url = self.url_base + '/' + genre

        if filter != "":
            url = url + '/' + str(self.filterlist[filter])
        url = url + '?'
        url = url + 'page=' + str(page) + '&has_free=1'
        data = tools.urlopen(self.app, url, {'cache': 3600})

        if data == "":
            mc.ShowDialogNotification("No genre found for " + str(genre))
            return []

        soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
        # The real page total is not parsed here; assume up to 10 pages.
        if totalpage == 1:
            totalpage = 10

        div_show = soup.find('table', {'id': 'results_list'})

        genrelist = list()
        for info in div_show.findAll('div', {'class': 'home-thumb'}):
            # Thumbs carrying clips_collection_background are clip collections,
            # not individual episodes; detect via IndexError on the lookup.
            try:
                info.findAll(attrs={"class": "clips_collection_background"})[0]
                episode = False
            except:
                episode = True

            if episode:
                div_title = info.findAll(
                    attrs={"class": "show-title-container"})[0]
                div_info = info.findAll(attrs={"class": "video-info"})[0]

                path = div_title.a['href']
                title = div_title.a.contents[0]
                # The info cell may be empty; fall back to no description.
                try:
                    desc = div_info.contents[0].replace('&nbsp;', '')
                except:
                    desc = ''

                genreitem = CreateEpisode()
                genreitem.name = '[UPPERCASE]' + title + '[/UPPERCASE] ' + desc
                genreitem.id = path
                genreitem.page = page
                genreitem.totalpage = totalpage
                genrelist.append(genreitem)

        return genrelist
예제 #57
0
    def List(self):
        """Return every programme linked from the site's programme menu."""
        html = tools.urlopen(self.app, self.url_base)
        soup = BeautifulSoup(html,
                             convertEntities=BeautifulSoup.HTML_ENTITIES,
                             smartQuotesTo="xml")

        menu = soup.find('div', {'id': 'programma_menu'})

        streamlist = []
        for link in menu.findAll('a'):
            entry = CreateList()
            entry.name = link.contents[0]
            entry.id = link['href']
            streamlist.append(entry)

        return streamlist