Пример #1
0
def getMovies(url):
    """Build the movie directory listing for *url* in Kodi.

    Scrapes the page for movie links, thumbnails and release years, adds
    one directory entry per movie (updating a progress dialog), and adds
    a "Next Page" entry when more pages exist.
    """
    progress = xbmcgui.DialogProgress()
    progress.create('Progress', 'Please wait...')
    progress.update(1, "", "Loading list - 1%", "")

    soup = BeautifulSoup(http_req(url))

    # Highest page number listed in the pagination div.
    pages = str(soup.find("div", {"id": "numarpagini"}))
    pages = max(int(x) for x in re.findall(r'(\d+)</a>', pages))
    page = int(re.search(r'pagina=(\d+)', url).group(1))

    div = soup.find("div", {"id": "content"})
    links = div.findAll("a", {"class": "link"})
    thumbs = re.findall(r'<img src="../(.+?)"', str(div))
    years = re.findall(r'Aparitie: ?(\d+)', str(div))

    total = len(links)

    # Assumes thumbs/years line up index-for-index with links -- TODO confirm.
    for current, anchor in enumerate(links):
        year_match = re.search(r'(\d{4})', years[current])
        year = year_match.group(1) if year_match else 'unknown'
        name = "%s (%s)" % (htmlFilter(anchor.text), year)
        link = urlFilter(anchor['href'])
        thumbnail = urlFilter(thumbs[current])

        addDir(name, link, 8, thumbnail, name, folder=False)

        if progress.iscanceled():
            sys.exit()

        percent = int(((current + 1) * 100) / total)
        progress.update(percent, "", "Loading list - %d%%" % percent, "")

    # Add pagination entry unless this is already the last page.
    if page != pages:
        url = re.sub(r'pagina=\d+', 'pagina=' + str(page + 1), url)
        addDir("Next Page >>", url, 9)

    progress.close()

    xbmcplugin.endOfDirectory(int(sys.argv[1]))
Пример #2
0
def getMovies(url):
    """Build the movie directory listing for *url* in Kodi.

    Scrapes the page for movie links, thumbnails and release years, adds
    one directory entry per movie (updating a progress dialog), and adds
    a "Next Page" entry when more pages exist.
    """
    progress = xbmcgui.DialogProgress()
    progress.create('Progress', 'Please wait...')
    progress.update(1, "", "Loading list - 1%", "")

    soup = BeautifulSoup(http_req(url))

    # Highest page number listed in the pagination div.
    pages = str(soup.find("div", {"id": "numarpagini"}))
    pages = max(int(x) for x in re.findall(r'(\d+)</a>', pages))
    page = int(re.search(r'pagina=(\d+)', url).group(1))

    div = soup.find("div", {"id": "content"})
    links = div.findAll("a", {"class": "link"})
    thumbs = re.findall(r'<img src="../(.+?)"', str(div))
    years = re.findall(r'Aparitie: ?(\d+)', str(div))

    total = len(links)

    # Assumes thumbs/years line up index-for-index with links -- TODO confirm.
    for current, anchor in enumerate(links):
        year_match = re.search(r'(\d{4})', years[current])
        year = year_match.group(1) if year_match else 'unknown'
        name = "%s (%s)" % (htmlFilter(anchor.text), year)
        link = urlFilter(anchor['href'])
        thumbnail = urlFilter(thumbs[current])

        addDir(name, link, 8, thumbnail, name, folder=False)

        if progress.iscanceled():
            sys.exit()

        percent = int(((current + 1) * 100) / total)
        progress.update(percent, "", "Loading list - %d%%" % percent, "")

    # Add pagination entry unless this is already the last page.
    if page != pages:
        url = re.sub(r'pagina=\d+', 'pagina=' + str(page + 1), url)
        addDir("Next Page >>", url, 9)

    progress.close()

    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def load_events(sortByStatus):
    """Fetch LoL event posts from reddit and return them as namedtuples.

    Returns None when the request fails; otherwise a list of LoLEvent
    (title, status, eventId, imageUrl) tuples, sorted by status when
    *sortByStatus* is true.
    """
    response = PluginUtils.do_request(LOLEVENTURL)
    if response is None:
        return None

    events = []

    # Parse the reddit JSON listing.
    decoded_data = json.load(response)
    root = decoded_data['data']

    LoLEvent = namedtuple('LoLEvent', 'title status eventId imageUrl')

    for post in root['children']:
        # BUG FIX: imgUrl was only assigned inside the html branch, so a
        # post without selftext_html raised NameError (or silently reused
        # the previous post's image). Initialize it per post.
        imgUrl = ''
        html = post['data']['selftext_html']
        if html is not None:
            soup = BeautifulSoup(PluginUtils.unescape(html))
            # Event image is carried as the title of an '#EVENT_PICTURE' anchor.
            link = soup.find('a', href='#EVENT_PICTURE')
            if link is not None:
                imgUrl = link.title

        # Numeric status so events can easily be sorted by it.
        # link_flair_css_class values seen: "featured", "ongoing",
        # "finished", "twitchongoing", null.
        status = 99
        flair_css = post['data']['link_flair_css_class']
        if flair_css is not None:
            flair = flair_css.lower()
            if flair == FEATURED_STRING:
                status = 0
            elif flair == ACTIVE_STRING:
                status = 1
            elif flair == FINISHED_STRING:
                status = 2

        events.append(LoLEvent(title=post['data']['title'],
                               status=status,
                               eventId=post['data']['id'],
                               imageUrl=imgUrl))

    if sortByStatus:
        return sorted(events, key=attrgetter('status'))
    return events
Пример #4
0
	def getVideoUrl(self, smil):
		chunkArray = []

		print "Ooyala: --> Calculating video URL..."

		soup = BeautifulSoup(smil)
		rtmpDomain = soup.find('rtmpdomains').string
		rtmpDomain = rtmpDomain.split(',')[0]
		httpDomain = soup.find('domains').string
		httpDomain = httpDomain.split(',')[0]
		vData = soup.find('vdata')
		if vData:
			streams = vData.findAll('stream', attrs={'f':'h264'})
			streams = sorted(streams, key=lambda x: x['w'])
# 			version = vData['version']
			version = "1:1" ## Overriding 'version' for PostTV
			embedCode = vData['embedcode']
			ts = str(vData.find('ts').string)
			tsArray = ts.split(",")
			domain = ""
			if len(tsArray) > 2:
				domain = "http://" + httpDomain + "/"
			else:
				domain = rtmpDomain + "/mp4:s/"
			vidLength = len(tsArray)-1
			for i, startTime in enumerate(tsArray[:vidLength]):
				index = i
				start = int(startTime)
				br = int(streams[-1]['br'])
				width = int(streams[-1]['w'])
				url = self.getChunkUrl(embedCode, version, index, start, br, width)
				url = domain + url
				chunkArray.append(url)
		else:
			print "Ooyala: --> Could not find vData, trying the other."
			promo = soup.find('promo')
			promoVids = re.findall('[A-z0-9]{32}', promo)
			chunkArray.append(rtmpDomain + str(promoVids[0]) + str(promoVids[-1]))
			print "Ooyala: --> Formed the following URL: " + str(chunkArray[0])
		print "Ooyala: --> Successfully formed URL."
		return chunkArray
Пример #5
0
def parse_encrypted_url_response(stream_type, response_xml):
    """Extract the playable stream URL from an encrypted-URL XML response.

    Returns the raw path for archived (RTMP) streams, an http play URL for
    live streams, or None for any other stream type.
    """
    soup = BeautifulSoup(response_xml)
    raw = soup.find('path')
    # BUG FIX: result was referenced unconditionally at the end but only
    # assigned in the RTMP/LIVE branches, raising NameError for any other
    # stream type. Default it to None.
    result = None
    if stream_type == StreamTypes.RTMP:
        result = raw.getText()
    elif stream_type == StreamTypes.LIVE:
        # Strip the 11-char scheme prefix (presumably 'adaptive://' -- the
        # original comment said 'addaptive://', which is 12 chars; TODO
        # confirm against real responses).
        processed = raw.getText()[11:]
        host = processed[:processed.find('/')]
        url = processed[processed.find('/'):]
        result = 'http://' + host + '/play?url=' + url

    return result
Пример #6
0
def load_event_content(eventId):
    """Load the day/match breakdown for one event's reddit post.

    Returns None when the request fails, otherwise a list of LoLEventDay
    namedtuples -- one per <table> found in the post's selftext HTML.
    """

    LoLEventDay = namedtuple('LoLEventDay', 'dayId day matches recommended imageUrl')
    LoLEventMatch = namedtuple('LoLEventMatch', 'gameId team1 team2 videoLinks')

    url = LOLMATCHESURL % eventId

    response = PluginUtils.do_request(url)
    if (response is None):
        return None
    # Now lets parse results
    decoded_data = json.load(response)

    # The (HTML-escaped) body of the first post in the first listing.
    selfText = decoded_data[0]['data']['children'][0]['data']['selftext_html']

    eventTitle = ''
    days = []

    soup = BeautifulSoup(PluginUtils.unescape(selfText))

    # Get all the recommended matches, we add those to the events
    # We do it like this Game H1_C1_C4
    recommended = ''
    #a href="/spoiler"
    # NOTE(review): findAll returns an empty list (never None) when there
    # are no hits, so the guard below is effectively always taken.
    spoilers = soup.findAll("a", href="/spoiler")
    if (spoilers is not None):
        for spoiler in spoilers:
            # add them to the list
            games = spoiler.text.replace(',', '_')
            recommended += games + "_"

    # Event image is carried as the title of an '#EVENT_PICTURE' anchor.
    imgUrl = ''
    link = soup.find('a', href='#EVENT_PICTURE')
    if (link is not None):
        imgUrl = link.title

    # find all tables -- each table represents one day of matches
    tables = soup.findAll("table")
    for idx, table in enumerate(tables):
        if (table is not None):

            # The day title is carried as the title attribute of a link to
            # the sentinel URL http://www.table_title.com.
            titleLink = table.find("a", href="http://www.table_title.com")
            if (titleLink is not None):
                eventTitle = titleLink['title']

            YouTubeColumns = []
            Team1Index = -1
            Team2Index = -1

            # Investigate the right columns for youtube links
            rows = table.find("thead").findAll("tr")
            for row in rows :
                cols = row.findAll("th")
                for i, col in enumerate(cols):
                 if (col.text.lower() == "youtube"):
                     YouTubeColumns.append(i)
                 if (col.text.lower() == "team 1"):
                     Team1Index = i
                 if (col.text.lower() == "team 2"):
                     Team2Index = i

            #
            matches=[]

            # Body rows: one match per row; collect every YouTube link cell
            # found at the column indices discovered in the header above.
            rows = table.find("tbody").findAll("tr")
            for row in rows :
                videos = []
                cols = row.findAll("td")
                if (cols is not None):
                    for yv in YouTubeColumns:
                        if (cols[yv] is not None):
                            if (cols[yv].a is not None):

                                youTubeData = PluginUtils.parse_youtube_url(cols[yv].a['href'])
                                videos.append({'text' : cols[yv].a.text,
                                               'videoId' : youTubeData['videoId'],
                                               'time' : youTubeData['time'] })

                # NOTE(review): if the header had no "team 1"/"team 2"
                # columns, Team1Index/Team2Index stay -1 and this picks the
                # last column -- confirm that is intended; a row with no
                # <td> cells would also raise IndexError here.
                matches.append(LoLEventMatch(cols[0].text, cols[Team1Index].text, cols[Team2Index].text, videos))

            days.append(LoLEventDay(dayId = idx,
                                day=eventTitle,
                                matches = matches,
                                recommended = recommended,
                                imageUrl = imgUrl))
    return days
Пример #7
0
def load_events(sortByStatus, after):
    """Fetch one page of LoL event posts from reddit.

    Uses reddit pagination (/r/...json?limit=pagesize&after=postId); pass
    'none' as *after* for the first page. Returns None when the request
    fails, otherwise (afterPostId, events) where events is a list of
    LoLEvent namedtuples, sorted by status when *sortByStatus* is true.
    """
    urlAppend = '?limit=' + str(PAGE_SIZE)

    # BUG FIX: the original used "after is not 'none'" -- an identity
    # comparison against a string literal, which is implementation-
    # dependent. Use value equality.
    if after != 'none':
        urlAppend += '&after=' + after

    response = PluginUtils.do_request(LOLEVENTURL + urlAppend)
    if response is None:
        return None

    events = []

    # Parse the reddit JSON listing.
    decoded_data = json.load(response)
    root = decoded_data['data']
    afterPost = root['after']  # pagination cursor for the next request

    LoLEvent = namedtuple('LoLEvent', 'title status eventId createdOn imageUrl')

    for post in root['children']:
        # BUG FIX: imgUrl/isEvent were only assigned inside the html
        # branch, so a post without selftext_html raised NameError (or
        # silently reused the previous post's values). Initialize per post.
        imgUrl = ''
        isEvent = False
        html = post['data']['selftext_html']
        if html is not None:
            soup = BeautifulSoup(PluginUtils.unescape(html))
            # An '#EVENT_TITLE' anchor marks the post as an event.
            if soup.find('a', href='#EVENT_TITLE') is not None:
                isEvent = True
            # Event image is carried as the title of an '#EVENT_PICTURE' anchor.
            link = soup.find('a', href='#EVENT_PICTURE')
            if link is not None:
                imgUrl = link.title

        # Numeric status so events can easily be sorted by it.
        # link_flair_css_class values seen: "featured", "ongoing",
        # "finished", "twitchongoing", "finishedfeatured", null.
        status = 99
        flair_css = post['data']['link_flair_css_class']
        if flair_css is not None:
            flair = flair_css.lower()
            if flair == FEATURED_STRING:
                status = 0
            elif flair == ACTIVE_STRING:
                status = 1
            elif flair in (FINISHED_STRING, FINISHEDFEATURED_STRING):
                status = 2

        # Some posts don't have link_flair_css_class but are still events.
        if status == 99 and isEvent:
            status = 98

        events.append(LoLEvent(title=post['data']['title'],
                               status=status,
                               eventId=post['data']['id'],
                               createdOn=datetime.datetime.fromtimestamp(int(post['data']['created'])),
                               imageUrl=imgUrl))

    if sortByStatus:
        return afterPost, sorted(events, key=attrgetter('status'))
    return afterPost, events
Пример #8
0
def parse_game_servlet_response(response_xml):
    """Parse the game servlet XML into a Streams object.

    Determines whether the game is archived (RTMP) or live, then extracts
    the home/away stream paths (plus condensed paths for archived games)
    into streams.paths.
    """
    soup = BeautifulSoup(response_xml)
    streams = Streams()
    home_path = away_path = home_condensed_path = away_condensed_path = None

    # Figure out stream type.
    if soup.find('isarchived') and soup.find('isarchived').getText() == 'true':
        streams.stream_type = StreamTypes.RTMP
    else:
        # BUG FIX: was "streams.tream_type", which silently created a stray
        # attribute and left stream_type unset for live games.
        streams.stream_type = StreamTypes.LIVE

    # Home streams. The [:-2] slices drop a trailing two-character suffix
    # from the publish point -- TODO confirm what those two chars are.
    if soup.find('hashomeprogram') and soup.find('hashomeprogram').getText() == 'true':
        if streams.stream_type == StreamTypes.RTMP:
            home_path = soup.find('homeprogram').find('publishpoint').getText()[:-2]
            if soup.find('homecondensed'):
                home_condensed_path = soup.find('homecondensed').find('publishpoint').getText()[:-2]
        else:
            home_path = soup.find('homeprogramid').getText()

    # Away streams.
    if soup.find('hasawayprogram') and soup.find('hasawayprogram').getText() == 'true':
        if streams.stream_type == StreamTypes.RTMP:
            away_path = soup.find('awayprogram').find('publishpoint').getText()[:-2]
            if soup.find('awaycondensed'):
                # BUG FIX: was assigned to home_condensed_path, clobbering
                # the home value and never setting away_condensed_path.
                away_condensed_path = soup.find('awaycondensed').find('publishpoint').getText()[:-2]
        else:
            away_path = soup.find('awayprogramid').getText()

    streams.paths = {
            'home'              :   home_path,
            'home_condensed'    :   home_condensed_path,
            'away'              :   away_path,
            'away_condensed'    :   away_condensed_path }
    return streams