Example #1
def displayChannels(request_obj, response_obj):
    content = BeautifulSoup.SoupStrainer('div', {'class':re.compile(r'\bchannels\b')})
    soup = HttpClient().getBeautifulSoup(url='http://www.watchsuntv.com/play', parseOnlyThese=content)
    channels = soup.findAll('li', {'class':'channel-info'})
    list_items = XBMCInterfaceUtils.callBackDialogProgressBar(getattr(sys.modules[__name__], '__displayChannels__'), channels, 'Preparing channel items', 'Failed to retrieve channel information, please try again later')
    response_obj.extendItemList(list_items)
    response_obj.set_xbmc_sort_method(xbmcplugin.SORT_METHOD_LABEL)
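These snippets come from an XBMC add-on and share a common set of imports and helper modules. A minimal sketch of what they assume is below; the standard modules are certain, but HttpClient, HttpUtils, Logger, AddonUtils, XBMCInterfaceUtils, SnapVideo, ListItem and BASE_WSITE_URL are add-on-specific helpers, so their import paths are assumptions.

import re
import string
import sys
import time

import BeautifulSoup              # BeautifulSoup 3.x API (SoupStrainer, findAll, getText)
import xbmcgui
import xbmcplugin

# Add-on specific helpers -- the module paths below are assumptions, adjust to the add-on's layout:
# from common import HttpClient, HttpUtils, Logger, AddonUtils, XBMCInterfaceUtils, SnapVideo
# from common.ListItem import ListItem
# BASE_WSITE_URL = '...'          # base URL of the scraped site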
Example #2
def retrieveVideoLinks(request_obj, response_obj):
    video_source_id = 1
    video_source_img = None
    video_source_name = None
    video_part_index = 0
    video_playlist_items = []
    ignoreAllLinks = False
    
    content = BeautifulSoup.SoupStrainer('blockquote', {'class':re.compile(r'\bpostcontent\b')})
    soup = HttpClient().getBeautifulSoup(url=request_obj.get_data()['episodeUrl'], parseOnlyThese=content)
    for e in soup.findAll('br'):
        e.extract()
    Logger.logDebug(soup)
    if soup.has_key('div'):
        soup = soup.findChild('div', recursive=False)
    prevChild = ''
    prevAFont = None
    # Walk the post content sequentially: <img>/<b>/<font> tags mark the start of a
    # new video source, while the <a> tags that follow are the parts of that source.
    for child in soup.findChildren():
        if (child.name == 'img' or child.name == 'b' or (child.name == 'font' and not child.findChild('a'))):
            if (child.name == 'b' and prevChild == 'a') or (child.name == 'font' and child == prevAFont):
                continue
            else:
                if len(video_playlist_items) > 0:
                    response_obj.addListItem(__preparePlayListItem__(video_source_id, video_source_img, video_source_name, video_playlist_items))
                if video_source_img is not None:
                    video_source_id = video_source_id + 1
                    video_source_img = None
                    video_source_name = None
                    video_part_index = 0
                    video_playlist_items = []
                ignoreAllLinks = False
        elif not ignoreAllLinks and child.name == 'a' and not re.search('multi', str(child['href']), re.IGNORECASE):
            video_part_index = video_part_index + 1
            video_link = {}
            video_link['videoTitle'] = 'Source #' + str(video_source_id) + ' | ' + 'Part #' + str(video_part_index) + ' | ' + child.getText()
            video_link['videoLink'] = str(child['href'])
            try:
                try:
                    __prepareVideoLink__(video_link)
                except Exception, e:
                    Logger.logFatal(e)
                    video_hosting_info = SnapVideo.findVideoHostingInfo(video_link['videoLink'])
                    if video_hosting_info is None or video_hosting_info.get_video_hosting_name() == 'UrlResolver by t0mm0':
                        raise
                    video_link['videoSourceImg'] = video_hosting_info.get_video_hosting_image()
                    video_link['videoSourceName'] = video_hosting_info.get_video_hosting_name()
                video_playlist_items.append(video_link)
                video_source_img = video_link['videoSourceImg']
                video_source_name = video_link['videoSourceName']
                
                item = ListItem()
                item.add_request_data('videoLink', video_link['videoLink'])
                item.add_request_data('videoTitle', video_link['videoTitle'])
                item.set_next_action_name('SnapAndPlayVideo')
                xbmcListItem = xbmcgui.ListItem(label='Source #' + str(video_source_id) + ' | ' + 'Part #' + str(video_part_index) , iconImage=video_source_img, thumbnailImage=video_source_img)
                item.set_xbmc_list_item_obj(xbmcListItem)
                response_obj.addListItem(item)
                prevAFont = child.findChild('font')
            except:
                # Skip the remaining links of this source if preparing one of them fails.
                ignoreAllLinks = True
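The loop above follows a simple accumulate-and-flush pattern: <a> parts are collected into video_playlist_items until the next source marker (<img>/<b>/<font>) appears, at which point the collected parts are emitted as one playlist entry. A standalone sketch of that pattern, independent of the add-on API:

def group_parts(tokens):
    # tokens is a sequence of ('source', value) and ('part', value) pairs,
    # mirroring the marker tags and <a> links encountered in document order.
    playlists, current = [], []
    for kind, value in tokens:
        if kind == 'source':
            if current:               # flush the parts of the previous source
                playlists.append(current)
            current = []
        else:
            current.append(value)     # accumulate another part of this source
    if current:                       # flush the trailing group (cf. Example #5)
        playlists.append(current)
    return playlists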
Example #3
def __retrieveTVShows__(tvShowsUrl):
    tvShows = []
    if tvShowsUrl is None:
        return tvShows
    tvShowsUrl = BASE_WSITE_URL + tvShowsUrl
    contentDiv = BeautifulSoup.SoupStrainer('div', {'id':'forumbits', 'class':'forumbits'})
    soup = HttpClient().getBeautifulSoup(url=tvShowsUrl, parseOnlyThese=contentDiv)
    for tvShowTitleTag in soup.findAll('h2', {'class':'forumtitle'}):
        aTag = tvShowTitleTag.find('a')
        tvshowUrl = str(aTag['href'])
        if tvshowUrl[0:4] != "http":
            tvshowUrl = BASE_WSITE_URL + '/' + tvshowUrl
        tvshowName = aTag.getText()
        if not re.search('Past Shows', tvshowName, re.IGNORECASE):
            tvShows.append({"name":HttpUtils.unescape(tvshowName), "url":tvshowUrl})
    return tvShows
Example #4
def retrieveTVShowEpisodes(request_obj, response_obj):
    contentDiv = BeautifulSoup.SoupStrainer('div', {'id':'content'})
    url = request_obj.get_data()['tvShowUrl']
    channelType = request_obj.get_data()['channelType']
    if request_obj.get_data().has_key('page'):
        url = url + 'page/' + request_obj.get_data()['page']
    soup = HttpClient().getBeautifulSoup(url=url, parseOnlyThese=contentDiv)
    for aTag in soup.findAll('a', {'rel':'bookmark'}):
        episodeName = aTag.getText()
        try:
            time.strptime(episodeName, '%B %d, %Y')
            continue
        except:
            if re.search('Written Episode', episodeName):
                pass
            else:
                item = ListItem()
                item.add_request_data('episodeName', episodeName)
                item.add_request_data('episodeUrl', str(aTag['href']))
                item.set_next_action_name(channelType + '_Episode_VLinks')
                xbmcListItem = xbmcgui.ListItem(label=episodeName)
                item.set_xbmc_list_item_obj(xbmcListItem)
                response_obj.addListItem(item)
            
    pagesDiv = soup.find('div', {'class':'wp-pagenavi'})
    if pagesDiv is not None:
        pagesInfoTag = pagesDiv.find('span', {'class':'pages'}, recursive=False)
        if pagesInfoTag is not None:
            pageInfo = re.compile('Page (.+?) of (.+?) ').findall(pagesInfoTag.getText() + ' ')
            currentPage = int(pageInfo[0][0].replace(',',''))
            totalPages = int(pageInfo[0][1].replace(',',''))
            # Only link a window of pages: the first, the last, and the pages
            # immediately before and after the current one.
            for page in range(1, totalPages + 1):
                if page == 1 or page == totalPages or page == currentPage - 1 or page == currentPage + 1:
                    if page != currentPage:
                        item = ListItem()
                        item.add_request_data('channelType', channelType)
                        item.add_request_data('tvShowName', request_obj.get_data()['tvShowName'])
                        item.add_request_data('tvShowUrl', request_obj.get_data()['tvShowUrl'])
                        if page != 1:
                            item.add_request_data('page', str(page))
                        pageName = AddonUtils.getBoldString('              ->              Page #' + str(page))
                            
                        item.set_next_action_name('Show_Episodes_Next_Page')
                        xbmcListItem = xbmcgui.ListItem(label=pageName)
                        item.set_xbmc_list_item_obj(xbmcListItem)
                        response_obj.addListItem(item)
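The paging block only emits navigation entries for a small window of pages: the first page, the last page, and the pages directly before and after the current one. The same selection rule as a standalone sketch:

def pages_to_link(current_page, total_pages):
    # Pages that get a navigation item: first, last, and current +/- 1,
    # excluding the page that is already being displayed.
    pages = []
    for page in range(1, total_pages + 1):
        if page in (1, total_pages, current_page - 1, current_page + 1) and page != current_page:
            pages.append(page)
    return pages

# pages_to_link(5, 12) -> [1, 4, 6, 12]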
Example #5
def retrievePakVideoLinks(request_obj, response_obj):
    video_source_id = 0
    video_source_img = None
    video_part_index = 0
    video_playlist_items = []
    
    contentDiv = BeautifulSoup.SoupStrainer('div', {'id':'restricted-content', 'class':'post-content'})
    soup = HttpClient().getBeautifulSoup(url=request_obj.get_data()['episodeUrl'], parseOnlyThese=contentDiv)
    videoFrameTags = soup.findAll('iframe', {'class':re.compile('(youtube|dailymotion)-player')})
    for frameTag in videoFrameTags:
        videoLink = str(frameTag['src'])
        source_img = None
        if re.search('youtube', videoLink):
            source_img = 'http://www.automotivefinancingsystems.com/images/icons/socialmedia_youtube_256x256.png'
        elif re.search('dailymotion', videoLink):
            source_img = 'http://aux.iconpedia.net/uploads/1687271053.png'
            
        # A change in source image means a new video host: flush the parts collected
        # so far as one playlist entry and start a new group.
        if video_source_img is None or video_source_img != source_img:
            if len(video_playlist_items) > 0:
                response_obj.addListItem(__preparePlayListItem__(video_source_id, video_source_img, video_playlist_items))
            video_source_id = video_source_id + 1
            video_source_img = source_img
            video_part_index = 0
            video_playlist_items = []
            
        video_part_index = video_part_index + 1
        video_link = {}
        video_link['videoTitle'] = 'Source #' + str(video_source_id) + ' | ' + 'Part #' + str(video_part_index)
        video_link['videoLink'] = videoLink
        video_playlist_items.append(video_link)
        
        item = ListItem()
        item.add_request_data('videoLink', video_link['videoLink'])
        item.add_request_data('videoTitle', video_link['videoTitle'])
        item.set_next_action_name('SnapAndPlayVideo')
        xbmcListItem = xbmcgui.ListItem(label='Source #' + str(video_source_id) + ' | ' + 'Part #' + str(video_part_index) , iconImage=video_source_img, thumbnailImage=video_source_img)
        item.set_xbmc_list_item_obj(xbmcListItem)
        response_obj.addListItem(item)
            
    if len(video_playlist_items) > 0:
        response_obj.addListItem(__preparePlayListItem__(video_source_id, video_source_img, video_playlist_items))
Example #6
def __retrieveChannels__(tvChannels, dtUrl, channelType):
    contentDiv = BeautifulSoup.SoupStrainer('div', {'class':'copy fix'})
    soup = HttpClient().getBeautifulSoup(url=dtUrl, parseOnlyThese=contentDiv)
    for tvChannelTag in soup.findAll('tbody'):
        try:
            tvChannel = {}
            running_tvshows = []
            finished_tvshows = []
            tmp_tvshows_list = None
            firstRow = False
            for trTag in tvChannelTag.findAll('tr', recursive=False):
                # The first row carries the channel logo and name; later rows hold the
                # "running"/"finished" section headers and the show links under them.
                if not firstRow:
                    channelImg = str(trTag.find('img')['src'])
                    channelName = re.compile(BASE_WSITE_URL + '/category/(tv-serials|pakistan-tvs)/(.+?)/').findall(str(trTag.find('a')['href']))[0][1]
                    channelName = string.upper(channelName.replace('-', ' '))
                    tvChannels[channelName] = tvChannel
                    tvChannel['iconimage'] = channelImg
                    tvChannel['channelType'] = channelType
                    firstRow = True
                else:
                    divTag = trTag.find('div')
                    if divTag is not None:
                        txt = divTag.getText()
                        if re.search('running', txt, flags=re.IGNORECASE):
                            tmp_tvshows_list = running_tvshows
                            tvChannel['running_tvshows'] = running_tvshows
                        elif re.search('finished', txt, flags=re.IGNORECASE):
                            tmp_tvshows_list = finished_tvshows
                            tvChannel['finished_tvshows'] = finished_tvshows
                        else:
                            print 'UNKNOWN TV SHOW CATEGORY'
                    else:
                        for aTag in trTag.findAll('a'):
                            tvshowUrl = str(aTag['href'])
                            tvshowName = aTag.getText()
                            tmp_tvshows_list.append({'name':HttpUtils.unescape(tvshowName), 'url':tvshowUrl})
        except:
            print 'Failed to load a TV channel\'s links.'
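The tvChannels dictionary filled in above maps an upper-cased channel name to its metadata and show lists. An illustrative entry with placeholder values (only the keys and overall shape come from the code; the names and URLs are made up):

tvChannels = {
    'SOME CHANNEL': {
        'iconimage': 'http://example.com/some-channel-logo.png',   # placeholder logo URL
        'channelType': 'tv-serials',                                # whatever channelType was passed in
        'running_tvshows': [
            {'name': 'Some Running Show', 'url': 'http://example.com/some-running-show/'},
        ],
        'finished_tvshows': [
            {'name': 'Some Finished Show', 'url': 'http://example.com/some-finished-show/'},
        ],
    },
}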