Example #1
0
 def do_clean(self):
     # Tear down addon state before exit: release child components first,
     # then drop this object's own attributes, then clean the shared
     # singleton services.
     logging.getLogger().debug(
         'addon exiting, deleting objects as part of exit plan...')
     self._xoze_context.do_clean()
     if self._current_addon is not None:
         self._current_addon.do_clean()
         del self._current_addon
         del self._current_addon_id
     if self._service_publisher is not None:
         self._service_publisher.unpublish_services()
         self._service_publisher.do_clean()
         del self._service_publisher
     # Drop the remaining per-addon attributes.
     for attr_name in ('_addon', '_addon_id', '_addon_ver', '_addon_path',
                       '_addon_profile_path', '_configurations',
                       '_xoze_context'):
         delattr(self, attr_name)
     # These are singletons: cleaning any freshly obtained instance cleans
     # the shared state.
     for singleton_factory in (HttpClient, CacheManager, SnapVideo):
         instance = singleton_factory()
         instance.do_clean()
         del instance
Example #2
0
def __retrieve_tv_shows__(tv_channel_url):
    """Scrape the running and finished TV shows for a channel page.

    tv_channel_url -- channel path relative to BASE_WSITE_URL, or None.

    Returns a dict with "running_tvshows" and "finished_tvshows" lists;
    each entry is a dict with "name", "url" and "iconimage" keys.
    """
    tv_channel = {}
    tv_channel["running_tvshows"] = []
    tv_channel["finished_tvshows"] = []

    # Bug fix: guard against None BEFORE any use. Previously the debug
    # call concatenated 'TV Channel URL: ' + tv_channel_url first, which
    # raised TypeError for None, and the dead None-path returned a bare
    # list while every other path returned the dict.
    if tv_channel_url is None:
        return tv_channel

    logging.getLogger().debug('TV Channel URL: ' + tv_channel_url)
    tv_channel_url = BASE_WSITE_URL + tv_channel_url
    logging.getLogger().debug(tv_channel_url)
    contentDiv = BeautifulSoup.SoupStrainer('li', {'class': 'categories'})
    soup = HttpClient().get_beautiful_soup(url=tv_channel_url,
                                           parseOnlyThese=contentDiv)
    # Entries before the 'Completed Shows' marker are running shows;
    # entries after it go into the finished list (the marker itself is
    # not appended).
    tv_shows = tv_channel["running_tvshows"]
    for title_tag in soup.findAll('li'):
        aTag = title_tag.findNext('a')
        tv_show_url = str(aTag['href'])
        if tv_show_url[0:4] != "http":
            tv_show_url = BASE_WSITE_URL + '/' + tv_show_url
        tv_show_name = aTag.getText()
        if not re.search('Completed Shows', tv_show_name, re.IGNORECASE):
            tv_shows.append({
                "name": http.unescape(tv_show_name),
                "url": tv_show_url,
                "iconimage": ""
            })
        else:
            tv_shows = tv_channel["finished_tvshows"]
    return tv_channel
Example #3
0
def __retrieve_tv_shows__(tv_channel_url):
    """Scrape running/finished TV shows for a channel; returns the dict."""
    tv_channel = {}
    tv_channel["running_tvshows"] = []
    tv_channel["finished_tvshows"] = []
    # Bug fix: the None check must run before tv_channel_url is used.
    # Previously 'TV Channel URL: ' + tv_channel_url was logged first and
    # raised TypeError for None; the None path also returned a list while
    # all other paths returned the dict.
    if tv_channel_url is None:
        return tv_channel
    logging.getLogger().debug('TV Channel URL: ' + tv_channel_url)
    tv_shows = tv_channel["running_tvshows"]
    tv_channel_url = BASE_WSITE_URL + tv_channel_url
    logging.getLogger().debug(tv_channel_url)
    contentDiv = BeautifulSoup.SoupStrainer('li', {'class':'categories'})
    soup = HttpClient().get_beautiful_soup(url=tv_channel_url, parseOnlyThese=contentDiv)
    for title_tag in soup.findAll('li'):
        aTag = title_tag.findNext('a')
        tv_show_url = str(aTag['href'])
        if tv_show_url[0:4] != "http":
            tv_show_url = BASE_WSITE_URL + '/' + tv_show_url
        tv_show_name = aTag.getText()
        # Shows listed after the 'Completed Shows' marker are finished.
        if not re.search('Completed Shows', tv_show_name, re.IGNORECASE):
            tv_shows.append({"name":http.unescape(tv_show_name), "url":tv_show_url, "iconimage":""})
        else:
            tv_shows = tv_channel["finished_tvshows"]
    return tv_channel
Example #4
0
 def do_clean(self):
     # Final cleanup before exit: child components first, then this
     # object's attributes, then the shared singleton services.
     logging.getLogger().debug('addon exiting, deleting objects as part of exit plan...')
     self._xoze_context.do_clean()
     current_addon = self._current_addon
     if current_addon is not None:
         current_addon.do_clean()
         del self._current_addon
         del self._current_addon_id
     publisher = self._service_publisher
     if publisher is not None:
         publisher.unpublish_services()
         publisher.do_clean()
         del self._service_publisher
     # Remove the remaining per-addon attributes.
     for name in ('_addon', '_addon_id', '_addon_ver', '_addon_path',
                  '_addon_profile_path', '_configurations', '_xoze_context'):
         delattr(self, name)
     # Cleaning any instance of these singletons cleans the shared state.
     for factory in (HttpClient, CacheManager, SnapVideo):
         service = factory()
         service.do_clean()
         del service
Example #5
0
def load_tv_show_episodes(req_attrib, modelMap):
    """Load episode list items for one TV-show page into modelMap.

    Reads 'tv-show-url', 'tv-show-name', 'channel-type', 'channel-name'
    and the optional 'tv-show-page' from req_attrib; stores the resulting
    xbmcgui list items under modelMap['tv_show_episode_items'].
    """
    logging.getLogger().debug('load tv show episodes...')
    url = req_attrib['tv-show-url']
    tv_show_url = req_attrib['tv-show-url']
    tv_show_name = req_attrib['tv-show-name']
    channel_type = req_attrib['channel-type']
    channel_name = req_attrib['channel-name']
    currentPage = 1

    # 'key in dict' instead of the Python-2-only dict.has_key().
    if 'tv-show-page' in req_attrib and req_attrib['tv-show-page'] != '':
        currentPage = int(req_attrib['tv-show-page'])
        if currentPage != 1:
            url = url + 'page/' + req_attrib['tv-show-page'] + '/'
    logging.getLogger().debug('load tv show episodes...' + url)
    contentDiv = BeautifulSoup.SoupStrainer('div', {'id': 'left-div'})
    soup = HttpClient().get_beautiful_soup(url=url + '?tag=video',
                                           parseOnlyThese=contentDiv)

    tv_show_episode_items = []

    threads = soup.findAll('h2', {'class': 'titles'})
    tv_show_episode_items.extend(
        __retrieveTVShowEpisodes__(threads, tv_show_name, channel_type,
                                   channel_name))
    logging.getLogger().debug('In DTB: total tv show episodes: %s' %
                              str(len(tv_show_episode_items)))

    pagesDiv = soup.findChild('p', {'class': 'pagination'})
    if pagesDiv is not None:
        pagesInfoTags = pagesDiv.findAllNext('a')
        for pagesInfoTag in pagesInfoTags:
            logging.getLogger().debug(pagesInfoTag)
            pageInfo = re.compile('page/(.+?)/').findall(pagesInfoTag['href'])

            if len(pageInfo) > 0:
                if re.search('Old', pagesInfoTag.getText(), re.IGNORECASE):
                    item = xbmcgui.ListItem(label='<< Older Entries')
                elif re.search('Next', pagesInfoTag.getText(), re.IGNORECASE):
                    item = xbmcgui.ListItem(label='Next Entries >>')
                else:
                    # Bug fix: previously 'item' was only bound in the two
                    # branches above, so an unmatched paginated link either
                    # raised NameError or mutated and re-appended the
                    # previous iteration's item. Skip unrecognized links.
                    continue
                item.setProperty('tv-show-page', pageInfo[0][0])
                item.setProperty('channel-type', channel_type)
                item.setProperty('channel-name', channel_name)
                item.setProperty('tv-show-name', tv_show_name)
                item.setProperty('tv-show-url', tv_show_url)
                tv_show_episode_items.append(item)
            else:
                # Links without a page/N/ segment point at the newest page.
                item = xbmcgui.ListItem(label='Newest Entries >>')
                item.setProperty('tv-show-page', '1')
                item.setProperty('channel-type', channel_type)
                item.setProperty('channel-name', channel_name)
                item.setProperty('tv-show-name', tv_show_name)
                item.setProperty('tv-show-url', tv_show_url)
                tv_show_episode_items.append(item)

    modelMap['tv_show_episode_items'] = tv_show_episode_items
Example #6
0
def load_tv_show_episodes(req_attrib, modelMap):
    """Load episode list items for one TV-show page into modelMap.

    Reads 'tv-show-url', 'tv-show-name', 'channel-type', 'channel-name'
    and the optional 'tv-show-page' from req_attrib; stores the resulting
    xbmcgui list items under modelMap['tv_show_episode_items'].
    """
    logging.getLogger().debug('load tv show episodes...')
    url = req_attrib['tv-show-url']
    tv_show_url = req_attrib['tv-show-url']
    tv_show_name = req_attrib['tv-show-name']
    channel_type = req_attrib['channel-type']
    channel_name = req_attrib['channel-name']
    currentPage = 1

    # 'key in dict' instead of the Python-2-only dict.has_key().
    if 'tv-show-page' in req_attrib and req_attrib['tv-show-page'] != '':
        currentPage = int(req_attrib['tv-show-page'])
        if currentPage != 1:
            url = url + 'page/' + req_attrib['tv-show-page'] + '/'
    logging.getLogger().debug('load tv show episodes...' + url)
    contentDiv = BeautifulSoup.SoupStrainer('div', {'id':'left-div'})
    soup = HttpClient().get_beautiful_soup(url=url + '?tag=video', parseOnlyThese=contentDiv)

    tv_show_episode_items = []

    threads = soup.findAll('h2', {'class':'titles'})
    tv_show_episode_items.extend(__retrieveTVShowEpisodes__(threads, tv_show_name, channel_type, channel_name))
    logging.getLogger().debug('In DTB: total tv show episodes: %s' % str(len(tv_show_episode_items)))

    pagesDiv = soup.findChild('p', {'class':'pagination'})
    if pagesDiv is not None:
        pagesInfoTags = pagesDiv.findAllNext('a')
        for pagesInfoTag in pagesInfoTags:
            logging.getLogger().debug(pagesInfoTag)
            pageInfo = re.compile('page/(.+?)/').findall(pagesInfoTag['href'])

            if len(pageInfo) > 0:
                if re.search('Old', pagesInfoTag.getText(), re.IGNORECASE):
                    item = xbmcgui.ListItem(label='<< Older Entries')
                elif re.search('Next', pagesInfoTag.getText(), re.IGNORECASE):
                    item = xbmcgui.ListItem(label='Next Entries >>')
                else:
                    # Bug fix: previously 'item' was only bound in the two
                    # branches above, so an unmatched paginated link either
                    # raised NameError or mutated and re-appended the
                    # previous iteration's item. Skip unrecognized links.
                    continue
                item.setProperty('tv-show-page', pageInfo[0][0])
                item.setProperty('channel-type', channel_type)
                item.setProperty('channel-name', channel_name)
                item.setProperty('tv-show-name', tv_show_name)
                item.setProperty('tv-show-url', tv_show_url)
                tv_show_episode_items.append(item)
            else:
                # Links without a page/N/ segment point at the newest page.
                item = xbmcgui.ListItem(label='Newest Entries >>')
                item.setProperty('tv-show-page', '1')
                item.setProperty('channel-type', channel_type)
                item.setProperty('channel-name', channel_name)
                item.setProperty('tv-show-name', tv_show_name)
                item.setProperty('tv-show-url', tv_show_url)
                tv_show_episode_items.append(item)

    modelMap['tv_show_episode_items'] = tv_show_episode_items
Example #7
0
def __retrieve_tv_shows__(tv_channel_url):
    """Return show dicts scraped from the channel's 'all-tv-shows' list."""
    if tv_channel_url is None:
        return []
    full_url = BASE_WSITE_URL + tv_channel_url
    strainer = BeautifulSoup.SoupStrainer('div', {'class': 'all-tv-shows'})
    soup = HttpClient().get_beautiful_soup(url=full_url,
                                           parseOnlyThese=strainer,
                                           accept_500_error=True)
    shows = []
    for entry in soup.find('ul').findChildren('li'):
        anchor = entry.findChild('a')
        href = str(anchor['href'])
        # Make site-relative links absolute.
        if href[0:4] != "http":
            href = BASE_WSITE_URL + '/' + href
        shows.append({"name": http.unescape(anchor.getText()),
                      "url": href,
                      "iconimage": ""})
    return shows
Example #8
0
def __retrieve_tv_shows__(tv_channel_url):
    """Return show dicts scraped from the channel's 'all-tv-shows' list."""
    if tv_channel_url is None:
        return []
    page_url = BASE_WSITE_URL + tv_channel_url
    only_shows_div = BeautifulSoup.SoupStrainer("div", {"class": "all-tv-shows"})
    soup = HttpClient().get_beautiful_soup(url=page_url,
                                           parseOnlyThese=only_shows_div,
                                           accept_500_error=True)
    result = []
    for li_tag in soup.find("ul").findChildren("li"):
        a_tag = li_tag.findChild("a")
        show_url = str(a_tag["href"])
        # Make site-relative links absolute.
        if show_url[0:4] != "http":
            show_url = BASE_WSITE_URL + "/" + show_url
        result.append({"name": http.unescape(a_tag.getText()),
                       "url": show_url,
                       "iconimage": ""})
    return result
Example #9
0
def __parseDesiHomeUrl__(video_url):
    """Extract a hoster video link from a DesiHome page, or return None."""
    logging.getLogger().debug('video_url = ' + video_url)
    html = HttpClient().get_html_content(url=video_url)
    # Probe the known hosters in the same order as before; the first one
    # mentioned in the page wins.
    known_hosts = (('dailymotion.com', 'http://www.dailymotion.com/'),
                   ('hostingbulk.com', 'http://hostingbulk.com/'),
                   ('movzap.com', 'http://movzap.com/'))
    for host_marker, link_prefix in known_hosts:
        if re.search(host_marker, html, flags=re.I):
            path = re.compile(host_marker + '/(.+?)"').findall(html)[0]
            return link_prefix + path + '&'
    return None
Example #10
0
def __retrieve_tv_shows__(tv_channel_url):
    """Return show dicts scraped from the channel's 'all-tv-shows' list."""
    collected = []
    if tv_channel_url is None:
        return collected
    channel_page = BASE_WSITE_URL + tv_channel_url
    shows_section = BeautifulSoup.SoupStrainer('div',
                                               {'class': 'all-tv-shows'})
    soup = HttpClient().get_beautiful_soup(url=channel_page,
                                           parseOnlyThese=shows_section,
                                           accept_500_error=True)
    show_list = soup.find('ul')
    for list_entry in show_list.findChildren('li'):
        link_tag = list_entry.findChild('a')
        link = str(link_tag['href'])
        # Make site-relative links absolute.
        if link[0:4] != "http":
            link = BASE_WSITE_URL + '/' + link
        collected.append({
            "name": http.unescape(link_tag.getText()),
            "url": link,
            "iconimage": ""
        })
    return collected
Example #11
0
def _retrieve_video_links_(req_attrib, modelMap):
    """Scrape an episode page and collect playable video links.

    The page lists sources as <span> headers, each followed by <a> part
    links.  Each new <span> flushes the previous source's accumulated
    parts into list_items via __preparePlayListItem__; each <a> becomes
    one part of the current source.

    NOTE(review): list_items is built but never stored into modelMap in
    this visible block, and the parts of the LAST source are never
    flushed -- presumably handled past the end of this excerpt; confirm
    against the full file.
    """
    
    modelMap['channel-name'] = req_attrib['channel-name']
    modelMap['tv-show-name'] = req_attrib['tv-show-name']
    modelMap['episode-name'] = req_attrib['episode-name']
    
    # State for the source currently being accumulated.
    video_source_id = 1
    video_source_img = None
    video_source_name = None
    video_part_index = 0
    video_playlist_items = []
    # When a source's link cannot be resolved, the rest of that source's
    # <a> tags are skipped until the next <span> starts a new source.
    ignoreAllLinks = False
    
    list_items = []
    contentDiv = BeautifulSoup.SoupStrainer('div', {'id':'left-div'})
    soup = HttpClient().get_beautiful_soup(url=req_attrib['episode-url'], parseOnlyThese=contentDiv)
#     soup = BeautifulSoup.BeautifulSoup(HttpClient().get_html_content(url=req_attrib['episode-url'])).findAll('blockquote', {'class':re.compile(r'\bpostcontent\b')})[0]
      
    centerTag = soup.findNext('center')
    logging.getLogger().debug(centerTag)
    prevChild = ''
    prevAFont = None
    isHD = 'false'
    videoSource = ''
    for child in soup.findChildren():
        if child.name == 'span':
            # A <span> starts a new source: flush the parts gathered for
            # the previous source, if any.
            if len(video_playlist_items) > 0:
                list_items.append(__preparePlayListItem__(video_source_id, video_source_img, video_source_name, video_playlist_items, modelMap, isHD))
            
            logging.getLogger().debug(videoSource)
            videoSource = child.getText()
            # A '720p' marker in the source label flags the source as HD.
            if(re.search('720p', videoSource, re.I)):
                isHD = 'true'
            else:
                isHD = 'false'
            if video_source_img is not None:
                video_source_id = video_source_id + 1
                video_source_img = None
                video_source_name = None
                video_part_index = 0
                video_playlist_items = []
            ignoreAllLinks = False
        elif not ignoreAllLinks and child.name == 'a':
            # Skip the site's Facebook self-promotion link.
            if (str(child['href']) != 'https://www.facebook.com/iamdesirulez'):
                video_part_index = video_part_index + 1
                video_link = {}
                video_link['videoTitle'] = 'Source #' + str(video_source_id) + ' | ' + 'Part #' + str(video_part_index) + ' | ' + child.getText()
                video_link['videoLink'] = str(child['href'])
                video_link['videoSource'] = videoSource
                try:
                    try:
                        __prepareVideoLink__(video_link)
                    except Exception, e:
                        logging.getLogger().exception(e)
                        # Fallback: let SnapVideo identify the host; give up
                        # (re-raise) if unknown or only the generic resolver.
                        video_hosting_info = SnapVideo().findVideoHostingInfo(video_link['videoLink'])
                        if video_hosting_info is None or video_hosting_info.get_name() == 'UrlResolver by t0mm0':
                            raise
                        video_link['videoSourceImg'] = video_hosting_info.get_icon()
                        video_link['videoSourceName'] = video_hosting_info.get_name()
                    video_playlist_items.append(video_link)
                    video_source_img = video_link['videoSourceImg']
                    video_source_name = video_link['videoSourceName']
                
                    item = xbmcgui.ListItem(label='Source #' + str(video_source_id) + ' | ' + 'Part #' + str(video_part_index) , iconImage=video_source_img, thumbnailImage=video_source_img)
                    item.setProperty('videoLink', video_link['videoLink'])
                    item.setProperty('videoTitle', video_link['videoTitle'])
                    item.setProperty('videoSourceName', video_source_name)
                    item.setProperty('isContinuousPlayItem', 'false')
                    list_items.append(item)
                
                    prevAFont = child.findChild('font')
                except:
                    # Best effort: an unrecognized source disables the rest
                    # of this source's links instead of aborting the page.
                    logging.getLogger().error('Unable to recognize a source = ' + str(video_link['videoLink']))
                    video_source_img = None
                    video_source_name = None
                    video_part_index = 0
                    video_playlist_items = []
                    ignoreAllLinks = True
                    prevAFont = None
Example #12
0
def _retrieve_video_links_(req_attrib, modelMap):
    """Scrape an episode page and collect playable video links.

    The page lists sources as <span> headers, each followed by <a> part
    links.  Each new <span> flushes the previous source's accumulated
    parts into list_items via __preparePlayListItem__; each <a> becomes
    one part of the current source.

    NOTE(review): list_items is built but never stored into modelMap in
    this visible block, and the parts of the LAST source are never
    flushed -- presumably handled past the end of this excerpt; confirm
    against the full file.
    """

    modelMap['channel-name'] = req_attrib['channel-name']
    modelMap['tv-show-name'] = req_attrib['tv-show-name']
    modelMap['episode-name'] = req_attrib['episode-name']

    # State for the source currently being accumulated.
    video_source_id = 1
    video_source_img = None
    video_source_name = None
    video_part_index = 0
    video_playlist_items = []
    # When a source's link cannot be resolved, the rest of that source's
    # <a> tags are skipped until the next <span> starts a new source.
    ignoreAllLinks = False

    list_items = []
    contentDiv = BeautifulSoup.SoupStrainer('div', {'id': 'left-div'})
    soup = HttpClient().get_beautiful_soup(url=req_attrib['episode-url'],
                                           parseOnlyThese=contentDiv)
    #     soup = BeautifulSoup.BeautifulSoup(HttpClient().get_html_content(url=req_attrib['episode-url'])).findAll('blockquote', {'class':re.compile(r'\bpostcontent\b')})[0]

    centerTag = soup.findNext('center')
    logging.getLogger().debug(centerTag)
    prevChild = ''
    prevAFont = None
    isHD = 'false'
    videoSource = ''
    for child in soup.findChildren():
        if child.name == 'span':
            # A <span> starts a new source: flush the parts gathered for
            # the previous source, if any.
            if len(video_playlist_items) > 0:
                list_items.append(
                    __preparePlayListItem__(video_source_id, video_source_img,
                                            video_source_name,
                                            video_playlist_items, modelMap,
                                            isHD))

            logging.getLogger().debug(videoSource)
            videoSource = child.getText()
            # A '720p' marker in the source label flags the source as HD.
            if (re.search('720p', videoSource, re.I)):
                isHD = 'true'
            else:
                isHD = 'false'
            if video_source_img is not None:
                video_source_id = video_source_id + 1
                video_source_img = None
                video_source_name = None
                video_part_index = 0
                video_playlist_items = []
            ignoreAllLinks = False
        elif not ignoreAllLinks and child.name == 'a':
            # Skip the site's Facebook self-promotion link.
            if (str(child['href']) != 'https://www.facebook.com/iamdesirulez'):
                video_part_index = video_part_index + 1
                video_link = {}
                video_link['videoTitle'] = 'Source #' + str(
                    video_source_id) + ' | ' + 'Part #' + str(
                        video_part_index) + ' | ' + child.getText()
                video_link['videoLink'] = str(child['href'])
                video_link['videoSource'] = videoSource
                try:
                    try:
                        __prepareVideoLink__(video_link)
                    except Exception, e:
                        logging.getLogger().error(e)
                        # Fallback: let SnapVideo identify the host; give up
                        # (re-raise) if unknown or only the generic resolver.
                        video_hosting_info = SnapVideo().findVideoHostingInfo(
                            video_link['videoLink'])
                        if video_hosting_info is None or video_hosting_info.get_name(
                        ) == 'UrlResolver by t0mm0':
                            raise
                        video_link[
                            'videoSourceImg'] = video_hosting_info.get_icon()
                        video_link[
                            'videoSourceName'] = video_hosting_info.get_name()
                    video_playlist_items.append(video_link)
                    video_source_img = video_link['videoSourceImg']
                    video_source_name = video_link['videoSourceName']

                    item = xbmcgui.ListItem(label='Source #' +
                                            str(video_source_id) + ' | ' +
                                            'Part #' + str(video_part_index),
                                            iconImage=video_source_img,
                                            thumbnailImage=video_source_img)
                    item.setProperty('videoLink', video_link['videoLink'])
                    item.setProperty('videoTitle', video_link['videoTitle'])
                    item.setProperty('videoSourceName', video_source_name)
                    item.setProperty('isContinuousPlayItem', 'false')
                    list_items.append(item)

                    prevAFont = child.findChild('font')
                except:
                    # Best effort: an unrecognized source disables the rest
                    # of this source's links instead of aborting the page.
                    logging.getLogger().error(
                        'Unable to recognize a source = ' +
                        str(video_link['videoLink']))
                    video_source_img = None
                    video_source_name = None
                    video_part_index = 0
                    video_playlist_items = []
                    ignoreAllLinks = True
                    prevAFont = None