Example #1
    def __init__(self, params):
        """Build a Kodi directory listing (or resolve playback) for
        xtheatre.net, dispatching on params['mode'].

        params: dict-like; 'mode' is required, and the 'categories',
        'list' and 'play' branches also read 'url' and 'content'.
        Side effects only: items are rendered via Addon.add_items /
        end_of_directory, or playback is started via Playback.
        """
        # Python 2 / BeautifulSoup 3 style (findAll, parseOnlyThese).
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__  # module name identifies this scraper site
        mode = params['mode']

        home_url = 'http://xtheatre.net/'
        search_url = home_url + '?s='
        # Hrefs to skip while scraping listing pages.
        # NOTE(review): these are watchxxxhd.net URLs while home_url is
        # xtheatre.net, so the membership tests below likely never match;
        # the first and last entries are also duplicates — confirm intended.
        false_positives = [
            'http://watchxxxhd.net/watch-full-movies-hd/',
            'http://watchxxxhd.net', 'http://watchxxxhd.net/category/movies/',
            'http://watchxxxhd.net/category/ategorized222/',
            'http://watchxxxhd.net/watch-full-movies-hd/'
        ]

        # --- Top-level menu: "all" listing, categories, search ---
        # NOTE(review): 'image' is not defined in this method; presumably a
        # module-level icon/path global — TODO confirm against the full file.
        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url + '?filtre=date&cat=0',
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url + 'categories/',
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            # Shared favourites/history and extended menu entries.
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        # --- Category index: parse <ul class="listing-cat"> entries ---
        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'ul', {'class': 'listing-cat'}))
            item_list = []
            if soup:
                for item in soup.findAll('li'):
                    if item:
                        if item.a.get('href') not in false_positives:
                            # Video count badge, e.g. "12 videos" -> "12";
                            # default to '0' when the badge is missing.
                            try:
                                vidcount = item.findAll(
                                    'span',
                                    {'class': 'nb_cat border-radius-5'
                                     })[0].string.encode('UTF-8')
                                vidcount = re.sub('\svideo[s]*', '', vidcount)
                            except:
                                vidcount = '0'
                            # Skip empty categories.
                            if vidcount and vidcount != '0':
                                # Prefer the lazy-loaded image URL over src.
                                # NOTE(review): Tag.get() returns None rather
                                # than raising, so these excepts are dead and
                                # the 'src' fallback is never reached.
                                img = item.find('img')
                                if img:
                                    try:
                                        img = img.get('data-lazy-src')
                                    except:
                                        try:
                                            img = img.get('src')
                                        except:
                                            img = ''
                                if not img:
                                    img = ''
                                # Append the count to the title, "Name (12)".
                                title = item.a.get('title').encode(
                                    'UTF-8') + ' (%s)' % vidcount
                                item_list.extend([{
                                    'site':
                                    site,
                                    'mode':
                                    'list',
                                    'url':
                                    item.a.get('href'),
                                    'content':
                                    '',
                                    'title':
                                    title,
                                    'cover_url':
                                    a.image(img, image),
                                    'backdrop_url':
                                    a.art(),
                                    'type':
                                    3
                                }])

            a.add_items(item_list)
            a.end_of_directory()

        # --- Video listing page, plus search / go-to-page entry points ---
        elif mode == 'list':
            if params.get('content', '') == 'search':
                # Prompt for a search term; abort the plugin run on cancel.
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                # Prompt for a page number, bounded by the last page
                # inferred from the current /page/N/ URL (10000 fallback).
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(
                html,
                parseOnlyThese=SoupStrainer(
                    'ul', {'class': 'listing-videos listing-extract'}))
            item_list = []
            # Reuse params as the template for every playable item.
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll(
                        'li', {'class': 'border-radius-5 box-shadow'}):
                    if item:
                        if item.a.get('href') not in false_positives:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item.a.get('href')
                            _dict['title'] = item.a.get('title').encode(
                                'UTF-8')
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            # Same lazy-src-then-src pattern as above; the
                            # excepts are likewise dead since get() is safe.
                            img = item.find('img')
                            if img:
                                try:
                                    img = img.get('data-lazy-src')
                                except:
                                    try:
                                        img = img.get('src')
                                    except:
                                        img = ''
                            if not img:
                                img = ''
                            _dict['cover_url'] = a.image(img)
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            _dict['sub_site'] = site
                            plot = item.find('div', {'class': 'right'})
                            if plot:
                                plot = plot.p.contents[0].encode('utf-8')
                                _dict['plot'] = plot
                                _dict['plotoutline'] = plot
                            item_list.extend([_dict])
            # --- Pagination: find "last page", then prev/next neighbours ---
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'pagination'}))
            last_item = False
            if soup:
                # "Last »" anchor (or class 'last') gives the last-page URL.
                for item in soup.findAll('a'):
                    if (item.string.encode('UTF-8')
                            == 'Last »') or (item.get('class') == 'last'):
                        last_item = item.get('href')
                        break
                if last_item is False:
                    # Fallback: the final 'inactive' anchor in the pager.
                    for last_item in soup.findAll('a', {'class': 'inactive'}):
                        pass
                    if last_item: last_item = last_item.get('href')
                # Siblings of the "current" page marker yield prev/next links.
                item = soup.find('span', {'class': 'current'})
                if item:
                    if item.parent:
                        item = item.parent
                        if item.previousSibling:
                            if item.previousSibling.find('a'):
                                item_list.extend([{
                                    'site':
                                    site,
                                    'mode':
                                    'list',
                                    'url':
                                    item.previousSibling.a.get('href'),
                                    'content':
                                    params['content'],
                                    'title':
                                    a.language(30017, True),
                                    'cover_url':
                                    a.image('previous.png', image),
                                    'backdrop_url':
                                    a.art(),
                                    'type':
                                    3
                                }])
                        if item.nextSibling:
                            if item.nextSibling.find('a'):
                                item_list.extend([{
                                    'site':
                                    site,
                                    'mode':
                                    'list',
                                    'url':
                                    item.nextSibling.a.get('href'),
                                    'content':
                                    params['content'],
                                    'title':
                                    a.language(30018, True),
                                    'cover_url':
                                    a.image('next.png', image),
                                    'backdrop_url':
                                    a.art(),
                                    'type':
                                    3
                                }])
            if last_item:
                # "Go to page..." entry reusing the last-page URL found above.
                item_list.extend([{
                    'site': site,
                    'mode': 'list',
                    'url': last_item,
                    'content': 'goto',
                    'title': a.language(30019, True),
                    'cover_url': a.image('goto.png', image),
                    'backdrop_url': a.art(),
                    'type': 3
                }])

            a.add_items(item_list)
            a.end_of_directory()

        # --- Resolve a video page into one or more playable source URLs ---
        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'video-embed'}))
            item_list = []
            if soup:
                # Obfuscated <script> tags (e.g. "s_c_r_i_p_t") carrying
                # videomega.tv validatehash / ref= embeds.
                for script in soup.findAll(re.compile('s_*c_*r_*i_*p_*t')):
                    item = ''
                    if script.get('src'):
                        if 'http://videomega.tv/validatehash.php' in script[
                                'src']:
                            item = script['src']
                        elif 'ref=' in script.get('src'):
                            temp = re.search('.*ref=[\'"](.+?)[\'"]',
                                             script.get('src'))
                            if temp:
                                item = 'http://videomega.tv/iframe.php?ref=' + temp.group(
                                    1)
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
                # Plain iframes: prefer data-lazy-src (unwrapping an
                # "...old=<url>" redirect parameter when present).
                if soup.find('iframe', src=True):
                    item = ''
                    for iframe in soup.findAll('iframe', src=True):
                        if iframe.get('data-lazy-src'):
                            item = iframe.get('data-lazy-src')
                            r = re.search('.+old=(.+)$', item)
                            if r:
                                item = r.group(1)
                        else:
                            item = iframe.get('src').replace('\\', '')
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
            # Fallback: iframes inside <div id="video-infos"> paragraphs.
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'video-infos'}))
            if soup:
                item = ''
                for p in soup.findAll('p'):
                    if p.iframe:
                        item = p.iframe.get('src')
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
            # Hand the collected candidates to the player, or alert if none.
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #2
    def __init__(self, params):
        """Build a Kodi directory listing (or resolve playback) for
        pornhardx.com, dispatching on params['mode'].

        params: dict-like; 'mode' is required, and the 'categories',
        'list' and 'play' branches also read 'url' and 'content'.
        Side effects only: items are rendered via Addon.add_items /
        end_of_directory, or playback is started via Playback.
        """
        # Python 2 / BeautifulSoup 3 style (findAll, parseOnlyThese).
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__  # module name identifies this scraper site
        mode = params['mode']

        home_url = 'http://pornhardx.com/'
        movies_url = home_url + 'category/full-movie/'
        scenes_url = home_url + 'video/'
        search_url = home_url + '?s='
        # Navigation hrefs to skip while scraping listing/category pages.
        false_positives = [
            'http://pornhardx.com/video',
            'http://pornhardx.com/video/?order=viewed',
            'http://pornhardx.com/video/?order=liked', 'http://pornhardx.com/'
        ]

        # --- Top-level menu: all scenes, recent, categories, search ---
        # NOTE(review): 'image' is not defined in this method; presumably a
        # module-level icon/path global — TODO confirm against the full file.
        if mode == 'main':
            item_list = []
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'title': a.language(30003),
                'content': '',
                'url': home_url,
                'cover_url': a.image('recent.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend([{
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            # Shared favourites/history and extended menu entries.
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        # --- Category index: anchors inside the navigation wrapper,
        #     split into full-movie vs. scene categories by URL ---
        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'navigation-wrapper'}))
            item_list = []
            if soup:
                for item in soup.findAll('a', {'href': True}):
                    if item:
                        if item.get('href') not in false_positives:
                            if 'full-movie' in params['url']:
                                # Movie mode: keep only full-movie sub-links
                                # (but not the category root itself).
                                if movies_url != item.get(
                                        'href') and 'full-movie' in item.get(
                                            'href'):
                                    item_list.extend([{
                                        'site':
                                        site,
                                        'mode':
                                        'list',
                                        'url':
                                        item.get('href'),
                                        'content':
                                        '',
                                        'title':
                                        item.contents[0].encode('UTF-8'),
                                        'cover_url':
                                        a.image(image, image),
                                        'backdrop_url':
                                        a.art(),
                                        'type':
                                        3
                                    }])
                            elif 'full-movie' not in item.get('href'):
                                # Scene mode: keep only non-full-movie links.
                                item_list.extend([{
                                    'site':
                                    site,
                                    'mode':
                                    'list',
                                    'url':
                                    item.get('href'),
                                    'content':
                                    '',
                                    'title':
                                    item.contents[0].encode('UTF-8'),
                                    'cover_url':
                                    a.image(image, image),
                                    'backdrop_url':
                                    a.art(),
                                    'type':
                                    3
                                }])
            a.add_items(item_list)
            a.end_of_directory()

        # --- Video listing page, plus search / go-to-page entry points ---
        elif mode == 'list':
            if params.get('content', '') == 'search':
                # Prompt for a search term; abort the plugin run on cancel.
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                # Prompt for a page number, bounded by the last page
                # inferred from the current /page/N/ URL (10000 fallback).
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(
                html,
                parseOnlyThese=SoupStrainer(
                    'div',
                    {'class': re.compile('col-sm-8(?:\s*main-content)*')}))
            item_list = []
            # Reuse params as the template for every playable item.
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                # Listing tiles come in two markups: grid items and posts.
                for item in soup.findAll(
                        'div',
                    {
                        'class':
                        re.compile(
                            '.*(?:col-xs-6 item|post type-post status-publish).*'
                        )
                    }):
                    if item:
                        if item.a.get('href') not in false_positives:
                            _dict = xbmcdict.copy()
                            # Scenes (non-full-movie pages) are shorter and
                            # tagged as episodes rather than movies.
                            if 'full-movie' not in params['url']:
                                _dict['duration'] = '1500'
                                _dict['content'] = 'episodes'
                            # Title/link live under <h3> or <h2> depending
                            # on the tile markup variant.
                            if item.h3:
                                _dict['url'] = item.h3.a.get('href')
                                if item.h3.a.contents:
                                    _dict['title'] = item.h3.a.contents[
                                        0].encode('UTF-8')
                                else:
                                    _dict['title'] = 'Untitled'
                            elif item.h2:
                                _dict['url'] = item.h2.a.get('href')
                                if item.h2.a.contents:
                                    _dict['title'] = item.h2.a.contents[
                                        0].encode('UTF-8')
                                else:
                                    _dict['title'] = 'Untitled'
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            _dict['cover_url'] = a.image(item.img.get('src'))
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            _dict['sub_site'] = site

                            item_list.extend([_dict])
            # --- Pagination: <ul class="pagination"> markup first... ---
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'ul', {'class': 'pagination'}))
            if soup.li:
                item = soup.find('a', {'class': 'prev page-numbers'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30017, True),
                        'cover_url': a.image(image, image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('a', {'class': 'next page-numbers'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image(image, image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                    # Last-page link: the anchor just before "next".
                    if len(soup.findAll('a')) > 2:
                        last_item = soup.find('a', {
                            'class': 'next page-numbers'
                        }).parent.previousSibling.a.get('href')
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': last_item,
                            'content': 'goto',
                            'title': a.language(30019, True),
                            'cover_url': a.image(image, image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
                else:
                    # No "next" link (already on the last page): derive the
                    # last-page URL from the sibling of the current marker.
                    item = soup.find('span', {'class': 'page-numbers current'})
                    if item:
                        if len(soup.findAll('a')) > 2:
                            last_item = soup.find(
                                'span', {
                                    'class': 'page-numbers current'
                                }).parent.previousSibling.a.get('href')
                            item_list.extend([{
                                'site':
                                site,
                                'mode':
                                'list',
                                'url':
                                last_item,
                                'content':
                                'goto',
                                'title':
                                a.language(30019, True),
                                'cover_url':
                                a.image('goto.png', image),
                                'backdrop_url':
                                a.art(),
                                'type':
                                3
                            }])
            else:
                # ...falling back to the older <ul class="pager"> markup.
                soup = BeautifulSoup(html,
                                     parseOnlyThese=SoupStrainer(
                                         'ul', {'class': 'pager'}))
                item = soup.find('li', {'class': 'previous'})
                if item:
                    item_list.extend([{
                        'site':
                        site,
                        'mode':
                        'list',
                        'url':
                        item.previousSibling.get('href'),
                        'content':
                        params['content'],
                        'title':
                        a.language(30017, True),
                        'cover_url':
                        a.image('previous.png', image),
                        'backdrop_url':
                        a.art(),
                        'type':
                        3
                    }])
                item = soup.find('li', {'class': 'next'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.previousSibling.get('href'),
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
            a.add_items(item_list)
            a.end_of_directory()

        # --- Resolve a video page into one or more playable source URLs ---
        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
            item = ''
            item_list = []
            if soup:
                # Flash embeds: extract the proxy.link target from FlashVars.
                for item in soup.findAll('param', {'name': 'FlashVars'}):
                    item = item.get('value')
                    item = re.search('.*?proxy\.link=(.+?)&(?:proxy|skin).*?',
                                     item)
                    # NOTE(review): this compares a regex match object against
                    # a list of dicts, so the test is always true and the
                    # dedup branch below is dead — confirm intended.
                    if item:
                        if item not in item_list:
                            item = item.group(1)
                        else:
                            item = ''
                    else:
                        item = ''
                    xbmcdict = XBMCDict(0).update(params)
                    if item:
                        _dict = xbmcdict.copy()
                        _dict['url'] = item
                        item_list.extend([_dict])
                item = ''
                # HTML5 <video>/<source> tags.
                # NOTE(review): soup.findAll('source') scans the whole body,
                # not just the current <video> — confirm intended.
                for item in soup.findAll('video'):
                    for source in soup.findAll('source'):
                        src = source.get('src')
                        if src:
                            xbmcdict = XBMCDict(0).update(params)
                            if item and ('..' not in src):
                                _dict = xbmcdict.copy()
                                try:
                                    # data-res gives the quality label, "720p".
                                    _dict['src_title'] = source.get(
                                        'data-res') + 'p'
                                except:
                                    pass
                                _dict['url'] = src
                                item_list.extend([_dict])
                    # The <video> tag itself may carry a src attribute.
                    try:
                        src = item.get('src')
                        if src:
                            xbmcdict = XBMCDict(0).update(params)
                            if item and ('..' not in src):
                                _dict = xbmcdict.copy()
                                try:
                                    # NOTE(review): 'source' here is the
                                    # leftover from the inner loop above (or
                                    # unbound, caught by the outer except).
                                    _dict['src_title'] = source.get(
                                        'data-res') + 'p'
                                except:
                                    pass
                                _dict['url'] = src
                                item_list.extend([_dict])
                    except:
                        pass
                # videomega.tv validatehash / ref= script embeds.
                for script in soup.findAll('script'):
                    item = ''
                    if script.get('src'):
                        if 'http://videomega.tv/validatehash.php' in script[
                                'src']:
                            item = script['src']
                        elif 'ref=' in script.get('src'):
                            temp = re.search('.*ref=[\'"](.+?)[\'"]',
                                             script.get('src'))
                            if temp:
                                item = 'http://videomega.tv/iframe.php?ref=' + temp.group(
                                    1)
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
                # Generic iframes, with the same videomega special cases.
                for iframe in soup.findAll('iframe'):
                    item = ''
                    if iframe.get('src'):
                        if 'http://videomega.tv/validatehash.php' in iframe[
                                'src']:
                            item = iframe['src']
                        elif 'ref=' in iframe.get('src'):
                            temp = re.search('.*ref=[\'"](.+?)[\'"]',
                                             iframe.get('src'))
                            if temp:
                                item = 'http://videomega.tv/iframe.php?ref=' + temp.group(
                                    1)
                        else:
                            item = iframe.get('src')
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])

            # Hand the collected candidates to the player, or alert if none.
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #3
    def __init__(self, params):
        """Handle one plugin request for the freeomovie.com site scraper.

        params is a dict of request parameters; params['mode'] selects the
        action: 'main' builds the root menu, 'categories' lists tag links,
        'list' shows one page of videos (with search and go-to-page
        support), and 'play' extracts playable sources and hands them to
        Playback for resolution.

        Python 2 / BeautifulSoup 3 code.  NOTE(review): relies on a
        module-level name `image` (not defined in this method) when
        building artwork paths -- presumably the addon's image directory;
        confirm against the enclosing module.  Cancelled input dialogs
        terminate the process via exit(1).
        """
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        # The module name doubles as the site identifier in menu items.
        site = self.__module__
        mode = params['mode']

        # Site entry points used by the menus below.
        home_url = 'http://www.freeomovie.com/'
        movies_url = home_url + 'category/full-movie/'
        scenes_url = home_url + 'category/clips/'
        search_url = home_url + '/?s='
        # Navigation links that would otherwise be scraped as content.
        false_positives = [
            'http://www.freeomovie.com/category/full-movie/',
            'http://www.freeomovie.com/category/clips/'
        ]

        if mode == 'main':
            # Static root menu: all / movies / scenes / categories / search.
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30001),
                'content': '',
                'url': movies_url,
                'cover_url': a.image('movies.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30002),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('scenes.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            # Append favourites/history and any extended entries.
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            # Scrape category anchors from the home page's taxonomy list.
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div',
                                     {'class': 'multi-column-taxonomy-list'}))
            item_list = []
            if soup:
                for item in soup.findAll('a'):
                    if item:
                        if item.get('href') not in false_positives:
                            item_list.extend([{
                                'site':
                                site,
                                'mode':
                                'list',
                                'url':
                                item.get('href'),
                                'content':
                                '',
                                'title':
                                item.string.encode('UTF-8'),
                                'cover_url':
                                a.image(image, image),
                                'backdrop_url':
                                a.art(),
                                'type':
                                3
                            }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            # 'search' content: prompt the user and build the search URL.
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            # 'goto' content: ask for a page number and rewrite the URL.
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    # No page marker found; use a large upper bound.
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'content'}))
            item_list = []
            # Listed entries become playable items; defaults below are
            # inherited by every scraped entry via XBMCDict.
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('div', {'class': 'postbox'}):
                    if item:
                        if item.h2.a.get('href') not in false_positives:
                            _dict = xbmcdict.copy()
                            # Clip listings get a shorter default duration
                            # and are flagged as episodes.
                            if scenes_url in params['url']:
                                _dict['duration'] = '1500'
                                _dict['content'] = 'episodes'
                            _dict['url'] = item.h2.a.get('href')
                            _dict['title'] = item.h2.a.get('title').encode(
                                'UTF-8')
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            _dict['cover_url'] = a.image(item.img.get('src'))
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            _dict['sub_site'] = site
                            item_list.extend([_dict])
            # Pagination: previous / next / go-to-last-page entries.
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'wp-pagenavi'}))
            last_item = False
            if soup:
                for item in soup.findAll('a', href=True):
                    if item:
                        if item.get('class') == 'previouspostslink':
                            item_list.extend([{
                                'site':
                                site,
                                'mode':
                                'list',
                                'url':
                                item.get('href'),
                                'content':
                                params['content'],
                                'title':
                                a.language(30017, True),
                                'cover_url':
                                a.image('previous.png', image),
                                'backdrop_url':
                                a.art(),
                                'type':
                                3
                            }])
                        if item.get('class') == 'nextpostslink':
                            item_list.extend([{
                                'site':
                                site,
                                'mode':
                                'list',
                                'url':
                                item.get('href'),
                                'content':
                                params['content'],
                                'title':
                                a.language(30018, True),
                                'cover_url':
                                a.image('next.png', image),
                                'backdrop_url':
                                a.art(),
                                'type':
                                3
                            }])
                        if item.get('class') == 'last':
                            last_item = item.get('href')
                if not last_item:
                    # No explicit "last" link: fall back to the final anchor
                    # (or the one before it when a "next" link is present,
                    # since "next" is then the final anchor).
                    try:
                        if not soup.find('a', {'class': 'nextpostslink'}):
                            last_item = soup.findAll('a',
                                                     href=True)[-1].get('href')
                        else:
                            last_item = soup.findAll('a',
                                                     href=True)[-2].get('href')
                    except:
                        # NOTE(review): bare except silently swallows any
                        # scrape failure; the go-to-page item is then omitted.
                        pass
                if last_item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': last_item,
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            item_list = []
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'videosection'}))
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                # Multi-part videos appear as <li class="pgN"> tabs; a run
                # of identical classes forms the parts of one source.
                pages = soup.findAll('li', {'class': re.compile('pg.')})
                if pages:
                    old_li = pages[0].get('class')
                    _dict = xbmcdict.copy()
                    _dict['multi-part'] = True
                    parts = []
                    for li in pages:
                        # A class change marks the start of a new source.
                        if old_li != li.get('class'):
                            _dict['parts'] = parts
                            item_list.extend([_dict])
                            _dict = xbmcdict.copy()
                            _dict['multi-part'] = True
                            old_li = li.get('class')
                            parts = []
                        # Part URL is carried in the 'myurl=' query value.
                        url = re.search('.+myurl=(.+)', li.a.get('href'),
                                        re.IGNORECASE)
                        if url:
                            url = url.group(1)
                            parts.extend([url])
                    # Flush the trailing group.
                    if parts:
                        _dict['parts'] = parts
                        item_list.extend([_dict])
                # Direct external link, skipping exoclick ad links.
                alink = soup.find('a', {'target': '_blank'})
                if alink:
                    alink = alink.get('href')
                    if 'main.exoclick.com' not in alink:
                        _dict = xbmcdict.copy()
                        _dict['url'] = alink
                        item_list.extend([_dict])
                # Embedded players, again filtering ad iframes.
                iframes = soup.findAll('iframe', {'src': True})
                if iframes:
                    for iframe in iframes:
                        iframe = iframe.get('src')
                        if 'main.exoclick.com' not in iframe:
                            _dict = xbmcdict.copy()
                            _dict['url'] = iframe
                            item_list.extend([_dict])
                # Fallback: mirror tabs under <ul id="countrytabs">.
                if not item_list:
                    soup = BeautifulSoup(html,
                                         parseOnlyThese=SoupStrainer(
                                             'ul', {'id': 'countrytabs'}))
                    if soup:
                        xbmcdict = XBMCDict(0).update(params)
                        for index, items in enumerate(
                                soup.findAll('a', href=True)):
                            item = ''
                            # Anchors with id="jpg" are thumbnails, not links.
                            if not items.get('id') == 'jpg':
                                item = items.get('href')
                                item = re.search('.*myURL\[\]=(.+)$', item,
                                                 re.DOTALL)
                                if item:
                                    # Strip the tab-selector query fragment.
                                    item = re.sub('&tab=[0-9]+', '',
                                                  item.group(1))
                                if item:
                                    _dict = xbmcdict.copy()
                                    _dict['url'] = item
                                    _dict['count'] = index
                                    item_list.extend([_dict])
            # Hand every discovered source to the resolver, or alert.
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
예제 #4
0
    def __init__(self, params):
        """Handle one plugin request for the yespornplease.com scraper.

        params['mode'] selects the action: 'main' builds the root menu,
        'categories' lists category links, 'list' shows one page of video
        previews (with search and go-to-page support), and 'play' pulls
        the video URL out of the Flash player's flashvars.

        Python 2 / BeautifulSoup 3 code.  NOTE(review): uses a
        module-level name `image` (not defined in this method) for
        artwork paths -- confirm against the enclosing module.  Cancelled
        input dialogs terminate the process via exit(1).
        """
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        # The module name doubles as the site identifier in menu items.
        site = self.__module__
        mode = params['mode']

        # Site entry points; search results use search.php, browsing
        # uses index.php (the code maps between them when paging).
        base_url = 'http://yespornplease.com'
        home_url = base_url + '/index.php'
        popular_url = base_url + '/index.php?p=1&m=today'
        search_url = base_url + '/search.php?q='
        false_positives = ['']

        if mode == 'main':
            # Static root menu: all / popular / categories / search.
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30016),
                'content': '',
                'url': popular_url,
                'cover_url': a.image('popular.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            # Append favourites/history and any extended entries.
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            # Scrape anchors from the <div id="categories"> sidebar.
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'categories'}))
            item_list = []
            if soup:
                for item in soup.findAll('a'):
                    if item:
                        item_list.extend([{
                            'site':
                            site,
                            'mode':
                            'list',
                            'url':
                            item.get('href').replace(' ', '+'),
                            'content':
                            '',
                            'title':
                            item.string.encode('UTF-8'),
                            'cover_url':
                            a.image(image, image),
                            'backdrop_url':
                            a.art(),
                            'type':
                            3
                        }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            # 'search' content: prompt the user and build the search URL.
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item.replace(' ', '+')
                else:
                    exit(1)
            # 'goto' content: ask for a page number and rewrite 'p=' in
            # the URL.
            elif params.get('content', '') == 'goto':
                last_item = re.search('p=([0-9]+)', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    # No page marker found; use a large upper bound.
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('p=[0-9]+', 'p=' + str(item),
                                           params['url']).replace(' ', '+')
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'videos'}))
            item_list = []
            # Listed entries become playable items; defaults below are
            # inherited by every scraped entry via XBMCDict.
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('div', {'class': 'video-preview'}):
                    if item:
                        _dict = xbmcdict.copy()
                        temp = item.find('div', {'class': 'jcarousel'}).a
                        if temp:
                            temp = temp.get('href')
                            # Site emits relative links; absolutize them.
                            if not temp.startswith('http://'):
                                temp = base_url + temp
                            _dict['url'] = temp
                            _dict['title'] = item.find('div', {
                                'class': 'preview-title'
                            }).get('title').encode('UTF-8')
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            temp = item.find('div', {
                                'class': 'jcarousel'
                            }).img.get('src')
                            # Protocol-relative thumbnail URLs get http:.
                            if temp.startswith('//'): temp = 'http:' + temp
                            _dict['cover_url'] = a.image(temp)
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            temp = item.find('div', {
                                'class': 'preview-info-box length'
                            }).b.string
                            if temp:
                                # Convert an HH:MM:SS badge into seconds.
                                temp = re.search('([0-9]+):([0-9]+):([0-9]+)',
                                                 temp)
                                _dict['duration'] = str(
                                    (int(temp.group(1)) * 60 * 60) +
                                    (int(temp.group(2)) * 60) +
                                    int(temp.group(3)))
                            _dict['sub_site'] = site

                            item_list.extend([_dict])

                # Pagination: prev/next anchors live in the page body.
                soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
                if soup.find('a', {'id': 'prev-page'}):
                    item = soup.find('a', {
                        'id': 'prev-page'
                    }).get('href').replace(' ', '+')
                    if not item.startswith('http://'): item = base_url + item
                    # Keep paging within index.php when browsing (the site
                    # links prev/next through search.php).
                    if 'index.php' in params['url']:
                        item = item.replace('search.php', 'index.php')
                    item_list.extend([{
                        'site':
                        site,
                        'mode':
                        'list',
                        'url':
                        item,
                        'content':
                        params['content'],
                        'title':
                        a.language(30017, True),
                        'cover_url':
                        a.image('previous.png', image),
                        'backdrop_url':
                        a.art(),
                        'type':
                        3
                    }])
                if soup.find('a', {'id': 'next-page'}):
                    item = soup.find('a', {
                        'id': 'next-page'
                    }).get('href').replace(' ', '+')
                    if 'index.php' in params['url']:
                        item = item.replace('search.php', 'index.php')
                    if not item.startswith('http://'): item = base_url + item
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item,
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])

                # The last anchor in <div id="pagination"> is the final
                # page; expose it as a go-to-page entry.
                soup = BeautifulSoup(html,
                                     parseOnlyThese=SoupStrainer(
                                         'div', {'id': 'pagination'}))
                last_item = False
                if soup:
                    for item in reversed(soup.findAll('a')):
                        last_item = item.get('href')
                        if not last_item.startswith('http://'):
                            last_item = base_url + last_item
                        break
                if last_item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': last_item,
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            # Video URL is embedded in the Flash player's flashvars param.
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'object', {'id': 'videoContainer'}))
            item_list = []
            if soup:
                item = soup.find('param', {'name': 'flashvars'})
                item = re.search('.*?video_url=(.+?)&.*?', str(item))
                if item: item = item.group(1)
                xbmcdict = XBMCDict(0).update(params)
                if item:
                    _dict = xbmcdict.copy()
                    _dict['url'] = item
                    item_list.extend([_dict])
                else:
                    a.alert(a.language(30904, True), sound=False)
            # Hand the discovered source to the resolver, or alert.
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
예제 #5
0
    def __init__(self, params):
        """Handle one plugin request for the qwertty.net site scraper.

        params['mode'] selects the action: 'main' builds the root menu,
        'categories' lists navigation links, 'list' shows one page of
        videos (with search and go-to-page support), and 'play' takes the
        first external link from the description block as the source.

        Paging uses two schemes: DLE search URLs carry 'search_start' /
        'result_from' query values, regular listings use '/page/N/'.

        Python 2 / BeautifulSoup 3 code.  NOTE(review): relies on a
        module-level name `image` (not defined in this method) for
        artwork paths.  Cancelled input dialogs call exit(1).
        """
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        # The module name doubles as the site identifier in menu items.
        site = self.__module__
        mode = params['mode']

        home_url = 'http://qwertty.net'
        # DLE search endpoint; search_start/result_from are rewritten when
        # paging through results.
        search_url = home_url + '/index.php?do=search&subaction=search&full_search=0&search_start=0&result_from=1&story='
        false_positives = ['']

        if mode == 'main':
            # Static root menu: all / categories / search.
            item_list = [{'site': site, 'mode': 'list', 'title': a.language(30006), 'content': '',
                          'url': home_url, 'cover_url': a.image('all.png', image), 'backdrop_url': a.art(), 'type': 3},
                         {'site': site, 'mode': 'categories', 'title': a.language(30005), 'content': '',
                          'url': home_url, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'list', 'title': a.language(30004), 'content': 'search',
                          'url': search_url, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(),
                          'type': 3}]
            item_list.extend(a.favs_hist_menu(site))
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            # Scrape category anchors from the site navigation block; hrefs
            # are site-relative, so home_url is prepended.
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'navi-wrap'}))
            item_list = []
            if soup:
                for item in soup.findAll('a'):
                    if item: item_list.extend([{'site': site, 'mode': 'list', 'url': home_url + item.get('href'),
                                                'content': '', 'title': item.string.encode('UTF-8'),
                                                'cover_url': a.image(image, image), 'backdrop_url': a.art(),
                                                'type': 3}])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            # 'search' content: prompt the user and build the search URL.
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            # 'goto' content: ask for a page number; search URLs and
            # listing URLs carry the page in different places.
            elif params.get('content', '') == 'goto':
                if 'do=search' in params['url']:
                    last_item = re.search('search_start=([0-9]+)', params['url'])
                else:
                    last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    # No page marker found; use a large upper bound.
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    if 'do=search' in params['url']:
                        # \g<01> keeps the literal 'search_start=' prefix;
                        # result_from is page*10+1 (10 results per page).
                        page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + str(item), params['url'])
                        params['url'] = re.sub(r'(result_from=)([0-9]+)', '\g<01>' + str(int(str(item)) * 10 + 1), page)
                    else:
                        params['url'] = re.sub('/page/[0-9]+/', '/page/' + str(item) + '/', params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'id': 'dle-content'}))
            item_list = []
            # Listed entries become playable items; defaults below are
            # inherited by every scraped entry via XBMCDict.
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('div', {'class': 'short-item'}):
                    if item:
                        _dict = xbmcdict.copy()
                        _dict['url'] = item.a.get('href')
                        _dict['title'] = item.a.img.get('alt').encode('UTF-8')
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        # Use the full-size image instead of the thumbnail.
                        item = home_url + item.a.img.get('src').replace('/thumbs', '')
                        _dict['cover_url'] = a.image(item)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        _dict['sub_site'] = site

                        item_list.extend([_dict])
            # Pagination: the bottom-nav block holds Back / Next / numbered
            # page links.  On search pages the anchors are href="#" with a
            # list_submit(N) onclick handler carrying the page number.
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'bottom-nav'}))
            if soup:
                last_item = len(soup.findAll('a', href=True)) - 1
                for index, item in enumerate(soup.findAll('a', href=True)):
                    page = ''
                    if item:
                        # If there is no Back link, the last numbered page
                        # link sits one position earlier.
                        if index == 0 and item.string.encode('UTF-8') != 'Back': last_item -= 1
                        if item.string.encode('UTF-8') == 'Back':
                            if item.get('href') == '#':
                                # Search paging: recover the target page from
                                # the onclick handler and rewrite the URL.
                                temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                                if temp:
                                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                    page = re.sub(r'(result_from=)([0-9]+)',
                                                  '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                            else:
                                page = item.get('href')
                            if page:
                                item_list.extend(
                                    [{'site': site, 'mode': 'list', 'url': page, 'content': params['content'],
                                      'title': a.language(30017, True), 'cover_url': a.image('previous.png', image),
                                      'backdrop_url': a.art(), 'type': 3}])
                        if item.string.encode('UTF-8') == 'Next':
                            if item.get('href') == '#':
                                temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                                if temp:
                                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                    page = re.sub(r'(result_from=)([0-9]+)',
                                                  '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                            else:
                                page = item.get('href')
                            if page:
                                item_list.extend(
                                    [{'site': site, 'mode': 'list', 'url': page, 'content': params['content'],
                                      'title': a.language(30018, True), 'cover_url': a.image('next.png', image),
                                      'backdrop_url': a.art(), 'type': 3}])
                        # The final anchor points at the last page; expose
                        # it as a go-to-page entry.
                        if index == last_item:
                            if item.get('href') == '#':
                                temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                                if temp:
                                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                    page = re.sub(r'(result_from=)([0-9]+)',
                                                  '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                            else:
                                page = item.get('href')
                            if page:
                                item_list.extend([{'site': site, 'mode': 'list', 'url': page, 'content': 'goto',
                                                   'title': a.language(30019, True),
                                                   'cover_url': a.image('goto.png', image),
                                                   'backdrop_url': a.art(), 'type': 3}])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            # The source is the first external link in the description div.
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'full-text clearfix desc-text'}))
            item = soup.find('a')
            item_list = []
            xbmcdict = XBMCDict(0).update(params)
            if item:
                _dict = xbmcdict.copy()
                _dict['url'] = item.get('href')
                item_list.extend([_dict])
            else:
                a.alert(a.language(30904, True), sound=False)
            # Hand the discovered source to the resolver, or alert.
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
예제 #6
0
    def __init__(self, params):
        """Route a Kodi plugin request for the playporn.to site scraper.

        Dispatches on ``params['mode']``:
          * ``'main'``     -- build the top-level menu (recent / movies / scenes / search).
          * ``'sub'``      -- show the "all" and "categories" entries for a section.
          * ``'category'`` -- scrape the site navigation bar for category links.
          * ``'list'``     -- scrape one listing page (also handles 'search' and
                              'goto' page-jump input) and emit playable items plus
                              previous/next/last pagination entries.
          * ``'play'``     -- extract the embedded player URL(s) from the page and
                              hand them to Playback for source selection.

        :param params: dict of plugin-call parameters; must contain 'mode', and
            'url' for every mode other than 'main'.  Mutated in place by the
            'list' branch before being folded into the item template.

        NOTE(review): ``image`` used below is a module-level name defined
        elsewhere in this file -- presumably the addon's image directory;
        verify against the surrounding module.
        """
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://playporn.to/'
        search_url = home_url + '?submit=Search&s='
        movies_url = home_url + 'category/xxx-movie-stream/'
        scenes_url = home_url + 'category/xxx-clips-scenes-stream/'
        # Links that appear inside listings but are not playable items.
        false_positives = ['http://playporn.to/deutsche-milfs-anonym-sex/']

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30003),
                'content': '',
                'url': home_url,
                'cover_url': a.image('recent.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'sub',
                'title': a.language(30001),
                'content': '',
                'url': movies_url,
                'cover_url': a.image('movies.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'sub',
                'title': a.language(30002),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('scenes.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'sub':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': params['url'],
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'category',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'category':
            # The site nav ('ul.nav fl') holds movie categories in the second
            # <ul> (index 1) and scene categories in the third (index 2).
            index = 1
            if 'scenes' in params['url'].lower(): index = 2
            html = a.get_page(home_url)
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer('ul', 'nav fl'))
            item_list = []
            for item in soup.findAll('ul')[index].findAll({'a': True}):
                item_list.extend([{
                    # Fixed: was the hard-coded string 'playporn'; use the
                    # module-derived site name like every other item dict here.
                    'site': site,
                    'mode': 'list',
                    'url': item.get('href'),
                    'content': '',
                    'title': item.contents[0].encode('UTF-8'),
                    'cover_url': a.image(image, image),
                    'backdrop_url': a.art(),
                    'type': 3
                }])
            if item_list:
                a.add_items(item_list)
                a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)  # user cancelled the search dialog
            elif params.get('content', '') == 'goto':
                # 'goto' arrives with the last page's URL; extract its page
                # number as the upper bound for the page-jump input dialog.
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)  # user cancelled the page-jump dialog
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
            item_list = []
            # Template shared by every playable item on the page.
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            xbmcdict = XBMCDict(0).update(params)
            for item in soup.findAll('div', 'photo-thumb-image'):
                if not item.a.get('href') in false_positives:
                    _dict = xbmcdict.copy()
                    if 'scenes' in params['url']:
                        # Scene listings get a shorter default runtime.
                        _dict['duration'] = '2700'
                        _dict['content'] = 'episodes'
                    _dict['url'] = item.a.get('href')
                    _dict['title'] = item.a.get('title').encode('UTF-8')
                    _dict['tvshowtitle'] = _dict['title']
                    _dict['originaltitle'] = _dict['title']
                    _dict['cover_url'] = a.image(item.img.get('src'))
                    _dict['thumb_url'] = _dict['cover_url']
                    _dict['poster'] = _dict['cover_url']
                    _dict['sub_site'] = site

                    item_list.extend([_dict])
            # Pagination block: previous / next / jump-to-last entries.
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', 'more_entries'))
            if soup:
                item = soup.find('a', 'previouspostslink')
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30017, True),
                        'cover_url': a.image('previous.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('a', 'nextpostslink')
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('a', 'last')
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
            if item_list:
                a.add_items(item_list)
                a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'loopedSlider'}))
            # The player markup is hidden inside an HTML comment with its
            # angle brackets entity-encoded; pull the comment text out,
            # decode &lt;/&gt;, then re-parse it as real HTML.
            soup = soup.find(text=lambda text: isinstance(text, Comment))
            if soup:
                soup = re.sub('&lt;', '<', soup.encode('utf-8'))
                soup = re.sub('&gt;', '>', soup)
                soup = BeautifulSoup(soup,
                                     parseOnlyThese=SoupStrainer(
                                         'div', 'video'))
                if soup:
                    item_list = []
                    xbmcdict = XBMCDict(0).update(params)
                    for item in soup.findAll('iframe'):
                        _dict = xbmcdict.copy()
                        # Strip the site's stream-proxy prefix to get the
                        # real hoster URL.
                        _dict['url'] = item.get('src').replace(
                            'http://playporn.to/stream/all/?file=',
                            '').encode('UTF-8')
                        # Rewrite known hoster embed URLs into canonical form.
                        if 'flashx.tv' in _dict['url'].lower():
                            item = re.search('hash=(.+?)&', _dict['url'])
                            if item:
                                _dict[
                                    'url'] = 'http://flashx.tv/video/' + item.group(
                                        1) + '/'
                        elif 'played.to' in _dict['url'].lower():
                            item = re.search('embed-([a-zA-Z0-9]+?)-.+?html',
                                             _dict['url'])
                            if item:
                                _dict[
                                    'url'] = 'http://played.to/' + item.group(
                                        1)
                        item_list.extend([_dict])
                    if item_list:
                        from playback import Playback
                        Playback().choose_sources(item_list)
                    else:
                        a.alert(a.language(30904, True), sound=False)
                else:
                    a.alert(a.language(30904, True), sound=False)
            else:
                a.alert(a.language(30904, True), sound=False)