Example #1
    def addons(self,
               token_filter=None,
               object_filter=None,
               inherited_filter=None,
               entity_filter=None):
        config_filename = self.addon_config_filename

        if token_filter is not None:
            filter_re = re.compile(token_filter)
            filter_t = lambda s: filter_re.search(s) is not None
        else:
            filter_t = lambda s: True

        if object_filter is not None:
            filter_o = lambda a: object_filter in a.objects[0]
        else:
            filter_o = lambda a: True

        if inherited_filter is not None:
            filter_i = lambda a: inherited_filter in a.objects[1]
        else:
            filter_i = lambda a: True

        if entity_filter is not None:
            filter_e = lambda a: entity_filter in a.entities
        else:
            filter_e = lambda a: True

        for path, ds, fs in walk(self.sources_path, followlinks=True):
            if config_filename in fs and '__init__.py' in fs and filter_t(
                    basename(path)):
                addon = Addon(join(path, config_filename))
                if filter_o(addon) and filter_i(addon) and filter_e(addon):
                    yield addon
                del ds[:]  # prune in place; "ds = []" only rebinds and would not stop os.walk descending
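
    # Usage sketch (illustrative only; the repository object and the filter
    # values below are assumptions, not part of the original snippet):
    #
    #   for addon in repo.addons(token_filter=r'^web_',
    #                            entity_filter='res.partner'):
    #       print(addon)
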
    def updateAddons(self):
        """
        For all the addons in the addon db, check their version number
        and download status, and install any addons that have updates available
        or have never been installed.
        
        Arguments:
        - `self`:
        """
         
        with self.conn:
            cur = self.conn.execute("SELECT * FROM addons;")
            for addon in cur:
                addon = Addon(*addon)

                # check to see if an update is available
                # if the addon isn't installed at all, we will
                # still use the version fetched by this function
                # to record.

                update_available = addon.updateAvailable()
                if update_available or not addon.installed:

                    # TODO: fix this. shouldn't need to check for the version twice in one function
                    addon.newest_file = addon.getNewestVersion()
                    print("Update available, installing %s." % addon.name)
                    self.installAddon(addon)
                    
                else:
                    print("%s is up to date." % addon.name)
Example #3
    def addons(self,
               token_filter=None,
               model_filter=None,
               inherited_filter=None,
               data_filter=None,
               field_filter=None):
        config_filename = self.addon_config_filename
        addonsourcepath = self.get_addonsourcepath()

        filter_re = re.compile(token_filter) if token_filter else None

        def filter_name(p):
            return filter_re.search(p) is not None if token_filter else True

        def filter_addon(a):
            return (
                (model_filter in a.models[0] if model_filter else True)
                and (data_filter in a.data if data_filter else True) and
                (inherited_filter in a.models[1] if inherited_filter else True)
                and (field_filter in [f for fn, cl, f in a.fields]
                     if field_filter else True))

        for path, ds, fs in walk(self.sources_path, followlinks=True):
            if (config_filename in fs and '__init__.py' in fs
                    and filter_name(basename(path))
                    and realpath(path) != realpath(addonsourcepath)):
                addon = Addon(join(path, config_filename))
                if filter_addon(addon):
                    yield addon
Example #4
    def __init__(self):
        self.ad = Addon()
        self.js = MyJson()

        if self.ad.get_state_local_db():
            self.tdata = self.js.read_json(self.ad.db_file)
        else:
            self.tdata = self.js.get_json(
                "https://github.com/SLiX69/script.skip.intro/raw/master/resources/data.json"
            )
    def listInstalledAddons(self):
        """
        List all currently installed addons.

        Arguments:
        - `self`:
        """
        with self.conn:
            installed_addons = self.conn.execute("SELECT * FROM addons;")
            for addon in installed_addons:
                addon = Addon(*addon)
                print("%s - version: %s - installed: %s" %
                      (addon.name, addon.newest_file, addon.installed))
Example #6
    def __init__(self):
        import urlresolver
        self.addon = Addon()
        self.common = self.addon.common
        self.urlresolver = urlresolver
        self.urlresolver.plugnplay.plugin_dirs = []
        if self.common.resolvers:
            self.urlresolver.plugnplay.set_plugin_dirs(
                self.urlresolver.common.plugins_path,
                self.common.resolvers_path,
                self.common.builtin_resolvers_path)
        else:
            self.urlresolver.plugnplay.set_plugin_dirs(
                self.urlresolver.common.plugins_path,
                self.common.builtin_resolvers_path)
        self.urlresolver.plugnplay.load_plugins()
Example #7
def notify(addon_id, typeq, title, message, times, line2='', line3=''):
    import xbmc
    import xbmcgui  # used by the 'big' and fallback dialog branches below
    addon_tmp = Addon(addon_id)
    if title == '':
        title = '[B]' + addon_tmp.get_name() + '[/B]'
    if typeq == 'small':
        if times == '':
            times = '5000'
        smallicon = notify_icon
        xbmc.executebuiltin("XBMC.Notification(" + title + "," + message +
                            "," + times + "," + smallicon + ")")
    elif typeq == 'big':
        dialog = xbmcgui.Dialog()
        dialog.ok(' ' + title + ' ', ' ' + message + ' ', line2, line3)
    else:
        dialog = xbmcgui.Dialog()
        dialog.ok(' ' + title + ' ', ' ' + message + ' ')
Example #8
    def __init__(self, params):
        site = self.__module__
        addon = Addon()
        common = addon.common
        mode = params['mode']

        if mode == 'main':

            sql_params = (params['sub_site'], )
            execute = 'SELECT * FROM ' + common.hist_db_table + ' WHERE sub_site=? ORDER BY id DESC'
            selected = common.db.fetchall(execute, sql_params)
            item_list = []
            if selected:
                for this_id, site, content, url, params in selected:
                    try:
                        params = AddonDict(
                            common.addon_type()).str_update(params)
                        item_list.extend([params])
                    except Exception:
                        pass
            if item_list:
                addon.add_items(item_list)
            addon.end_of_directory()

        elif mode == 'clear_history':
            if not params['sub_site']:
                execute = 'DROP TABLE ' + common.hist_db_table
                sql_params = ''
            else:
                execute = 'DELETE FROM ' + common.hist_db_table + ' WHERE sub_site=?'
                sql_params = (params['sub_site'], )
            clear_hist = xbmcgui.Dialog().yesno(
                common.addon_name + ' - ' + addon.language(30879, True),
                ' ',
                addon.language(30880, True),
                nolabel=addon.language(30899, True),
                yeslabel=addon.language(30898, True))
            if common.to_bool(clear_hist):
                cleared = common.db.execute(execute, sql_params)
                if common.to_bool(cleared):
                    common.db.execute('VACUUM ' + common.hist_db_table)
                    addon.alert(str(addon.language(30881, True)))
                    xbmc.executebuiltin('Container.Refresh')
                else:
                    addon.alert(str(addon.language(30919, True)))
    def selectAddon(self, name):
        """
        Look up and return an addon from the db by name.

        Arguments:
        - `self`:
        - `name`:
        """
        with self.conn:
            cur = self.conn.execute("SELECT * FROM addons WHERE name=?",
                                    (name, ))
            # a cursor object is always truthy; check the fetched row instead
            row = cur.fetchone()
            if row:
                return Addon(*row)
            else:
                print("Couldn't find an addon named %s." % name)
                return False
    def installAddon(self, addon):
        """
        Download, and extract an addon into the addons folder.
        
        Arguments:
        - `self`:
        - `addon`: Addon or String
        """

        if isinstance(addon, str):
            addon = Addon(addon)

        # check if item is already in the DB
        with self.conn:
            in_db = self.conn.execute("SELECT name FROM addons WHERE name=?",
                                      (addon.name, ))

            if not addon.installed and not in_db.fetchone():

                # add new
                if addon.getFile() and addon.unzipFile(self.addons_dir):
                    addon.newest_file = addon.getNewestVersion()
                    with self.conn:
                        # print debugging
                        print("Adding record for %s." % addon.name)
                        print("Version -> %s" % addon.newest_file)
                        print("Files extracted -> %s" % json.dumps(addon.files))
                        self.conn.execute("INSERT INTO addons VALUES (?, ?, ?, ?)",\
                                          (addon.name, addon.newest_file, True, json.dumps(addon.files), ))

            else:
                # update... assume new version number and files only
                if addon.getFile() and addon.unzipFile(self.addons_dir):
                    addon.newest_file = addon.getNewestVersion()
                    with self.conn:
                        # print debugging
                        print("Updating record for %s." % addon.name)
                        print("Version -> %s" % addon.newest_file)
                        print("Files extracted -> %s" % json.dumps(addon.files))
                        self.conn.execute("UPDATE addons SET version=?,files=?,downloaded=? WHERE name=?;",\
                                          (addon.newest_file, json.dumps(addon.files), True, addon.name, ))
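
    # Note: none of these snippets show the DDL for the addons table. Judging
    # from the INSERT and UPDATE statements above, a plausible schema (an
    # assumption, not a confirmed definition) would be:
    #
    #   CREATE TABLE IF NOT EXISTS addons (
    #       name TEXT PRIMARY KEY,
    #       version TEXT,
    #       downloaded INTEGER,   -- stored via the True flag above
    #       files TEXT            -- JSON-encoded list of extracted files
    #   );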
Example #11
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://qwertty.net'
        search_url = home_url + '/index.php?do=search&subaction=search&full_search=0&search_start=0&result_from=1&story='
        false_positives = ['']

        if mode == 'main':
            item_list = [{'site': site, 'mode': 'list', 'title': a.language(30006), 'content': '',
                          'url': home_url, 'cover_url': a.image('all.png', image), 'backdrop_url': a.art(), 'type': 3},
                         {'site': site, 'mode': 'categories', 'title': a.language(30005), 'content': '',
                          'url': home_url, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'list', 'title': a.language(30004), 'content': 'search',
                          'url': search_url, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(),
                          'type': 3}]
            item_list.extend(a.favs_hist_menu(site))
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'navi-wrap'}))
            item_list = []
            if soup:
                for item in soup.findAll('a'):
                    if item: item_list.extend([{'site': site, 'mode': 'list', 'url': home_url + item.get('href'),
                                                'content': '', 'title': item.string.encode('UTF-8'),
                                                'cover_url': a.image(image, image), 'backdrop_url': a.art(),
                                                'type': 3}])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                if 'do=search' in params['url']:
                    last_item = re.search('search_start=([0-9]+)', params['url'])
                else:
                    last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    if 'do=search' in params['url']:
                        page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + str(item), params['url'])
                        params['url'] = re.sub(r'(result_from=)([0-9]+)', '\g<01>' + str(int(str(item)) * 10 + 1), page)
                    else:
                        params['url'] = re.sub('/page/[0-9]+/', '/page/' + str(item) + '/', params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'id': 'dle-content'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('div', {'class': 'short-item'}):
                    if item:
                        _dict = xbmcdict.copy()
                        _dict['url'] = item.a.get('href')
                        _dict['title'] = item.a.img.get('alt').encode('UTF-8')
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        item = home_url + item.a.img.get('src').replace('/thumbs', '')
                        _dict['cover_url'] = a.image(item)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        _dict['sub_site'] = site

                        item_list.extend([_dict])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'bottom-nav'}))
            if soup:
                last_item = len(soup.findAll('a', href=True)) - 1
                for index, item in enumerate(soup.findAll('a', href=True)):
                    page = ''
                    if item:
                        if index == 0 and item.string.encode('UTF-8') != 'Back': last_item -= 1
                        if item.string.encode('UTF-8') == 'Back':
                            if item.get('href') == '#':
                                temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                                if temp:
                                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                    page = re.sub(r'(result_from=)([0-9]+)',
                                                  '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                            else:
                                page = item.get('href')
                            if page:
                                item_list.extend(
                                    [{'site': site, 'mode': 'list', 'url': page, 'content': params['content'],
                                      'title': a.language(30017, True), 'cover_url': a.image('previous.png', image),
                                      'backdrop_url': a.art(), 'type': 3}])
                        if item.string.encode('UTF-8') == 'Next':
                            if item.get('href') == '#':
                                temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                                if temp:
                                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                    page = re.sub(r'(result_from=)([0-9]+)',
                                                  '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                            else:
                                page = item.get('href')
                            if page:
                                item_list.extend(
                                    [{'site': site, 'mode': 'list', 'url': page, 'content': params['content'],
                                      'title': a.language(30018, True), 'cover_url': a.image('next.png', image),
                                      'backdrop_url': a.art(), 'type': 3}])
                        if index == last_item:
                            if item.get('href') == '#':
                                temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                                if temp:
                                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                    page = re.sub(r'(result_from=)([0-9]+)',
                                                  '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                            else:
                                page = item.get('href')
                            if page:
                                item_list.extend([{'site': site, 'mode': 'list', 'url': page, 'content': 'goto',
                                                   'title': a.language(30019, True),
                                                   'cover_url': a.image('goto.png', image),
                                                   'backdrop_url': a.art(), 'type': 3}])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'full-text clearfix desc-text'}))
            item = soup.find('a')
            item_list = []
            xbmcdict = XBMCDict(0).update(params)
            if item:
                _dict = xbmcdict.copy()
                _dict['url'] = item.get('href')
                item_list.extend([_dict])
            else:
                a.alert(a.language(30904, True), sound=False)
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #12
import urllib, urllib2, sys, re, xbmcplugin, xbmcgui, xbmcaddon, xbmc, os
import datetime
from datetime import date
import time
from addon import Addon
from threading import Timer

addon_id = 'plugin.video.hushamiptv'
ADDON = xbmcaddon.Addon(id=addon_id)
ADDON_HELPER = Addon(addon_id, sys.argv)

base_url = 'http://iptv.husham.com'
api_url = 'http://api.iptvapi.com/api/v1/'
site_id = '14'

api_key = '49216ba9-f7fa-479f-8e24-7b8426694c64'

# get parameters
mode = ADDON_HELPER.queries['mode']
play = ADDON_HELPER.queries.get('play', None)
image = ADDON_HELPER.queries.get('img', '')
title = ADDON_HELPER.queries.get('title', None)
dir_end = ADDON_HELPER.queries.get('dir_end', 'true')
dir_update = ADDON_HELPER.queries.get('dir_update', 'false')
url = ADDON_HELPER.queries.get('url', '')
referer = ADDON_HELPER.queries.get('referer', base_url)
channel_id = ADDON_HELPER.queries.get('channel_id', 0)
date = ADDON_HELPER.queries.get('date', None)
date_title = ADDON_HELPER.queries.get('date_title', '')

def Exit():
Example #13
"""
JSON-RPC methods implementation

The methods are called via POST request at this address.
Don't forget to add ('Content-Type': 'application/json') header to your http-request.
The API is compliant with JSON-RPC 2.0, though 'jsonrpc' and 'id' keys are optional in requests.
Example:
{"method": "pause_torrent", "params": {"info_hash":"21df87c3cc3209e3b6011a88053aec35a58582a9"}}

"params" is a JSON object (dict) containing method call parameters. Some methods do not take any parameters.
For those methods "params" key can be equal null or omitted.
"""

from addon import Addon

addon = Addon()


def ping(torrent_client, params=None):
    """
    Connection test method

    :return: 'pong'
    """
    return 'pong'
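
# Client-side usage sketch (illustrative only): invoking a method such as
# 'ping' above over HTTP. The host, port, and endpoint path below are
# assumptions; the module docstring says only that methods are called via POST.
#
#   import json
#   import urllib2
#   payload = json.dumps({'method': 'ping', 'params': None})
#   request = urllib2.Request('http://127.0.0.1:8668/json-rpc', payload,
#                             {'Content-Type': 'application/json'})
#   response = json.loads(urllib2.urlopen(request).read())  # expect result 'pong'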


def add_torrent(torrent_client, params):
    """
    Add torrent method
Example #14
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        base_url = 'https://chaturbate.com'
        home_url = base_url

        false_positives = ['#']

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30021),
                'content': '',
                'url': home_url,
                'cover_url': a.image('featuredcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'bygender',
                'title': a.language(30017),
                'content': '',
                'cover_url': a.image('bygender.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'byage',
                'title': a.language(30018),
                'content': '',
                'cover_url': a.image('byage.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'byregion',
                'title': a.language(30019),
                'content': '',
                'cover_url': a.image('byregion.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'bystatus',
                'title': a.language(30020),
                'content': '',
                'cover_url': a.image('bystatus.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'bygender':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30022),
                'content': '',
                'url': base_url + '/female-cams/',
                'cover_url': a.image('femalecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30023),
                'content': '',
                'url': base_url + '/male-cams/',
                'cover_url': a.image('malecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30024),
                'content': '',
                'url': base_url + '/couple-cams/',
                'cover_url': a.image('couplecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30025),
                'content': '',
                'url': base_url + '/transsexual-cams/',
                'cover_url': a.image('transcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'byage':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30026),
                'content': '',
                'url': base_url + '/teen-cams/',
                'cover_url': a.image('teencams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30027),
                'content': '',
                'url': base_url + '/18to21-cams/',
                'cover_url': a.image('18to21cams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30028),
                'content': '',
                'url': base_url + '/20to30-cams/',
                'cover_url': a.image('20to30cams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30029),
                'content': '',
                'url': base_url + '/30to50-cams/',
                'cover_url': a.image('30to50cams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30030),
                'content': '',
                'url': base_url + '/mature-cams/',
                'cover_url': a.image('maturecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'byregion':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30031),
                'content': '',
                'url': base_url + '/north-american-cams/',
                'cover_url': a.image('north-americancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30032),
                'content': '',
                'url': base_url + '/other-region-cams/',
                'cover_url': a.image('other-regioncams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30033),
                'content': '',
                'url': base_url + '/euro-russian-cams/',
                'cover_url': a.image('euro-russiancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30034),
                'content': '',
                'url': base_url + '/philippines-cams/',
                'cover_url': a.image('philippinescams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30035),
                'content': '',
                'url': base_url + '/asian-cams/',
                'cover_url': a.image('asiancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30036),
                'content': '',
                'url': base_url + '/south-american-cams/',
                'cover_url': a.image('south-americancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'bystatus':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30037),
                'content': '',
                'url': base_url + '/exhibitionist-cams/',
                'cover_url': a.image('exhibitionistcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30038),
                'content': '',
                'url': base_url + '/hd-cams/',
                'cover_url': a.image('hdcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'goto':
                last_item = re.search('page=([0-9]+)', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('page=[0-9]+', 'page=' + str(item),
                                           params['url']).replace(' ', '+')
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div',
                                     {'class': 'c-1 endless_page_template'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'episodes'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = ''
            params['sub_site'] = site
            if soup:
                ul = soup.find('ul', {'class': 'list'})
                if ul:
                    addondict = AddonDict(0).update(params)
                    for item in ul.findAll('li'):
                        _dict = addondict.copy()
                        clip_link = item.find('a')
                        if clip_link:
                            url = clip_link.get('href')
                            if not url.startswith(('http://', 'https://')):
                                url = base_url + url
                            _dict['url'] = url
                            ctitle = ''
                            cage = ''
                            cname = ''
                            ccams = ''
                            details = item.find('div', {'class': 'details'})
                            if details:
                                temp = details.find('a')
                                if temp:
                                    cname = str(temp.contents[0])
                                temp = details.find(
                                    'span', {'class': re.compile('age.*')})
                                if temp:
                                    cage = temp.string.encode('utf-8')
                                temp = details.find('li', {'class': 'cams'})
                                if temp:
                                    ccams = str(temp.contents[0])
                                temp = details.find('li', {'title': True})
                                if temp:
                                    ctitle = temp.get('title').encode('UTF-8')
                            if cname:
                                usetitle = '%s [%syr, %s] %s' % (cname, cage,
                                                                 ccams, ctitle)
                                _dict['title'] = usetitle
                                _dict['tvshowtitle'] = _dict['title']
                                _dict['originaltitle'] = _dict['title']
                                img = item.find('img')
                                if img:
                                    img = img.get('src')
                                    if img.startswith('//'):
                                        img = 'http:' + img
                                else:
                                    img = ''
                                _dict['cover_url'] = a.image(img)
                                _dict['thumb_url'] = _dict['cover_url']
                                _dict['poster'] = _dict['cover_url']
                                item_list.extend([_dict])

                    pages = BeautifulSoup(html,
                                          parseOnlyThese=SoupStrainer(
                                              'ul', {'class': 'paging'}))
                    if pages:
                        previouspage = pages.find(
                            'a', {'class': re.compile('prev.*')})
                        nextpage = pages.find('a',
                                              {'class': re.compile('next.*')})
                        lastpage = pages.find('span',
                                              {'class': 'endless_separator'})
                        if lastpage:
                            lastpage = lastpage.findNext('a')

                        if previouspage:
                            previouspage = previouspage.get('href').replace(
                                ' ', '+')
                            if previouspage != '#':
                                if not previouspage.startswith(('http://', 'https://')):
                                    previouspage = base_url + previouspage
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': previouspage,
                                    'content': params['content'],
                                    'title': a.language(30017, True),
                                    'cover_url': a.image('previous.png', image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])
                        if nextpage:
                            nextpage = nextpage.get('href').replace(' ', '+')
                            if nextpage != '#':
                                if not nextpage.startswith(('http://', 'https://')):
                                    nextpage = base_url + nextpage
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': nextpage,
                                    'content': params['content'],
                                    'title': a.language(30018, True),
                                    'cover_url': a.image('next.png', image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])
                        if lastpage:
                            lastpage = lastpage.get('href').replace(' ', '+')
                            if lastpage != '#':
                                if not lastpage.startswith(('http://', 'https://')):
                                    lastpage = base_url + lastpage
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': lastpage,
                                    'content': 'goto',
                                    'title': a.language(30019, True),
                                    'cover_url': a.image('goto.png', image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            link = re.search('html \+= "src=\'(.+?)\'', html)
            if link:
                from playback import Playback
                Playback().play_this(link.group(1), params['title'],
                                     params['cover_url'],
                                     a.common.usedirsources())
            else:
                a.alert(a.language(30904, True), sound=False)
Example #15
    def __init__(self, params):
        import re
        import json
        import urllib
        from addon import Addon
        from addondict import AddonDict

        a = Addon()
        site = self.__module__
        mode = params['mode']

        api_version = 'v5'
        recent_url = 'http://beeg.com/api/%s/index/main/0/pc' % api_version
        long_url = 'http://beeg.com/api/%s/index/tag/0/pc?tag=long%svideos' % (api_version, '%20')
        search_url = 'http://beeg.com/api/%s/index/search/0/pc?query=' % api_version
        tag_url = 'http://beeg.com/api/%s/index/tag/0/pc?tag=' % api_version
        img_url = 'http://img.beeg.com/236x177/%s.jpg'

        data_markers = 'data=pc.US'

        if mode == 'main':
            item_list = [{'site': site, 'mode': 'list', 'title': a.language(30003), 'content': '',
                          'url': recent_url, 'cover_url': a.image('recent.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'categories', 'title': a.language(30005), 'content': '',
                          'url': recent_url, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'list', 'title': a.language(30039), 'content': '',
                          'url': long_url, 'cover_url': a.image('longvideos.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'list', 'title': a.language(30004), 'content': 'search',
                          'url': search_url, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(),
                          'type': 3}]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            data = json.loads(html)
            item_list = []
            tags = data.get('tags', None)
            if tags:
                popular = tags.get('popular', None)
                if popular:
                    for item in popular:
                        url_item = re.search('(.+?)-', str(item))
                        if url_item: url_item = url_item.group(1)
                        else: url_item = item
                        item_list.extend([{'site': site, 'mode': 'list', 'url': tag_url + url_item,
                                           'content': '', 'title': str(item).capitalize(),
                                           'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
                nonpopular = tags.get('nonpopular', None)
                if nonpopular:
                    for item in nonpopular:
                        url_item = re.search('(.+?)-', str(item))
                        if url_item: url_item = url_item.group(1)
                        else: url_item = item
                        item_list.extend([{'site': site, 'mode': 'list', 'url': tag_url + urllib.quote(url_item),
                                           'content': '', 'title': str(item).capitalize(),
                                           'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item.replace(' ', '+')
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('/([0-9]+)/pc', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/[0-9]+/pc', '/' + str(item) + '/pc', params['url']).replace(' ', '+')
                else:
                    exit(1)
            html = a.get_page(params['url'])
            item_list = []
            data = json.loads(html)
            allvideos = []
            videos = data.get('videos', None)
            if videos:
                for video in videos:
                    nt_name = video.get('nt_name', '').encode('utf-8', 'ignore')
                    ps_name = video.get('ps_name', '').encode('utf-8', 'ignore')
                    atitle = video.get('title', '').encode('utf-8', 'ignore')
                    vid_id = video.get('id', '').encode('utf-8', 'ignore')
                    if nt_name.lower() == 'na': nt_name = ''
                    if ps_name.lower() == 'na': ps_name = ''
                    atitle = '%s - %s' % (atitle, ps_name)
                    if nt_name:
                        atitle += ' (%s)' % nt_name
                    if vid_id:
                        allvideos.append([vid_id, atitle, video])

                if allvideos:
                    params['mode'] = 'play'
                    params['content'] = 'episodes'
                    params['type'] = 0
                    params['context'] = 0
                    params['duration'] = '480'
                    params['sub_site'] = site
                    addondict = AddonDict(0).update(params)

                    for number, name, idata in allvideos:
                        _dict = addondict.copy()
                        _dict['title'] = name
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        _dict['cover_url'] = a.image(img_url % number)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        _dict['url'] = params['url']
                        _dict['count'] = number
                        item_list.extend([_dict])
                    pages = data.get('pages', 0)
                    if pages != 0:
                        pages -= 1
                    page = re.search('/([0-9]+)/pc', params['url'])
                    if page:
                        page = int(page.group(1))
                    else:
                        page = 0
                    previouspage = None
                    nextpage = None
                    lastpage = None
                    if page > 0:
                        previouspage = re.sub('/[0-9]+/pc', '/' + str(page - 1) + '/pc', params['url'])
                    if pages > 1:
                        lastpage = re.sub('/[0-9]+/pc', '/' + str(pages) + '/pc', params['url'])
                    if page < pages:
                        nextpage = re.sub('/[0-9]+/pc', '/' + str(page + 1) + '/pc', params['url'])

                    if previouspage:
                        item_list.extend([{'site': site, 'mode': 'list', 'url': previouspage, 'content': params['content'],
                                           'title': a.language(30017, True), 'cover_url': a.image('previous.png', image),
                                           'backdrop_url': a.art(), 'type': 3}])
                    if nextpage:
                        item_list.extend([{'site': site, 'mode': 'list', 'url': nextpage, 'content': params['content'],
                                           'title': a.language(30018, True), 'cover_url': a.image('next.png', image),
                                           'backdrop_url': a.art(), 'type': 3}])
                    if lastpage:
                        item_list.extend([{'site': site, 'mode': 'list', 'url': lastpage, 'content': 'goto',
                                           'title': a.language(30019, True), 'cover_url': a.image('goto.png', image),
                                           'backdrop_url': a.art(), 'type': 3}])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            data = json.loads(html)
            video = None
            videos = data.get('videos', None)
            if videos:
                for vid in videos:
                    if vid.get('id', None) == params['count']:
                        video = vid
                        break
                if video:
                    img = img_url % video.get('id')
                    name = params['title']
                    url = video.get('720p', None)
                    if not url:
                        url = video.get('480p', None)
                        if not url:
                            url = video.get('240p', None)
                    if url:
                        url = 'http:' + re.sub('\{DATA_MARKERS\}', data_markers, url)
                        from playback import Playback
                        Playback().play_this(url, name, img, a.common.usedirsources())
                    else:
                        a.alert(a.language(30904, True), sound=False)
Example #16
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        base_url = 'http://filmikz.ch'
        home_url = base_url + '/index.php?genre=14'
        search_url = home_url + '&search='
        false_positives = ['#']

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('pg=([0-9]+)', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                last_item = int(last_item / 10)
                item = a.page_input(last_item)
                if item:
                    item = str(int(item) * 10)
                    params['url'] = re.sub('pg=[0-9]+', 'pg=' + str(item),
                                           params['url']).replace(' ', '+')
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'td', {'width': '490'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            params['sub_site'] = site
            if soup:
                addondict = AddonDict(0).update(params)
                for item in soup.findAll('table', {
                        'width': '100%',
                        'height': '155'
                }):
                    _dict = addondict.copy()
                    ahref = item.find('a', {'href': True})
                    if ahref:
                        url = ahref.get('href')
                        if not url.startswith('http://'):
                            url = base_url + url
                        _dict['url'] = url
                        data = item.find('strong')
                        _dict['title'] = str(data.contents[0]).rstrip(' XXX :')
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        img = item.find('img')
                        if img:
                            img = img.get('src')
                            if not img.startswith('http://'):
                                img = base_url + '/' + img
                        else:
                            img = ''
                        _dict['cover_url'] = a.image(img)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        cast = item.find('p',
                                         text=re.compile('[Ss]tarring:.+'))
                        if cast:
                            _dict['plot'] = str(cast)
                            _dict['plotoutline'] = _dict['plot']
                            cast = re.search('[Ss]tarring:\s*(.+?)\s*\.+',
                                             str(cast))
                            if cast:
                                cast = cast.group(1)
                                _dict['cast'] = cast.split(', ')
                        item_list.extend([_dict])
                pages = BeautifulSoup(html,
                                      parseOnlyThese=SoupStrainer(
                                          'table', {'width': '250'}))
                if pages:
                    previouspage = None
                    nextpage = None
                    lastpage = None
                    for ahref in pages.findAll('a', {'href': True}):
                        astr = ahref.string.encode('utf-8')
                        if astr == '‹‹ ':
                            previouspage = base_url + '/' + ahref.get('href')
                        elif astr == '››':
                            nextpage = base_url + '/' + ahref.get('href')
                        elif astr == ' Last ':
                            lastpage = base_url + '/' + ahref.get('href')
                            last_item = re.search('pg=(-*[0-9]+)',
                                                  str(lastpage))
                            if last_item:
                                last_item = int(last_item.group(1))
                                if last_item < 10:
                                    lastpage = None
                    if previouspage:
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': previouspage,
                            'content': params['content'],
                            'title': a.language(30017, True),
                            'cover_url': a.image('previous.png', image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
                    if nextpage:
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': nextpage,
                            'content': params['content'],
                            'title': a.language(30018, True),
                            'cover_url': a.image('next.png', image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
                    if lastpage:
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': lastpage,
                            'content': 'goto',
                            'title': a.language(30019, True),
                            'cover_url': a.image('goto.png', image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
            item_list = []
            _bad_hosts = ['NowDownload', 'ePornik']
            if soup:
                buttons = soup.findAll('input', {
                    'type': 'button',
                    'onclick': True
                })
                if buttons:
                    addondict = AddonDict(0).update(params)
                    for button in buttons:
                        value = button.get('value')
                        newhost = re.search('.+?-([a-zA-Z]+)', value)
                        if newhost:
                            newhost = newhost.group(1)
                        else:
                            newhost = ''
                        if newhost not in _bad_hosts:
                            item = button.get('onclick')
                            item = re.sub(
                                'javascript:popUp\([\'"](.+?)[\'"]\);*',
                                '\g<01>', item)
                            item = base_url + item
                            value = button.get('value')
                            if not re.search('[Pp]art ', value):
                                try:
                                    thtml = a.get_page(item)
                                    tsoup = BeautifulSoup(thtml)
                                    source = tsoup.find('frame')
                                    if source:
                                        source = source.get('src')
                                        if 'ads.php' not in source:
                                            _dict = addondict.copy()
                                            _dict['url'] = source
                                            item_list.extend([_dict])
                                except Exception:
                                    continue
                    parts = []
                    oldhost = ''
                    _dict = addondict.copy()
                    _dict['multi-part'] = True
                    for button in buttons:
                        value = button.get('value')
                        newhost = re.search('.+?-([a-zA-Z]+)', value)
                        if newhost:
                            newhost = newhost.group(1)
                        else:
                            newhost = ''
                        if newhost not in _bad_hosts:
                            item = button.get('onclick')
                            item = re.sub(
                                r'javascript:popUp\([\'"](.+?)[\'"]\);*',
                                r'\g<1>', item)
                            item = base_url + item
                            if re.search('[Pp]art ', value):
                                if oldhost != newhost:
                                    if oldhost != '':
                                        _dict['parts'] = parts
                                        item_list.extend([_dict])
                                        _dict = addondict.copy()
                                        _dict['multi-part'] = True
                                        parts = []
                                    oldhost = newhost

                                try:
                                    thtml = a.get_page(item)
                                    tsoup = BeautifulSoup(thtml)
                                    source = tsoup.find('frame')
                                    if source:
                                        source = source.get('src')
                                        if 'ads.php' not in source:
                                            parts.extend([source])
                                except:
                                    continue
                    if parts:
                        _dict['parts'] = parts
                        item_list.extend([_dict])
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #17
'''
    common XBMC Module
    Copyright (C) 2011 t0mm0

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

from addon import Addon

addon = Addon('script.module.addon.common')
addon_path = addon.get_path()
addon_version = addon.get_version()
Example #18
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://pornhardx.com/'
        movies_url = home_url + 'category/full-movie/'
        scenes_url = home_url + 'video/'
        search_url = home_url + '?s='
        false_positives = [
            'http://pornhardx.com/video',
            'http://pornhardx.com/video/?order=viewed',
            'http://pornhardx.com/video/?order=liked', 'http://pornhardx.com/'
        ]

        if mode == 'main':
            item_list = []
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'title': a.language(30003),
                'content': '',
                'url': home_url,
                'cover_url': a.image('recent.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend([{
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'navigation-wrapper'}))
            item_list = []
            if soup:
                for item in soup.findAll('a', {'href': True}):
                    if item:
                        if item.get('href') not in false_positives:
                            if 'full-movie' in params['url']:
                                if movies_url != item.get(
                                        'href') and 'full-movie' in item.get(
                                            'href'):
                                    item_list.extend([{
                                        'site': site,
                                        'mode': 'list',
                                        'url': item.get('href'),
                                        'content': '',
                                        'title': item.contents[0].encode('UTF-8'),
                                        'cover_url': a.image(image, image),
                                        'backdrop_url': a.art(),
                                        'type': 3
                                    }])
                            elif 'full-movie' not in item.get('href'):
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': item.get('href'),
                                    'content': '',
                                    'title': item.contents[0].encode('UTF-8'),
                                    'cover_url': a.image(image, image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(
                html,
                parseOnlyThese=SoupStrainer(
                    'div',
                    {'class': re.compile(r'col-sm-8(?:\s*main-content)*')}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll(
                        'div',
                        {'class': re.compile(
                            '.*(?:col-xs-6 item|post type-post status-publish).*')}):
                    if item:
                        if item.a.get('href') not in false_positives:
                            _dict = xbmcdict.copy()
                            if 'full-movie' not in params['url']:
                                _dict['duration'] = '1500'
                                _dict['content'] = 'episodes'
                            if item.h3:
                                _dict['url'] = item.h3.a.get('href')
                                if item.h3.a.contents:
                                    _dict['title'] = item.h3.a.contents[
                                        0].encode('UTF-8')
                                else:
                                    _dict['title'] = 'Untitled'
                            elif item.h2:
                                _dict['url'] = item.h2.a.get('href')
                                if item.h2.a.contents:
                                    _dict['title'] = item.h2.a.contents[
                                        0].encode('UTF-8')
                                else:
                                    _dict['title'] = 'Untitled'
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            _dict['cover_url'] = a.image(item.img.get('src'))
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            _dict['sub_site'] = site

                            item_list.extend([_dict])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'ul', {'class': 'pagination'}))
            if soup.li:
                item = soup.find('a', {'class': 'prev page-numbers'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30017, True),
                        'cover_url': a.image(image, image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('a', {'class': 'next page-numbers'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image(image, image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                    if len(soup.findAll('a')) > 2:
                        last_item = soup.find('a', {
                            'class': 'next page-numbers'
                        }).parent.previousSibling.a.get('href')
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': last_item,
                            'content': 'goto',
                            'title': a.language(30019, True),
                            'cover_url': a.image(image, image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
                else:
                    item = soup.find('span', {'class': 'page-numbers current'})
                    if item:
                        if len(soup.findAll('a')) > 2:
                            last_item = item.parent.previousSibling.a.get(
                                'href')
                            item_list.extend([{
                                'site': site,
                                'mode': 'list',
                                'url': last_item,
                                'content': 'goto',
                                'title': a.language(30019, True),
                                'cover_url': a.image('goto.png', image),
                                'backdrop_url': a.art(),
                                'type': 3
                            }])
            else:
                soup = BeautifulSoup(html,
                                     parseOnlyThese=SoupStrainer(
                                         'ul', {'class': 'pager'}))
                item = soup.find('li', {'class': 'previous'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.previousSibling.get('href'),
                        'content': params['content'],
                        'title': a.language(30017, True),
                        'cover_url': a.image('previous.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('li', {'class': 'next'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.previousSibling.get('href'),
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
            item = ''
            item_list = []
            if soup:
                for item in soup.findAll('param', {'name': 'FlashVars'}):
                    item = item.get('value')
                    item = re.search(r'.*?proxy\.link=(.+?)&(?:proxy|skin).*?',
                                     item)
                    # a Match object can never appear in item_list, so the
                    # membership test here was a no-op; just take group(1)
                    item = item.group(1) if item else ''
                    xbmcdict = XBMCDict(0).update(params)
                    if item:
                        _dict = xbmcdict.copy()
                        _dict['url'] = item
                        item_list.extend([_dict])
                item = ''
                for item in soup.findAll('video'):
                    # scope the search to this <video> tag; a soup-wide
                    # findAll would attach every source to every video
                    for source in item.findAll('source'):
                        src = source.get('src')
                        if src:
                            xbmcdict = XBMCDict(0).update(params)
                            if '..' not in src:
                                _dict = xbmcdict.copy()
                                try:
                                    _dict['src_title'] = source.get('data-res') + 'p'
                                except:
                                    pass
                                _dict['url'] = src
                                item_list.extend([_dict])
                    # the <video> tag itself may also carry a src attribute
                    try:
                        src = item.get('src')
                        if src:
                            xbmcdict = XBMCDict(0).update(params)
                            if '..' not in src:
                                _dict = xbmcdict.copy()
                                _dict['url'] = src
                                item_list.extend([_dict])
                    except:
                        pass
                for script in soup.findAll('script'):
                    item = ''
                    src = script.get('src')
                    if src:
                        if 'http://videomega.tv/validatehash.php' in src:
                            item = src
                        elif 'ref=' in src:
                            temp = re.search('.*ref=[\'"](.+?)[\'"]', src)
                            if temp:
                                item = ('http://videomega.tv/iframe.php?ref=' +
                                        temp.group(1))
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
                for iframe in soup.findAll('iframe'):
                    item = ''
                    src = iframe.get('src')
                    if src:
                        if 'http://videomega.tv/validatehash.php' in src:
                            item = src
                        elif 'ref=' in src:
                            temp = re.search('.*ref=[\'"](.+?)[\'"]', src)
                            if temp:
                                item = ('http://videomega.tv/iframe.php?ref=' +
                                        temp.group(1))
                        else:
                            item = src
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])

            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
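
A pattern used throughout these handlers: one template dict is built from the
request params via XBMCDict(0).update(params) (which evidently returns the
instance), then copied per discovered source so the shared fields are filled
in only once. A minimal sketch of the idea, with found_urls as a hypothetical
stand-in for the scraped links:

    template = XBMCDict(0).update(params)
    item_list = []
    for url in found_urls:
        entry = template.copy()  # fresh per-source copy of the shared fields
        entry['url'] = url
        item_list.append(entry)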
Example #19
    def __init__(self):
        self.ad = Addon()
        self.create_dirs()
        self.copy_db()
Example #20
    def __init__(self):
        self.cpu = CPU()
        self.memory = Memory()
        self.disk = Disk()
        self.network = Network()
        self.addon = Addon()
Example #21
import configparser
import io
import math

import requests

# globals, Addon, Application, ITEMSPERPAGE, initConfigValues, showPage,
# currpage and root are defined elsewhere in this script's project.
nextButtonState = None

globals.init()
initConfigValues()

conf = configparser.ConfigParser()
conf.read('TS5AddonInstaller.ini')

# fetch possible addons
config = configparser.ConfigParser()
try:  # try custom lookup server from config file
    configData = requests.get(str(conf['config']['url'])).text
except Exception:  # fallback to default lookup server
    configData = requests.get(
        "https://julianimhof.de/files/TS5Addons/experimental/addons.ini").text
config.read_file(io.StringIO(configData))

MAXPAGES = math.ceil((len(config.sections()) - 1) / ITEMSPERPAGE)

addons = []
for addon in [x for x in config.sections() if x != "general"]:
    addons.append(Addon(addon, config[addon]))
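
# The loop above implies an addons.ini with one section per addon plus a
# "general" section that is skipped; the keys inside each section are
# whatever the Addon class expects, so this sketch is illustrative only:
#
#   [general]
#   ; global settings consumed elsewhere
#
#   [SomeAddon]
#   url = https://example.com/someaddon.zip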

app = Application(master=root)
globals.app = app
globals.showPage = lambda: showPage(currpage)

showPage(0)

app.mainloop()
Example #22
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://www.freeomovie.com/'
        movies_url = home_url + 'category/full-movie/'
        scenes_url = home_url + 'category/clips/'
        search_url = home_url + '?s='  # home_url already ends with '/'
        false_positives = [
            'http://www.freeomovie.com/category/full-movie/',
            'http://www.freeomovie.com/category/clips/'
        ]

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30001),
                'content': '',
                'url': movies_url,
                'cover_url': a.image('movies.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30002),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('scenes.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div',
                                     {'class': 'multi-column-taxonomy-list'}))
            item_list = []
            if soup:
                for item in soup.findAll('a'):
                    if item:
                        if item.get('href') not in false_positives:
                            item_list.extend([{
                                'site': site,
                                'mode': 'list',
                                'url': item.get('href'),
                                'content': '',
                                'title': item.string.encode('UTF-8'),
                                'cover_url': a.image(image, image),
                                'backdrop_url': a.art(),
                                'type': 3
                            }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'content'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('div', {'class': 'postbox'}):
                    if item:
                        if item.h2.a.get('href') not in false_positives:
                            _dict = xbmcdict.copy()
                            if scenes_url in params['url']:
                                _dict['duration'] = '1500'
                                _dict['content'] = 'episodes'
                            _dict['url'] = item.h2.a.get('href')
                            _dict['title'] = item.h2.a.get('title').encode(
                                'UTF-8')
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            _dict['cover_url'] = a.image(item.img.get('src'))
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            _dict['sub_site'] = site
                            item_list.extend([_dict])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'wp-pagenavi'}))
            last_item = False
            if soup:
                for item in soup.findAll('a', href=True):
                    if item:
                        if item.get('class') == 'previouspostslink':
                            item_list.extend([{
                                'site': site,
                                'mode': 'list',
                                'url': item.get('href'),
                                'content': params['content'],
                                'title': a.language(30017, True),
                                'cover_url': a.image('previous.png', image),
                                'backdrop_url': a.art(),
                                'type': 3
                            }])
                        if item.get('class') == 'nextpostslink':
                            item_list.extend([{
                                'site': site,
                                'mode': 'list',
                                'url': item.get('href'),
                                'content': params['content'],
                                'title': a.language(30018, True),
                                'cover_url': a.image('next.png', image),
                                'backdrop_url': a.art(),
                                'type': 3
                            }])
                        if item.get('class') == 'last':
                            last_item = item.get('href')
                if not last_item:
                    try:
                        if not soup.find('a', {'class': 'nextpostslink'}):
                            last_item = soup.findAll('a',
                                                     href=True)[-1].get('href')
                        else:
                            last_item = soup.findAll('a',
                                                     href=True)[-2].get('href')
                    except:
                        pass
                if last_item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': last_item,
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            item_list = []
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'videosection'}))
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                pages = soup.findAll('li', {'class': re.compile('pg.')})
                if pages:
                    old_li = pages[0].get('class')
                    _dict = xbmcdict.copy()
                    _dict['multi-part'] = True
                    parts = []
                    for li in pages:
                        if old_li != li.get('class'):
                            _dict['parts'] = parts
                            item_list.extend([_dict])
                            _dict = xbmcdict.copy()
                            _dict['multi-part'] = True
                            old_li = li.get('class')
                            parts = []
                        url = re.search('.+myurl=(.+)', li.a.get('href'),
                                        re.IGNORECASE)
                        if url:
                            url = url.group(1)
                            parts.extend([url])
                    if parts:
                        _dict['parts'] = parts
                        item_list.extend([_dict])
                alink = soup.find('a', {'target': '_blank'})
                if alink:
                    alink = alink.get('href')
                    if 'main.exoclick.com' not in alink:
                        _dict = xbmcdict.copy()
                        _dict['url'] = alink
                        item_list.extend([_dict])
                iframes = soup.findAll('iframe', {'src': True})
                if iframes:
                    for iframe in iframes:
                        iframe = iframe.get('src')
                        if 'main.exoclick.com' not in iframe:
                            _dict = xbmcdict.copy()
                            _dict['url'] = iframe
                            item_list.extend([_dict])
                if not item_list:
                    soup = BeautifulSoup(html,
                                         parseOnlyThese=SoupStrainer(
                                             'ul', {'id': 'countrytabs'}))
                    if soup:
                        xbmcdict = XBMCDict(0).update(params)
                        for index, items in enumerate(
                                soup.findAll('a', href=True)):
                            item = ''
                            if items.get('id') != 'jpg':
                                item = items.get('href')
                                item = re.search(r'.*myURL\[\]=(.+)$', item,
                                                 re.DOTALL)
                                if item:
                                    item = re.sub('&tab=[0-9]+', '',
                                                  item.group(1))
                                if item:
                                    _dict = xbmcdict.copy()
                                    _dict['url'] = item
                                    _dict['count'] = index
                                    item_list.extend([_dict])
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
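
The multi-part handling in the 'play' branch above starts a new parts list
whenever the pg* class of consecutive list items changes, i.e. it groups runs
of identical classes. The same run-grouping can be written with
itertools.groupby; a standalone sketch (group_parts and the (class, url)
pairs are hypothetical, not part of the addon API):

    from itertools import groupby

    def group_parts(pages):
        # pages is a list of (class_name, url) pairs in page order;
        # emit one list of urls per run of identical class names
        return [[url for _, url in run]
                for _, run in groupby(pages, key=lambda p: p[0])]

    # group_parts([('pg1', 'a'), ('pg1', 'b'), ('pg2', 'c')])
    # -> [['a', 'b'], ['c']]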
Example #23
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        base_url = 'http://yespornplease.com'
        home_url = base_url + '/index.php'
        popular_url = base_url + '/index.php?p=1&m=today'
        search_url = base_url + '/search.php?q='
        false_positives = ['']

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30016),
                'content': '',
                'url': popular_url,
                'cover_url': a.image('popular.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'categories'}))
            item_list = []
            if soup:
                for item in soup.findAll('a'):
                    if item:
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': item.get('href').replace(' ', '+'),
                            'content': '',
                            'title': item.string.encode('UTF-8'),
                            'cover_url': a.image(image, image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item.replace(' ', '+')
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('p=([0-9]+)', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('p=[0-9]+', 'p=' + str(item),
                                           params['url']).replace(' ', '+')
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'videos'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('div', {'class': 'video-preview'}):
                    if item:
                        _dict = xbmcdict.copy()
                        temp = item.find('div', {'class': 'jcarousel'}).a
                        if temp:
                            temp = temp.get('href')
                            if not temp.startswith('http://'):
                                temp = base_url + temp
                            _dict['url'] = temp
                            _dict['title'] = item.find('div', {
                                'class': 'preview-title'
                            }).get('title').encode('UTF-8')
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            temp = item.find('div', {
                                'class': 'jcarousel'
                            }).img.get('src')
                            if temp.startswith('//'): temp = 'http:' + temp
                            _dict['cover_url'] = a.image(temp)
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            temp = item.find('div', {
                                'class': 'preview-info-box length'
                            }).b.string
                            if temp:
                                temp = re.search('([0-9]+):([0-9]+):([0-9]+)',
                                                 temp)
                                _dict['duration'] = str(
                                    (int(temp.group(1)) * 60 * 60) +
                                    (int(temp.group(2)) * 60) +
                                    int(temp.group(3)))
                            _dict['sub_site'] = site

                            item_list.extend([_dict])

                soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
                if soup.find('a', {'id': 'prev-page'}):
                    item = soup.find('a', {
                        'id': 'prev-page'
                    }).get('href').replace(' ', '+')
                    if not item.startswith('http://'): item = base_url + item
                    if 'index.php' in params['url']:
                        item = item.replace('search.php', 'index.php')
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item,
                        'content': params['content'],
                        'title': a.language(30017, True),
                        'cover_url': a.image('previous.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                if soup.find('a', {'id': 'next-page'}):
                    item = soup.find('a', {
                        'id': 'next-page'
                    }).get('href').replace(' ', '+')
                    if 'index.php' in params['url']:
                        item = item.replace('search.php', 'index.php')
                    if not item.startswith('http://'): item = base_url + item
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item,
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])

                soup = BeautifulSoup(html,
                                     parseOnlyThese=SoupStrainer(
                                         'div', {'id': 'pagination'}))
                last_item = False
                if soup:
                    for item in reversed(soup.findAll('a')):
                        last_item = item.get('href')
                        if not last_item.startswith('http://'):
                            last_item = base_url + last_item
                        break
                if last_item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': last_item,
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'object', {'id': 'videoContainer'}))
            item_list = []
            if soup:
                item = soup.find('param', {'name': 'flashvars'})
                item = re.search('.*?video_url=(.+?)&.*?', str(item))
                if item: item = item.group(1)
                xbmcdict = XBMCDict(0).update(params)
                if item:
                    _dict = xbmcdict.copy()
                    _dict['url'] = item
                    item_list.extend([_dict])
                else:
                    a.alert(a.language(30904, True), sound=False)
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
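
The flashvars handling above pulls video_url out with a regex that requires a
trailing '&'. If the value sits in an ordinary URL-encoded query string,
parsing it as one is more robust; a sketch under that assumption (urlparse is
the Python 2 spelling matching this code; on Python 3 it is urllib.parse).
Note that parse_qs also percent-decodes the value, which the regex does not:

    from urlparse import parse_qs

    def video_url_from_flashvars(flashvars):
        # parse_qs maps each key to a list of values
        values = parse_qs(flashvars)
        return values.get('video_url', [''])[0]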
Example #24
    def __init__(self, params):
        site = self.__module__
        addon = Addon()
        common = addon.common
        mode = params['mode']
        common.update_favorites_db()

        if mode == 'main':
            __item_list_ = [{'site': site, 'mode': 'list_favorites', 'title': addon.language(30884, True), 'content': 'all',
                             'sub_site': params['sub_site'], 'cover_url': addon.image('all.png'), 'backdrop_url': addon.art(), 'type': 3},
                            {'site': site, 'mode': 'list_favorites', 'title': addon.language(30885, True), 'content': 'movies',
                             'sub_site': params['sub_site'], 'cover_url': addon.image('movies.png'), 'backdrop_url': addon.art(), 'type': 3},
                            {'site': site, 'mode': 'list_favorites', 'title': addon.language(30886, True), 'content': 'tvshows',
                             'sub_site': params['sub_site'], 'cover_url': addon.image('tvshows.png'), 'backdrop_url': addon.art(), 'type': 3},
                            {'site': site, 'mode': 'list_favorites', 'title': addon.language(30888, True), 'content': 'episodes',
                             'sub_site': params['sub_site'], 'cover_url': addon.image('scenes.png'), 'backdrop_url': addon.art(), 'type': 3}]

            addon.add_items(__item_list_)
            addon.end_of_directory()

        elif mode == 'add_favorite':
            params = AddonDict(common.addon_type()).str_update(params['__params_'])
            execute = 'INSERT INTO ' + common.fav_db_table + ' (sub_site, content, url, __params_) VALUES (?, ?, ?, ?)'
            inserted = common.db.execute(execute, (params['sub_site'], params['content'], params['url'], str(params)))
            if common.to_bool(inserted):
                if inserted == 1:
                    addon.alert(str(addon.language(30891, True) + ' ' + params['title'].decode('ascii', 'ignore') + ' ' + addon.language(30893, True)))
                if inserted == 2:
                    addon.alert(str(params['title'].decode('ascii', 'ignore') + ' ' + addon.language(30890, True)))

        elif mode == 'delete_favorite':
            params = AddonDict(common.addon_type()).str_update(params['__params_'])
            execute = 'DELETE FROM ' + common.fav_db_table + ' WHERE sub_site=? AND content=? AND url=?'
            deleted = common.db.execute(execute, (params['sub_site'], params['content'], params['url']))
            if common.to_bool(deleted):
                addon.alert(str(addon.language(30892, True) + ' ' + params['title'].decode('ascii', 'ignore') + ' ' + addon.language(30894, True)))
                xbmc.executebuiltin('Container.Refresh')

        elif mode == 'list_favorites':
            if params['content'] == 'all':
                sql_params = (params['sub_site'],)
                execute = 'SELECT * FROM ' + common.fav_db_table + ' WHERE sub_site=?'
            else:
                sql_params = (params['sub_site'], params['content'])
                execute = 'SELECT * FROM ' + common.fav_db_table + ' WHERE sub_site=? AND content=?'
            selected = common.db.fetchall(execute, sql_params)
            item_list = []
            if selected:
                for this_id, site, content, url, params in selected:
                    params = AddonDict(common.addon_type()).str_update(params)
                    params['context'] = 4
                    item_list.extend([params])
            if item_list:
                addon.add_items(item_list)
            addon.end_of_directory()

        elif mode == 'clear_favorites':
            """
            Prompt user for confirmation prior to clearing all favorites / removing favorites table
            """
            if not params['sub_site']:
                execute = 'DROP TABLE ' + common.fav_db_table
                sql_params = ''
            else:
                execute = 'DELETE FROM ' + common.fav_db_table + ' WHERE sub_site=?'
                sql_params = (params['sub_site'],)
            clear_favs = xbmcgui.Dialog().yesno(
                common.addon_name + ' - ' + addon.language(30895, True), ' ', addon.language(30896, True),
                nolabel=addon.language(30899, True), yeslabel=addon.language(30898, True))
            if common.to_bool(clear_favs):
                cleared = common.db.execute(execute, sql_params)
                if common.to_bool(cleared):
                    # SQLite's VACUUM rebuilds the whole database file;
                    # it does not accept a table name
                    common.db.execute('VACUUM')
                    addon.alert(str(addon.language(30897, True)))
                    xbmc.executebuiltin('Container.Refresh')
Example #25
'''
    Ice Channel
'''
import os
import sys
from addon import Addon

addon_id = 'script.icechannel'

try:
    addon = Addon(addon_id, sys.argv)
except:
    addon = Addon(addon_id)

addon_path = addon.get_path()
addon_version = addon.get_version()

lib_path = os.path.join(addon_path, 'lib', 'entertainment')
plugins_path = os.path.join(lib_path, 'plugins')
settings_file = os.path.join(addon_path, 'resources', 'settings.xml')

profile_path = addon.get_profile()

theme_name = addon.get_setting('theme')
theme_type = addon.get_setting(theme_name + '_themetype')
if theme_type == 'online':
    icon_path = addon.get_setting(theme_name + '_themeurl')
else:
    theme_addon = Addon(addon.get_setting(theme_name + '_themeaddon'))
    icon_path = os.path.join(theme_addon.get_path(), 'theme')
Example #26
import os
import sys

import cloudflare
import jsunpack
import net
import resolveurl
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
from addon import Addon

net = net.Net()
addon_id = 'plugin.video.anime69'
selfAddon = xbmcaddon.Addon(id=addon_id)
datapath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))
addon = Addon(addon_id, sys.argv)
fanart = xbmc.translatePath(
    os.path.join('special://home/addons/' + addon_id, 'fanart.jpg'))
icon = xbmc.translatePath(
    os.path.join('special://home/addons/' + addon_id, 'icon.png'))
superc = xbmc.translatePath(
    os.path.join('special://home/addons/' + addon_id, 'superc.png'))
animexd = xbmc.translatePath(
    os.path.join('special://home/addons/' + addon_id, 'animexd.png'))
try:
    os.mkdir(datapath)
except:
    pass
# make sure the cookie file exists before its path is handed around
cookie_file = os.path.join(datapath, 'cookie.lwp')
open(xbmc.translatePath(cookie_file), 'a').close()
Example #27
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://xtheatre.net/'
        search_url = home_url + '?s='
        false_positives = [
            'http://watchxxxhd.net/watch-full-movies-hd/',
            'http://watchxxxhd.net',
            'http://watchxxxhd.net/category/movies/',
            'http://watchxxxhd.net/category/ategorized222/'
        ]

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url + '?filtre=date&cat=0',
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url + 'categories/',
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'ul', {'class': 'listing-cat'}))
            item_list = []
            if soup:
                for item in soup.findAll('li'):
                    if item:
                        if item.a.get('href') not in false_positives:
                            try:
                                vidcount = item.findAll(
                                    'span',
                                    {'class': 'nb_cat border-radius-5'}
                                )[0].string.encode('UTF-8')
                                vidcount = re.sub(r'\svideo[s]*', '', vidcount)
                            except:
                                vidcount = '0'
                            if vidcount and vidcount != '0':
                                img = item.find('img')
                                if img:
                                    # .get() returns None instead of raising,
                                    # so chain the fallbacks explicitly
                                    img = img.get('data-lazy-src') or img.get('src')
                                if not img:
                                    img = ''
                                title = item.a.get('title').encode(
                                    'UTF-8') + ' (%s)' % vidcount
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': item.a.get('href'),
                                    'content': '',
                                    'title': title,
                                    'cover_url': a.image(img, image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(
                html,
                parseOnlyThese=SoupStrainer(
                    'ul', {'class': 'listing-videos listing-extract'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll(
                        'li', {'class': 'border-radius-5 box-shadow'}):
                    if item:
                        if item.a.get('href') not in false_positives:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item.a.get('href')
                            _dict['title'] = item.a.get('title').encode(
                                'UTF-8')
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            img = item.find('img')
                            if img:
                                # .get() returns None instead of raising, so
                                # fall back through the attributes directly
                                img = img.get('data-lazy-src') or img.get('src')
                            if not img:
                                img = ''
                            _dict['cover_url'] = a.image(img)
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            _dict['sub_site'] = site
                            plot = item.find('div', {'class': 'right'})
                            if plot:
                                plot = plot.p.contents[0].encode('utf-8')
                                _dict['plot'] = plot
                                _dict['plotoutline'] = plot
                            item_list.extend([_dict])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'pagination'}))
            last_item = False
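            # locate the last-page link: prefer an explicit 'Last »' anchor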
            if soup:
                for item in soup.findAll('a'):
                    if (item.string.encode('UTF-8') == 'Last »'
                            or item.get('class') == 'last'):
                        last_item = item.get('href')
                        break
                if last_item is False:
                    # otherwise fall back to the last 'inactive' page link
                    for last_item in soup.findAll('a', {'class': 'inactive'}):
                        pass
                    if last_item:
                        last_item = last_item.get('href')
                item = soup.find('span', {'class': 'current'})
                if item:
                    if item.parent:
                        item = item.parent
                        if item.previousSibling:
                            if item.previousSibling.find('a'):
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': item.previousSibling.a.get('href'),
                                    'content': params['content'],
                                    'title': a.language(30017, True),
                                    'cover_url': a.image('previous.png', image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])
                        if item.nextSibling:
                            if item.nextSibling.find('a'):
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': item.nextSibling.a.get('href'),
                                    'content': params['content'],
                                    'title': a.language(30018, True),
                                    'cover_url': a.image('next.png', image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])
            if last_item:
                item_list.extend([{
                    'site': site,
                    'mode': 'list',
                    'url': last_item,
                    'content': 'goto',
                    'title': a.language(30019, True),
                    'cover_url': a.image('goto.png', image),
                    'backdrop_url': a.art(),
                    'type': 3
                }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'video-embed'}))
            item_list = []
            if soup:
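                # the page obfuscates its embed tags (e.g. <s_cr_ipt>), so
                # match any underscore-padded spelling of 'script'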
                for script in soup.findAll(re.compile('s_*c_*r_*i_*p_*t')):
                    item = ''
                    if script.get('src'):
                        if ('http://videomega.tv/validatehash.php'
                                in script['src']):
                            item = script['src']
                        elif 'ref=' in script.get('src'):
                            temp = re.search('.*ref=[\'"](.+?)[\'"]',
                                             script.get('src'))
                            if temp:
                                item = ('http://videomega.tv/iframe.php?ref='
                                        + temp.group(1))
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
                if soup.find('iframe', src=True):
                    item = ''
                    for iframe in soup.findAll('iframe', src=True):
                        if iframe.get('data-lazy-src'):
                            item = iframe.get('data-lazy-src')
                            r = re.search('.+old=(.+)$', item)
                            if r:
                                item = r.group(1)
                        else:
                            item = iframe.get('src').replace('\\', '')
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'video-infos'}))
            if soup:
                item = ''
                for p in soup.findAll('p'):
                    if p.iframe:
                        item = p.iframe.get('src')
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #28
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://playporn.to/'
        search_url = home_url + '?submit=Search&s='
        movies_url = home_url + 'category/xxx-movie-stream/'
        scenes_url = home_url + 'category/xxx-clips-scenes-stream/'
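        # known-bad links that should never appear in listings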
        false_positives = ['http://playporn.to/deutsche-milfs-anonym-sex/']

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30003),
                'content': '',
                'url': home_url,
                'cover_url': a.image('recent.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'sub',
                'title': a.language(30001),
                'content': '',
                'url': movies_url,
                'cover_url': a.image('movies.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'sub',
                'title': a.language(30002),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('scenes.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'sub':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': params['url'],
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'category',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'category':
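            # the nav holds separate category <ul>s: index 1 = movies, 2 = scenes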
            index = 1
            if 'scenes' in params['url'].lower():
                index = 2
            html = a.get_page(home_url)
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer('ul', 'nav fl'))
            item_list = []
            for item in soup.findAll('ul')[index].findAll({'a': True}):
                item_list.extend([{
                    'site': 'playporn',
                    'mode': 'list',
                    'url': item.get('href'),
                    'content': '',
                    'title': item.contents[0].encode('UTF-8'),
                    'cover_url': a.image(image, image),
                    'backdrop_url': a.art(),
                    'type': 3
                }])
            if item_list:
                a.add_items(item_list)
                a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            xbmcdict = XBMCDict(0).update(params)
            for item in soup.findAll('div', 'photo-thumb-image'):
                if item.a.get('href') not in false_positives:
                    _dict = xbmcdict.copy()
                    if 'scenes' in params['url']:
                        _dict['duration'] = '2700'
                        _dict['content'] = 'episodes'
                    _dict['url'] = item.a.get('href')
                    _dict['title'] = item.a.get('title').encode('UTF-8')
                    _dict['tvshowtitle'] = _dict['title']
                    _dict['originaltitle'] = _dict['title']
                    _dict['cover_url'] = a.image(item.img.get('src'))
                    _dict['thumb_url'] = _dict['cover_url']
                    _dict['poster'] = _dict['cover_url']
                    _dict['sub_site'] = site

                    item_list.extend([_dict])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', 'more_entries'))
            if soup:
                item = soup.find('a', 'previouspostslink')
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30017, True),
                        'cover_url': a.image('previous.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('a', 'nextpostslink')
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('a', 'last')
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
            if item_list:
                a.add_items(item_list)
                a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'loopedSlider'}))
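            # the embed markup is HTML-escaped inside an HTML comment node;
            # extract it and unescape it before reparsing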
            soup = soup.find(text=lambda text: isinstance(text, Comment))
            if soup:
                soup = re.sub('&lt;', '<', soup.encode('utf-8'))
                soup = re.sub('&gt;', '>', soup)
                soup = BeautifulSoup(soup,
                                     parseOnlyThese=SoupStrainer(
                                         'div', 'video'))
                if soup:
                    item_list = []
                    xbmcdict = XBMCDict(0).update(params)
                    for item in soup.findAll('iframe'):
                        _dict = xbmcdict.copy()
                        _dict['url'] = item.get('src').replace(
                            'http://playporn.to/stream/all/?file=',
                            '').encode('UTF-8')
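                        # rewrite known hosters to their canonical embed URLs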
                        if 'flashx.tv' in _dict['url'].lower():
                            item = re.search('hash=(.+?)&', _dict['url'])
                            if item:
                                _dict['url'] = ('http://flashx.tv/video/'
                                                + item.group(1) + '/')
                        elif 'played.to' in _dict['url'].lower():
                            item = re.search('embed-([a-zA-Z0-9]+?)-.+?html',
                                             _dict['url'])
                            if item:
                                _dict['url'] = ('http://played.to/'
                                                + item.group(1))
                        item_list.extend([_dict])
                    if item_list:
                        from playback import Playback
                        Playback().choose_sources(item_list)
                    else:
                        a.alert(a.language(30904, True), sound=False)
                else:
                    a.alert(a.language(30904, True), sound=False)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #29
    def __init__(self, params):
        import re
        import urllib2
        from addon import Addon
        from addondict import AddonDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        base_url = 'http://urbanhentai.com'
        home_url = base_url
        search_url = base_url + '/?s='
        false_positives = ['#']

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'li', {'id': 'menu-item-4538'}))
            item_list = []
            if soup:
                genre_list = soup.find('ul', {'class': 'sub-menu'})
                if genre_list:
                    # iterate only the sub-menu's links, not the whole strainer
                    for item in genre_list.findAll('a'):
                        if item.get('href') not in false_positives:
                            item_list.extend([{
                                'site': site,
                                'mode': 'list',
                                'url': item.get('href').replace(' ', '+'),
                                'content': '',
                                'title': item.string.encode('UTF-8'),
                                'cover_url': a.image(image, image),
                                'backdrop_url': a.art(),
                                'type': 3
                            }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item.replace(' ', '+')
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url']).replace(' ', '+')
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div',
                                     {'class': re.compile('loop-content.*')}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'episodes'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '1500'
            params['sub_site'] = site
            if soup:
                addondict = AddonDict(0).update(params)
                for item in soup.findAll('div',
                                         {'id': re.compile('post-[0-9]+')}):
                    _dict = addondict.copy()
                    clip_link = item.find('a', {'class': 'clip-link'})
                    if clip_link:
                        url = clip_link.get('href')
                        if not url.startswith('http://'):
                            url = base_url + url
                        _dict['url'] = url
                        try:
                            _dict['title'] = clip_link.get('title').encode(
                                'UTF-8')
                        except AttributeError:
                            # no title attribute; fall back to the entry heading
                            data = item.find('h2', {'class': 'entry-title'})
                            if data:
                                _dict['title'] = str(data.a.contents[0])
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        img = item.find('img')
                        if img:
                            img = img.get('src')
                            if img.startswith('//'):
                                img = 'http:' + img
                        else:
                            img = ''
                        _dict['cover_url'] = a.image(img)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        item_list.extend([_dict])
                pages = BeautifulSoup(html,
                                      parseOnlyThese=SoupStrainer(
                                          'div', {'class': 'wp-pagenavi'}))
                if pages:
                    previouspage = pages.find('a',
                                              {'class': 'previouspostslink'})
                    nextpage = pages.find('a', {'class': 'nextpostslink'})
                    lastpage = pages.find('a', {'class': 'last'})

                    if previouspage:
                        previouspage = previouspage.get('href').replace(
                            ' ', '+')
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': previouspage,
                            'content': params['content'],
                            'title': a.language(30017, True),
                            'cover_url': a.image('previous.png', image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
                    if nextpage:
                        nextpage = nextpage.get('href').replace(' ', '+')
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': nextpage,
                            'content': params['content'],
                            'title': a.language(30018, True),
                            'cover_url': a.image('next.png', image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
                    if lastpage:
                        lastpage = lastpage.get('href').replace(' ', '+')
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': lastpage,
                            'content': 'goto',
                            'title': a.language(30019, True),
                            'cover_url': a.image('goto.png', image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div',
                                     {'class': re.compile('entry-content.*')}))
            item_list = []
            if soup:
                item = re.search(r'file\s*:\s*[\'"](.+?)[\'"]',
                                 str(soup.contents[0]))
                if item:
                    item = item.group(1)
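                    # same-site links are redirect stubs; follow them with
                    # browser-like headers to reach the real stream URL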
                    if base_url in item:
                        try:
                            opener = urllib2.build_opener()
                            # set all headers in one assignment; the original
                            # reassigned addheaders three times, so only the
                            # Accept header actually survived
                            opener.addheaders = [
                                ('User-agent',
                                 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                                 'AppleWebKit/537.36 (KHTML, like Gecko) '
                                 'Chrome/45.0.2454.101 Safari/537.36'),
                                ('Referer', params['url']),
                                ('Accept', 'text/html,application/xhtml+xml,'
                                 'application/xml;q=0.9,image/webp,*/*;q=0.8'),
                            ]
                            urllib2.install_opener(opener)
                            item = urllib2.urlopen(item).geturl()
                        except urllib2.HTTPError as e:
                            if item != e.geturl():
                                item = e.geturl()
                            else:
                                item = None
                    if item:
                        addondict = AddonDict(0).update(params)
                        _dict = addondict.copy()
                        _dict['url'] = item
                        item_list.extend([_dict])
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #30
# -*- coding: utf-8 -*-
try:
    # Python 2 / Kodi's bundled BeautifulSoup 3
    from BeautifulSoup import BeautifulSoup as bss
except ImportError:
    from bs4 import BeautifulSoup as bss
try:
    import urllib2
except ImportError:
    import urllib.request as urllib2

import urllib, client, control
import re, sys
from random import randint
from log_utils import log
from addon import Addon
addon = Addon('plugin.video.croatia_od', sys.argv)


def track_ga(sc):
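    # send a screenview hit to Google Analytics (Measurement Protocol v1);
    # failures are deliberately ignored so tracking never blocks the addon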
    try:
        cid = get_cid()
        av = addon.get_version()
        sc = urllib.quote(sc)
        client.request(
            'http://www.google-analytics.com/collect?payload_data&v=1&tid=UA-79665842-1&cid=%s&t=screenview&an=Croatia%%20On%%20Demand&av=%s&cd=%s'
            % (cid, av, sc))
    except:
        pass


def get_cid():