Ejemplo n.º 1
0
def get_url_with_headers(url, headers):
    """Return *url* with *headers* re-encoded as a Kodi-style suffix.

    Kodi accepts URLs of the form ``http://...|Header1=v1&Header2=v2``.
    This merges three header sources into that trailing section: headers
    already encoded after ``|`` in *url*, the *headers* dict, and cookies
    from the shared cookie jar whose domain matches the URL.

    NOTE(review): *headers* is mutated in place -- 'Accept-Encoding' and
    'Host' are deleted, and its 'Cookie' value may be extended.
    """
    # Drop headers that should be negotiated per request, not pinned.
    if 'Accept-Encoding' in headers:
        del headers['Accept-Encoding']
    if 'Host' in headers:
        del headers['Host']
    # Split off any headers already encoded in the URL after '|'.
    parts = url.split('|')
    url = parts[0]
    url_headers = {}
    if len(parts) > 1:
        for i in re.finditer(r'(?:&|^)([^=]+)=(.+?)(?:&|$)', parts[-1]):
            if (i.group(1) == 'Cookie') and ('Cookie' in headers):
                # URL-carried cookies are concatenated onto the caller's.
                headers['Cookie'] += unquote_plus(i.group(2))
            else:
                url_headers.update({i.group(1): unquote_plus(i.group(2))})
    # The caller's headers win over URL-carried ones on key conflicts.
    url_headers.update(headers)
    cookie_string = ''
    if 'Cookie' in url_headers:
        # Normalize the Cookie header into 'name=value;' pairs.
        cookie_string = ''.join(
            c.group(1)
            for c in re.finditer(r'(?:^|\s)(.+?=.+?;)', url_headers['Cookie']))
        del url_headers['Cookie']
    net = Net()
    cookie_jar_result = net.set_cookies(COOKIE_FILE)  # loads the shared jar into net._cj
    # Append jar cookies for this domain that are not already present.
    # NOTE(review): membership is tested on the *value*, not 'name=value';
    # confirm this cannot skip a cookie whose value happens to be a substring.
    for c in net._cj:
        if c.domain and (c.domain.lstrip('.') in url):
            if c.value not in cookie_string:
                cookie_string += '%s=%s;' % (c.name, c.value)
    if cookie_string:
        sep = '&' if url_headers else ''
        return url + append_headers(
            url_headers) + sep + 'Cookie=' + quote_plus(cookie_string)
    if url_headers:
        return url + append_headers(url_headers)
    else:
        return url
Ejemplo n.º 2
0
def parse_dbase_from_uri(uri):
    """A simplified version of pymongo.uri_parser.parse_uri to get the dbase.

    Returns a string representing the database provided in the URI or None if
    no database is provided in the URI.

    An invalid MongoDB connection URI may raise an InvalidURI exception,
    however, the URI is not fully parsed and some invalid URIs may not result
    in an exception.

    "mongodb://host1/database" becomes "database"

    and

    "mongodb://host1" becomes None
    """
    SCHEME = "mongodb://"

    if not uri.startswith(SCHEME):
        raise InvalidURI("Invalid URI scheme: URI "
                         "must begin with '%s'" % (SCHEME,))

    scheme_free = uri[len(SCHEME):]

    if not scheme_free:
        raise InvalidURI("Must provide at least one hostname or IP.")

    dbase = None

    # Check for unix domain sockets in the uri
    if '.sock' in scheme_free:
        # Split on the *last* '/' so the socket path stays with the hosts.
        host_part, _, path_part = scheme_free.rpartition('/')
        if not host_part:
            host_part = path_part
            path_part = ""
        if '/' in host_part:
            raise InvalidURI("Any '/' in a unix domain socket must be"
                             " URL encoded: %s" % host_part)
        path_part = unquote_plus(path_part)
    else:
        host_part, _, path_part = scheme_free.partition('/')

    if not path_part and '?' in host_part:
        raise InvalidURI("A '/' is required between "
                         "the host list and any options.")

    # Only text before '?' (the options separator) can name a database, and
    # only the portion before the first '.' (a collection qualifier) counts.
    if path_part and path_part[0] != '?':
        dbase = path_part.partition('?')[0]
        if '.' in dbase:
            dbase, _ = dbase.split('.', 1)

    if dbase is not None:
        dbase = unquote_plus(dbase)

    return dbase
Ejemplo n.º 3
0
def parse_dbase_from_uri(uri):
    """Extract the database name from a MongoDB connection URI.

    Simplified stand-in for ``pymongo.uri_parser.parse_uri`` that only
    recovers the database component.

    Returns the database named in *uri*, or None when the URI names none.
    May raise InvalidURI for some malformed URIs, but the URI is not fully
    validated, so not every invalid URI is rejected.

    "mongodb://host1/database" -> "database"
    "mongodb://host1"          -> None
    """
    SCHEME = "mongodb://"

    if not uri.startswith(SCHEME):
        raise InvalidURI("Invalid URI scheme: URI "
                         "must begin with '%s'" % (SCHEME, ))

    remainder = uri[len(SCHEME):]
    if not remainder:
        raise InvalidURI("Must provide at least one hostname or IP.")

    database = None

    if '.sock' in remainder:
        # Unix domain socket: split on the *last* slash so the socket path
        # stays attached to the host portion.
        hosts, _, tail = remainder.rpartition('/')
        if not hosts:
            hosts, tail = tail, ""
        if '/' in hosts:
            raise InvalidURI("Any '/' in a unix domain socket must be"
                             " URL encoded: %s" % hosts)
        tail = unquote_plus(tail)
    else:
        hosts, _, tail = remainder.partition('/')

    if not tail and '?' in hosts:
        raise InvalidURI("A '/' is required between "
                         "the host list and any options.")

    # Anything after '?' is options; anything after the first '.' is a
    # collection qualifier -- neither belongs to the database name.
    if tail and not tail.startswith('?'):
        database = tail.partition('?')[0]
        if '.' in database:
            database = database.split('.', 1)[0]

    return unquote_plus(database) if database is not None else None
Ejemplo n.º 4
0
 def unquote(cls, value, plus_as_space=False):
     """
     Python 2 and 3 compat layer for utf-8 unquoting
     """
     # Resolve the global urllib helper once; the method name shadows it
     # only as a class attribute, not inside this body.
     decoder = unquote_plus if plus_as_space else unquote
     if six.PY2:
         # PY2 unquoting works on bytes; decode the result to unicode.
         return decoder(value).decode("utf8")
     # PY3 urllib expects str, so decode the incoming ascii bytes first.
     return decoder(value.decode("ascii"))
Ejemplo n.º 5
0
 def unquote(cls, value, plus_as_space=False):
     """
     Python 2 and 3 compat layer for utf-8 unquoting

     :param value: percent-encoded input. Under Python 2 this is treated
         as bytes; under Python 3 it is ascii-decoded before unquoting,
         so it is expected to be bytes there too.
     :param plus_as_space: when True, also translate '+' to ' '
         (form-encoding semantics) via unquote_plus.
     :returns: the decoded text.
     """
     if six.PY2:
         # PY2: unquote operates on bytes; decode the result to unicode.
         if plus_as_space:
             return unquote_plus(value).decode("utf8")
         else:
             return unquote(value).decode("utf8")
     else:
         # PY3: urllib expects str, so decode the incoming bytes first.
         if plus_as_space:
             return unquote_plus(value.decode("ascii"))
         else:
             return unquote(value.decode("ascii"))
Ejemplo n.º 6
0
def load_manifest_file(client, bucket, schema, versioned, ifilters, key_info):
    """Given an inventory csv file, return an iterator over keys
    """
    # To avoid thundering herd downloads, we do an immediate yield for
    # interspersed i/o
    yield None

    # Inline these values to avoid the local var lookup, they are constants
    # rKey = schema['Key'] # 1
    # rIsLatest = schema['IsLatest'] # 3
    # rVersionId = schema['VersionId'] # 2

    with tempfile.NamedTemporaryFile() as fh:
        # Download the gzipped inventory CSV into a temp file for streaming.
        client.download_fileobj(Bucket=bucket, Key=key_info['key'], Fileobj=fh)
        fh.seek(0)
        # NOTE(review): GzipFile yields bytes; csv.reader over it works on
        # Python 2 but raises on Python 3 -- confirm the target runtime.
        reader = csv.reader(gzip.GzipFile(fileobj=fh, mode='r'))
        # Emit keys in batches of 1000 to bound per-step memory.
        for key_set in chunks(reader, 1000):
            keys = []
            for kr in key_set:
                k = kr[1]  # column 1 is the object key (see constants above)
                if inventory_filter(ifilters, schema, kr):
                    continue
                k = unquote_plus(k)
                if versioned:
                    # column 3 = IsLatest, column 2 = VersionId
                    if kr[3] == 'true':
                        keys.append((k, kr[2], True))
                    else:
                        keys.append((k, kr[2]))
                else:
                    keys.append(k)
            yield keys
    def get_media_url(self, host, media_id):
        """Resolve the playable stream URL for *media_id* on *host*.

        Tries three extraction strategies against the page HTML, in order:
        a plain ``url: "..."`` assignment, an ``unescape('...')`` wrapper,
        and a base64 payload inside ``atob('...')``.

        :raises ResolverError: when no strategy yields a URL.
        """
        web_url = self.get_url(host, media_id)
        headers = {'User-Agent': common.FF_USER_AGENT}
        html = self.net.http_GET(web_url, headers=headers).content

        # Strategy 1: direct url: "http..." assignment.
        # (Explicit match checks instead of the original bare `except: pass`,
        # which also swallowed SystemExit/KeyboardInterrupt.)
        match = re.search(r'url\s*:\s*"(http.+?)"', html)
        stream_url = match.group(1) if match else None

        if not stream_url:
            # Strategy 2: unescape('http...') wrapper.
            match = re.search(r'unescape\(\'(http.+?)\'', html)
            if match:
                stream_url = match.group(1)

        if not stream_url:
            # Strategy 3: base64 payload inside atob('...').
            match = re.search(r'atob\(\'(.+?)\'', html)
            if match:
                try:
                    stream_url = base64.b64decode(match.group(1))
                except (TypeError, ValueError):
                    # Malformed base64 (binascii.Error is a ValueError) --
                    # fall through to the not-found error below.
                    pass

        if not stream_url:
            raise ResolverError('File not found')

        return urllib_parse.unquote_plus(stream_url) + helpers.append_headers(headers)
def clean_filename(s, minimal_change=False):
    """
    Sanitize a string to be used as a filename.

    If minimal_change is set to true, then we only strip the bare minimum of
    characters that are problematic for filesystems (namely, ':', '/' and
    '\x00', '\n').
    """
    # Undo HTML entities, then URL percent-encoding.
    s = unquote_plus(html_parser.HTMLParser().unescape(s))

    # Replace filesystem-hostile characters; newlines are dropped outright.
    for bad in (':', '/', '\x00'):
        s = s.replace(bad, '-')
    s = s.replace('\n', '')

    if minimal_change:
        return s

    s = s.replace('(', '').replace(')', '')
    s = s.rstrip('.')  # Remove excess of trailing dots

    s = '_'.join(s.strip().split(' '))
    allowed = '-_.()%s%s' % (string.ascii_letters, string.digits)
    return ''.join(ch for ch in s if ch in allowed)
Ejemplo n.º 9
0
def main():
    """Dispatch the plugin invocation based on its query parameters."""
    params = get_params()

    # Each parameter is best-effort: absent or malformed values fall back
    # to their defaults (url=None, mode=None, page=1).
    try:
        url = urllib_parse.unquote_plus(params['url'])
    except:
        url = None
    try:
        mode = int(params['mode'])
    except:
        mode = None
    try:
        page = int(params['page'])
    except:
        page = 1

    if mode is None or url is None or len(url) < 1:
        CATEGORIES()
    elif mode == 1:
        xbmc.log('SORTMETHOD ' + url)
        SORTMETHOD(url)
    elif mode == 2:
        xbmc.log('VIDEOLIST ' + url)
        xbmc.log('VIDEOLIST ' + str(page))
        VIDEOLIST(url, page)
    elif mode == 3:
        xbmc.log('PLAYVIDEO ' + url)
        PLAYVIDEO(url)
Ejemplo n.º 10
0
def clean_filename(s, minimal_change=False):
    """
    Sanitize a string to be used as a filename.

    If minimal_change is set to true, then we only strip the bare minimum of
    characters that are problematic for filesystems (namely, ':', '/' and
    '\x00', '\n').
    """
    # First, deal with URL encoded strings
    s = unquote_plus(html.unescape(s))

    # Strip forbidden characters
    # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
    for forbidden in (':', '/', '<', '>', '"', '\\', '|', '?', '*', '\x00'):
        s = s.replace(forbidden, '-')
    s = s.replace('\n', ' ')

    # Remove trailing dots and spaces; forbidden on Windows
    s = s.rstrip(' .')

    if minimal_change:
        return s

    s = s.replace('(', '').replace(')', '')
    s = s.rstrip('.')  # Remove excess of trailing dots

    s = '_'.join(s.strip().split(' '))
    allowed = '-_.()%s%s' % (string.ascii_letters, string.digits)
    return ''.join(ch for ch in s if ch in allowed)
Ejemplo n.º 11
0
def searchDir(url, mode, page=None, alphabet=None):
    """List saved search keywords, optionally filtered by *alphabet*.

    When not browsing alphabetically, utility entries (add keyword,
    alphabetical browse, sort-order toggle) are prepended. Keywords come
    from the favorites sqlite database and are rendered as directory items.
    """
    if not alphabet:
        addDir('[COLOR hotpink]Add Keyword[/COLOR]', url, 'utils.newSearch', cum_image('cum-search.png'), '', mode, Folder=False)
        addDir('[COLOR hotpink]Alphabetical[/COLOR]', url, 'utils.alphabeticalSearch', cum_image('cum-search.png'), '', mode)
        if addon.getSetting('keywords_sorted') == 'true':
            addDir('[COLOR hotpink]Unsorted Keywords[/COLOR]', url, 'utils.setUnsorted', cum_image('cum-search.png'), '', mode, Folder=False)
        else:
            addDir('[COLOR hotpink]Sorted Keywords[/COLOR]', url, 'utils.setSorted', cum_image('cum-search.png'), '', mode, Folder=False)
    conn = sqlite3.connect(favoritesdb)
    c = conn.cursor()

    try:
        if alphabet:
            # Prefix match on the (lowercased) alphabet letter.
            c.execute("SELECT * FROM keywords WHERE keyword LIKE ? ORDER BY keyword ASC", (alphabet.lower() + '%', ))
        else:
            if addon.getSetting('keywords_sorted') == 'true':
                c.execute("SELECT * FROM keywords ORDER by keyword")
            else:
                # Most recently added first.
                c.execute("SELECT * FROM keywords ORDER BY rowid DESC")
        for (keyword,) in c.fetchall():
            keyword = keyword if six.PY3 else keyword.encode('utf8')
            name = '[COLOR deeppink]' + urllib_parse.unquote_plus(keyword) + '[/COLOR]'
            addDir(name, url, mode, cum_image('cum-search.png'), page=page, keyword=keyword)
    except:
        # Best-effort: a missing table or query failure just yields an
        # empty keyword listing.
        pass
    conn.close()
    eod()
Ejemplo n.º 12
0
 def create_connect_args(self, url):
     """Translate an SQLAlchemy URL into Snowflake connector kwargs.

     Returns the ([], opts) tuple SQLAlchemy dialects expect.

     The URL 'database' component may carry a '/'-separated schema
     ('db/schema'). A host that lacks '.snowflakecomputing.com' and has no
     port is treated as a bare account name and expanded.
     """
     opts = url.translate_connect_args(username='******')
     if 'database' in opts:
         name_spaces = [
             unquote_plus(e) for e in opts['database'].split('/')
         ]
         if len(name_spaces) == 1:
             pass
         elif len(name_spaces) == 2:
             # 'db/schema' form: split into separate options.
             opts['database'] = name_spaces[0]
             opts['schema'] = name_spaces[1]
         else:
             raise sa_exc.ArgumentError(
                 "Invalid name space is specified: {0}".format(
                     opts['database']))
     if '.snowflakecomputing.com' not in opts['host'] and not opts.get(
             'port'):
         # Bare account name given as host: derive the account and expand
         # the host to the full Snowflake endpoint on the default port.
         opts['account'] = opts['host']
         if u'.' in opts['account']:
             # remove region subdomain
             opts['account'] = opts['account'][0:opts['account'].find(u'.')]
             # remove external ID
             opts['account'] = opts['account'].split('-')[0]
         opts['host'] = opts['host'] + '.snowflakecomputing.com'
         opts['port'] = '443'
     opts['autocommit'] = False  # autocommit is disabled by default
     # Query-string options override everything derived above.
     opts.update(url.query)
     self._cache_column_metadata = opts.get('cache_column_metadata',
                                            "false").lower() == 'true'
     return ([], opts)
Ejemplo n.º 13
0
def load_manifest_file(client, bucket, schema, versioned, ifilters, key_info):
    """Given an inventory csv file, return an iterator over keys
    """
    # to avoid thundering herd downloads
    yield None

    # Inline these values to avoid the local var lookup, they are constants
    #rKey = schema['Key'] # 1
    #rIsLatest = schema['IsLatest'] # 3
    #rVersionId = schema['VersionId'] # 2

    with tempfile.NamedTemporaryFile() as fh:
        # Download the gzipped inventory CSV into a temp file for streaming.
        client.download_fileobj(Bucket=bucket, Key=key_info['key'], Fileobj=fh)
        fh.seek(0)
        # NOTE(review): GzipFile yields bytes; csv.reader over it works on
        # Python 2 but raises on Python 3 -- confirm the target runtime.
        reader = csv.reader(gzip.GzipFile(fileobj=fh, mode='r'))
        # Emit keys in batches of 1000 to bound per-step memory.
        for key_set in chunks(reader, 1000):
            keys = []
            for kr in key_set:
                k = kr[1]  # column 1 is the object key (see constants above)
                if inventory_filter(ifilters, schema, kr):
                    continue
                # Cheap fast-path: only decode keys that contain escapes.
                if '%' in k:
                    k = unquote_plus(k)
                if versioned:
                    # column 3 = IsLatest, column 2 = VersionId
                    if kr[3] == 'true':
                        keys.append((k, kr[2], True))
                    else:
                        keys.append((k, kr[2]))
                else:
                    keys.append(k)
            yield keys
Ejemplo n.º 14
0
def Search(url):  # 26
    """Run a new search or replay a stored one.

    ``url == 'new'`` prompts the user with a keyboard, records the query in
    the search history database, and searches the site the user picks
    (Tenies-Online or Gamato-Kids). Any other *url* replays a stored search
    against the site inferred from the URL.
    """
    if url == 'new':
        keyb = xbmc.Keyboard('', Lang(32002))
        keyb.doModal()
        if keyb.isConfirmed():
            search = quote_plus(keyb.getText())
            # Keep a human-readable (decoded) copy for the history table.
            if six.PY2:
                term = unquote_plus(search).decode('utf-8')
            else:
                term = unquote_plus(search)

            dbcon = database.connect(control.searchFile)
            dbcur = dbcon.cursor()

            dp = xbmcgui.Dialog()
            select = dp.select('Select Website', [
                '[COLORgold][B]Tenies-Online[/COLOR][/B]',
                '[COLORgold][B]Gamato-Kids[/COLOR][/B]'
            ])
            if select == 0:
                from resources.lib.indexers import teniesonline
                url = Teniesonline + "?s={}".format(search)
                # Replace any previous history row for this exact URL.
                dbcur.execute("DELETE FROM Search WHERE url = ?", (url, ))
                dbcur.execute("INSERT INTO Search VALUES (?,?)", (url, term))
                dbcon.commit()
                dbcur.close()
                teniesonline.search(url)

            elif select == 1:
                url = GAMATO + "?s={}".format(search)
                dbcur.execute("DELETE FROM Search WHERE url = ?", (url, ))
                dbcur.execute("INSERT INTO Search VALUES (?,?)", (url, term))
                dbcon.commit()
                dbcur.close()
                Search_gamato(url)
            else:
                # Site dialog cancelled -- nothing to search.
                return
        else:
            return

    else:
        # Replaying a stored search: route by site.
        if 'gamato' in url:
            Search_gamato(url)
        else:
            from resources.lib.indexers import teniesonline
            teniesonline.search(url)
    views.selectView('movies', 'movie-view')
Ejemplo n.º 15
0
def decode_old_fullpath(fullpath):
    """Split a '/'-separated, URL-quoted fullpath into its four components.

    Each part is unquote_plus-decoded and the result returned as a tuple.
    Raises ValueError when *fullpath* does not have exactly four parts.
    """
    parts = fullpath.split('/')
    if len(parts) != 4:
        raise ValueError("old fullpath: Wrong format")
    return tuple(unquote_plus(part) for part in parts)
Ejemplo n.º 16
0
def search_subs(params):
    """Search Addic7ed.com for subtitles and display the results.

    For ``params['action'] == 'search'`` the query is built from the
    currently playing episode; otherwise the user-typed search string is
    used. Connection and search failures are logged/notified, not raised.
    """
    logger.info('Searching for subs...')
    languages = get_languages(
        urlparse.unquote_plus(params['languages']).split(',')
    )
    # Search subtitles in Addic7ed.com.
    if params['action'] == 'search':
        try:
            episode_data = extract_episode_data()
        except ParseError:
            return
        # Create a search query string
        query = '{0} {1}x{2}'.format(
            normalize_showname(episode_data.showname),
            episode_data.season,
            episode_data.episode
        )
        filename = episode_data.filename
    else:
        # Get the query string typed on the on-screen keyboard
        query = params['searchstring']
        filename = query
    if query:
        logger.debug('Search query: {0}'.format(query))
        try:
            results = parser.search_episode(query, languages)
        except Add7ConnectionError:
            logger.error('Unable to connect to addic7ed.com')
            dialog.notification(
                get_ui_string(32002), get_ui_string(32005), 'error'
            )
        except SubsSearchError:
            logger.info('No subs for "{}" found.'.format(query))
        else:
            # A list result means the query matched several episodes; let
            # the user pick one before fetching its subtitle page.
            if isinstance(results, list):
                logger.info('Multiple episodes found:\n{0}'.format(results))
                i = dialog.select(
                    get_ui_string(32008), [item.title for item in results]
                )
                if i >= 0:
                    try:
                        results = parser.get_episode(results[i].link, languages)
                    except Add7ConnectionError:
                        logger.error('Unable to connect to addic7ed.com')
                        dialog.notification(get_ui_string(32002),
                                            get_ui_string(32005), 'error')
                        return
                    except SubsSearchError:
                        logger.info('No subs found.')
                        return
                else:
                    logger.info('Episode selection cancelled.')
                    return
            logger.info('Found subs for "{0}"'.format(query))
            display_subs(results.subtitles, results.episode_url,
                         filename)
Ejemplo n.º 17
0
def resolve(regex):
    """Evaluate a <regex> XML job description and return its result.

    Protects CDATA payloads by URL-quoting them, parses every <regex>
    element into a name->options dict, and delegates to getRegexParsed.
    When the job defines a 'listrepeat', each matched tuple is expanded
    back into <item> XML; otherwise the resolved URL is returned.

    Returns None on any failure (all errors are swallowed).
    """
    try:
        # Keep a pristine copy of the template for listrepeat expansion.
        vanilla = re.compile('(<regex>.+)', re.MULTILINE | re.DOTALL).findall(regex)[0]
        cddata = re.compile('<\!\[CDATA\[(.+?)\]\]>', re.MULTILINE | re.DOTALL).findall(regex)
        for i in cddata:
            # Quote CDATA bodies so the regex parsing below cannot trip on them.
            regex = regex.replace('<![CDATA['+i+']]>', urllib_parse.quote_plus(i))

        regexs = re.compile('(<regex>.+)', re.MULTILINE | re.DOTALL).findall(regex)[0]
        regexs = re.compile('<regex>(.+?)</regex>', re.MULTILINE | re.DOTALL).findall(regexs)
        regexs = [re.compile('<(.+?)>(.*?)</.+?>', re.MULTILINE | re.DOTALL).findall(i) for i in regexs]

        # Each <regex> element becomes a {tag: value} dict, keyed by its name.
        regexs = [dict([(client.replaceHTMLCodes(x[0]), client.replaceHTMLCodes(urllib_parse.unquote_plus(x[1]))) for x in i]) for i in regexs]
        regexs = [(i['name'], i) for i in regexs]
        regexs = dict(regexs)

        # The target URL is everything before the first <regex> element.
        url = regex.split('<regex>', 1)[0].strip()
        url = client.replaceHTMLCodes(url)
        url = six.ensure_str(url)

        r = getRegexParsed(regexs, url)

        try:
            ln = ''
            ret = r[1]
            listrepeat = r[2]['listrepeat']
            regexname = r[2]['name']

            # Expand each matched tuple into the listrepeat template,
            # substituting [name.paramN] placeholders (1-based).
            for obj in ret:
                try:
                    item = listrepeat
                    for i in list(range(len(obj)+1)):
                        item = item.replace('[%s.param%s]' % (regexname, str(i)), obj[i-1])

                    item2 = vanilla
                    for i in list(range(len(obj)+1)):
                        item2 = item2.replace('[%s.param%s]' % (regexname, str(i)), obj[i-1])

                    # Re-attach every other <regex> block except our own.
                    item2 = re.compile('(<regex>.+?</regex>)', re.MULTILINE | re.DOTALL).findall(item2)
                    item2 = [x for x in item2 if not '<name>%s</name>' % regexname in x]
                    item2 = ''.join(item2)

                    ln += '\n<item>%s\n%s</item>\n' % (item, item2)
                except Exception:
                    pass

            return ln
        except Exception:
            # No listrepeat defined: fall through to the plain-URL result.
            pass

        if r[1] is True:
            return r[0]
    except Exception:
        return
Ejemplo n.º 18
0
def search_menu():
    """Build the search directory: a 'new search' entry plus saved searches.

    Ensures the history table exists, lists stored searches with
    context-menu actions, and appends a 'delete all' entry when any
    history rows were rendered.
    """
    addon.add_directory({'mode': 'search_bb', 'url': 'new'},
                        {'title': control.lang(32014).encode('utf-8')}, img=IconPath + 'search.png', fanart=FANART)
    try:
        from sqlite3 import dbapi2 as database
    except ImportError:
        from pysqlite2 import dbapi2 as database

    dbcon = database.connect(control.searchFile)
    dbcur = dbcon.cursor()

    try:
        dbcur.execute("""CREATE TABLE IF NOT EXISTS Search (url text, search text)""")
    except BaseException:
        pass

    dbcur.execute("SELECT * FROM Search ORDER BY search")

    lst = []

    delete_option = False
    for (url, search) in dbcur.fetchall():
        # if six.PY2:
        #     title = unquote_plus(search).encode('utf-8')
        # else:
        title = six.ensure_text(unquote_plus(search), 'utf-8')
        title = '[B]{}[/B]'.format(title)
        delete_option = True
        addon.add_directory({'mode': 'search_bb', 'url': search},
                            {'title': title},
                            [(control.lang(32007).encode('utf-8'),
                              'RunPlugin(plugin://plugin.video.releaseBB/?mode=settings)',),
                             (control.lang(32015).encode('utf-8'),
                              'RunPlugin(plugin://plugin.video.releaseBB/?mode=del_search_item&query=%s)' % search,),
                             (control.lang(32008).encode('utf-8'),
                              'RunPlugin(plugin://plugin.video.releaseBB/?mode=ClearCache)',),
                             (control.lang(32009).encode('utf-8'),
                              'RunPlugin(plugin://plugin.video.releaseBB/?mode=setviews)',)],
                            img=IconPath + 'search.png', fanart=FANART)
        lst += [(search)]
    dbcur.close()

    if delete_option:
        addon.add_directory({'mode': 'del_search_items'},
                            {'title': control.lang(32016).encode('utf-8')},
                            img=IconPath + 'search.png', fanart=FANART, is_folder=False)

    control.content(int(sys.argv[1]), 'videos')
    control.directory(int(sys.argv[1]))
    view.setView('videos', {'skin.estuary': 55, 'skin.confluence': 500})
Ejemplo n.º 19
0
 def _parse_gdocs(self, html):
     """Extract (quality, url) pairs from a Google Docs fmt_stream_map page."""
     if 'error' in html:
         reason = urllib_parse.unquote_plus(re.findall('reason=([^&]+)', html)[0])
         raise ResolverError(reason)
     stream_map = urllib_parse.unquote(re.findall('fmt_stream_map=([^&]+)', html)[0])
     urls = []
     # Each comma-separated entry is 'itag|url'.
     for entry in stream_map.split(','):
         itag, raw_url = entry.split('|')
         if isinstance(raw_url, six.text_type) and six.PY2:  # @big change
             raw_url = raw_url.decode('unicode_escape').encode('utf-8')
         label = self.itag_map.get(itag, 'Unknown Quality [%s]' % itag)
         urls.append((label, urllib_parse.unquote(raw_url)))
     return urls
def run(context, url, name):
    """Show install/operation options for a Plex online plugin directory."""
    dlg = xbmcgui.Dialog()
    context.plex_network = plex.Plex(context.settings, load=True)

    tree = get_xml(context, url)
    if tree is None:
        return

    try:
        name = unquote_plus(name)
    except:  # pylint: disable=bare-except
        pass

    # Map selection index -> operation title, preserving document order.
    operations = {idx: plugin.get('title')
                  for idx, plugin in enumerate(tree.findall('Directory'))}

    for idx in operations:
        # An 'install' entry means the plugin is not installed yet; offer
        # a yes/no prompt instead of the selection list.
        if operations[idx].lower() == 'install':
            LOG.debug('Not installed.  Print dialog')
            if dlg.yesno(i18n('Plex Online'),
                         i18n('About to install') + ' ' + name):
                LOG.debug('Installing....')
                _ = get_xml(context, url + '/install')
            return

    # Already installed: let the user pick one of the other operations.
    result = dlg.select(i18n('This plugin is already installed'),
                        list(operations.values()))
    if result == -1:
        LOG.debug('No option selected, cancelling')
        return

    LOG.debug('Option %s selected.  Operation is %s' %
              (result, operations[result]))

    _ = get_xml(context, url + '/' + operations[result].lower())

    xbmc.executebuiltin('Container.Refresh')
Ejemplo n.º 21
0
    def get_media_url(self, host, media_id):
        """Resolve the playable stream URL for *media_id* on *host*.

        Non-videozoo hosts embed the URL in a playlist/timer assignment.
        Videozoo hosts wrap it in packed JS which is unpacked first, with a
        fallback that reconstructs the URL from the packer's pipe-delimited
        symbol table. Google-hosted results are resolved recursively.

        :raises ResolverError: when no stream URL can be extracted.
        """
        web_url = self.get_url(host, media_id)
        headers = {'User-Agent': common.IOS_USER_AGENT, 'Referer': web_url}
        stream_url = ''
        new_host = urllib_parse.urlparse(web_url).netloc
        html = self.net.http_GET(web_url, headers=headers).content
        if 'videozoo' not in new_host:
            r = re.search(
                r'(?:playlist:|timer\s*=\s*null;).+?url\s*[:=]+\s*[\'"]+(.+?)[\'"]+',
                html, re.DOTALL)
        else:
            # Packed JS between */ and /* markers: eval(function(p,a,c,k,e,d)...
            r = re.search(r'\*/\s+?(eval\(function\(p,a,c,k,e,d\).+)\s+?/\*',
                          html)
            if r:
                try:
                    r = jsunpack.unpack(r.group(1))
                    if r:
                        r = re.search(r'\[{"url":"(.+?)"', r.replace('\\', ''))
                except:
                    # Unpacking failed: rebuild the URL from the packer's
                    # symbol table ('|2F' / '|3D' are escaped '/' and '=').
                    if r:
                        re_src = re.search(r'urlResolvers\|2F(.+?)\|',
                                           r.group(1))
                        re_url = re.search(r'php\|3D(.+?)\|', r.group(1))
                        if re_src and re_url:
                            stream_url = 'http://%s/%s.php?url=%s' % (
                                new_host, re_src.group(1), re_url.group(1))
                            stream_url = self._redirect_test(stream_url)
                        else:
                            raise ResolverError('File not found')
        if r:
            stream_url = urllib_parse.unquote_plus(r.group(1))
            if 'http' not in stream_url:
                # Relative URL: normalize the known PHP endpoints and prefix
                # the original host.
                stream_url = 'http://' + host + '/' + stream_url.replace(
                    '/gplus.php', 'gplus.php').replace('/picasa.php',
                                                       'picasa.php')
            stream_url = self._redirect_test(stream_url)

        if stream_url:
            if 'google' in stream_url:
                # Delegate google-hosted streams to the generic resolver.
                return HostedMediaFile(url=stream_url).resolve()
            else:
                return stream_url
        else:
            raise ResolverError('File not found')
Ejemplo n.º 22
0
    def run(self, title, year, season, episode, imdb, tvdb, url, meta):
        """Play *url* with metadata, bookmark resume and trakt integration.

        Builds a display name for the item (movie or episode form), loads
        any saved bookmark offset, hands the item to the player, and keeps
        the 'script.trakt.ids' window property set while playback runs.
        Playback setup is best-effort: any error silently aborts.
        """
        try:
            control.sleep(200)

            self.totalTime = 0
            self.currentTime = 0

            # Episode playback requires both a season and an episode number.
            # (Fixed: identity comparisons with None instead of '== None'.)
            self.content = 'movie' if season is None or episode is None else 'episode'

            self.title = title
            self.year = year
            # Unrolled the original one-line conditional expression for
            # readability; the produced name is identical.
            if self.content == 'movie':
                self.name = urllib_parse.quote_plus(title) + urllib_parse.quote_plus(' (%s)' % year)
            else:
                self.name = urllib_parse.quote_plus(title) + urllib_parse.quote_plus(' S%02dE%02d' % (int(season), int(episode)))
            self.name = urllib_parse.unquote_plus(self.name)
            self.season = '%01d' % int(season) if self.content == 'episode' else None
            self.episode = '%01d' % int(episode) if self.content == 'episode' else None

            self.DBID = None
            # Fall back to '0' so downstream lookups always get a string id.
            self.imdb = imdb if imdb is not None else '0'
            self.tvdb = tvdb if tvdb is not None else '0'
            self.ids = {'imdb': self.imdb, 'tvdb': self.tvdb}
            # Drop placeholder ids before handing them to trakt.
            self.ids = dict((k, v) for k, v in six.iteritems(self.ids) if v != '0')

            self.offset = bookmarks().get(self.name, self.year)

            poster, thumb, meta = self.getMeta(meta)

            item = control.item(path=url)
            item.setArt({'icon': thumb, 'thumb': thumb, 'poster': poster,
                         'tvshow.poster': poster, 'season.poster': poster})
            item.setInfo(type='video', infoLabels=control.metadataClean(meta))

            if 'plugin' in control.infoLabel('Container.PluginName'):
                control.player.play(url, item)

            control.resolve(int(sys.argv[1]), True, item)

            control.window.setProperty('script.trakt.ids', json.dumps(self.ids))

            # Blocks until playback finishes, updating bookmarks as it goes.
            self.keepPlaybackAlive()

            control.window.clearProperty('script.trakt.ids')
        except Exception:
            return
Ejemplo n.º 23
0
def clean_filename(s, minimal_change=False):
    """
    Sanitize a string to be used as a filename.

    If minimal_change is set to true, then we only strip the bare minimum of
    characters that are problematic for filesystems (namely, ':', '/' and
    '\x00', '\n').
    """
    # Undo HTML entities, then URL percent-encoding.
    s = unquote_plus(html_parser.HTMLParser().unescape(s))

    # Strip forbidden characters
    # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
    for forbidden in (':', '/', '<', '>', '"', '\\', '|', '?', '*', '\x00'):
        s = s.replace(forbidden, '-')
    s = s.replace('\n', ' ')

    # Remove trailing dots and spaces; forbidden on Windows
    s = s.rstrip(' .')

    if minimal_change:
        return s

    s = s.replace('(', '').replace(')', '')
    s = s.rstrip('.')  # Remove excess of trailing dots

    s = '_'.join(s.strip().split(' '))
    allowed = '-_.()%s%s' % (string.ascii_letters, string.digits)
    return ''.join(ch for ch in s if ch in allowed)
 def unquote_plus(self):
     """Return the object's URL unquote_plus representation."""
     decoded = urllib.unquote_plus(self.context)
     return _safe_as_text(decoded)
Ejemplo n.º 25
0
                'icon': 'DefaultVideo.png',
                'poster': iconimage})
    liz.setInfo(type='Video', infoLabels={'Title': name})
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u,
                                     listitem=liz, isFolder=True)
    return ok


# Entry-point parameter parsing: each value is best-effort -- a missing or
# malformed parameter simply leaves the corresponding default in place.
# (Fixed: bare `except:` narrowed to the exceptions these lookups can
# actually raise, so SystemExit/KeyboardInterrupt are no longer swallowed.)
topparams = get_params()
topurl = None
topname = None
topmode = None
topthumbnail = None

try:
    topurl = urllib_parse.unquote_plus(topparams['url'])
except KeyError:
    pass
try:
    topname = urllib_parse.unquote_plus(topparams['name'])
except KeyError:
    pass
try:
    topmode = int(topparams['mode'])
except (KeyError, ValueError):
    pass

try:
    topthumbnail = urllib_parse.unquote_plus(topparams['iconimage'])
except KeyError:
    pass
Ejemplo n.º 26
0
def Search_bb(url):
    """Search proxybb.com and render the results as Kodi directory items.

    ``url`` selects one of three modes:
      - ``'new'``          : prompt the user for a phrase, persist it in the
                             search database, then run the search;
      - ``'...|nextpage'`` : fetch the next page of an earlier search (the
                             referer link is embedded in ``url``);
      - anything else      : treat ``url`` itself as the search phrase.

    Side effects: writes to the search DB (first mode only) and adds
    directory items plus a "next page" item via ``addon.add_directory``.
    """
    if 'new' == url:
        keyboard = xbmc.Keyboard()
        keyboard.setHeading(control.lang(32002).encode('utf-8'))
        keyboard.doModal()
        if keyboard.isConfirmed():
            _query = keyboard.getText()
            query = _query.encode('utf-8')
            try:
                query = quote_plus(query)
                referer_link = 'http://search.proxybb.com?s={0}'.format(query)

                url = 'http://search.proxybb.com/Home/GetPost?phrase={0}&pindex=1&content=true&type=Simple&rad=0.{1}'
                url = url.format(query, random.randint(33333333333333333, 99999999999999999))
                #########save in Database#########
                if six.PY2:
                    term = unquote_plus(query).decode('utf-8')
                else:
                    term = unquote_plus(query)

                dbcon = database.connect(control.searchFile)
                dbcur = dbcon.cursor()
                dbcur.execute("DELETE FROM Search WHERE search = ?", (term,))
                dbcur.execute("INSERT INTO Search VALUES (?,?)", (url, term))
                dbcon.commit()
                dbcur.close()

                #########search in website#########
                headers = {'Referer': referer_link,
                           'X-Requested-With': 'XMLHttpRequest'}
                # Priming request: hit the referer first so any cookies the
                # search endpoint expects are in place.
                first = client.request(referer_link, headers=headers)
                xbmc.sleep(10)
                html = client.request(url, headers=headers)
                posts = json.loads(html)['results']
                posts = [(i['post_name'], i['post_title'], i['post_content'], i['domain']) for i in posts if i]
                for movieUrl, title, infos, domain in posts:
                    # New-search mode only lists posts that link an IMDb title.
                    if 'imdb.com/title' not in infos:
                        continue
                    base = BASE_URL if 'old' not in domain else OLD_URL
                    movieUrl = urljoin(base, movieUrl) if not movieUrl.startswith('http') else movieUrl
                    title = title.encode('utf-8')
                    infos = infos.replace('\\', '')
                    try:
                        img = client.parseDOM(infos, 'img', ret='src')[0]
                        img = img.replace('.ru', '.to')
                    except Exception:
                        img = ICON

                    try:
                        fan = client.parseDOM(infos, 'img', ret='src')[1]
                    except Exception:
                        fan = FANART

                    try:
                        desc = re.findall(r'>(Plot:.+?)</p>', infos, re.DOTALL)[0]
                    except Exception:
                        desc = 'N/A'

                    desc = Sinopsis(desc)
                    title = six.ensure_str(title, 'utf-8')
                    name = '[B][COLORgold]{0}[/COLOR][/B]'.format(title)

                    mode = 'GetPack' if re.search(r'\s+S\d+\s+', name) else 'GetLinks'
                    addon.add_directory(
                        {'mode': mode, 'url': movieUrl, 'img': img, 'plot': desc},
                        {'title': name, 'plot': desc},
                        [(control.lang(32007).encode('utf-8'),
                          'RunPlugin(plugin://plugin.video.releaseBB/?mode=settings)',),
                         (control.lang(32008).encode('utf-8'),
                          'RunPlugin(plugin://plugin.video.releaseBB/?mode=ClearCache)',),
                         (control.lang(32009).encode('utf-8'),
                          'RunPlugin(plugin://plugin.video.releaseBB/?mode=setviews)',)],
                        img=img, fanart=fan)

                # Build the "next page" URL: bump pindex and refresh the random
                # token.  NOTE: the URL carries the token as 'rad=0.N' (built
                # above); the previous pattern matched '&rand=0.N' and so never
                # substituted anything.
                pindex = int(re.search(r'pindex=(\d+)&', url).group(1)) + 1
                np_url = re.sub(r'&pindex=\d+&', '&pindex={0}&'.format(pindex), url)
                rand = random.randint(33333333333333333, 99999999999999999)
                np_url = re.sub(r'&rad=0\.\d+$', '&rad=0.{0}'.format(rand), np_url)
                addon.add_directory(
                    {'mode': 'search_bb', 'url': np_url + '|Referer={0}|nextpage'.format(referer_link)},
                    {'title': control.lang(32010).encode('utf-8')},
                    img=IconPath + 'next_page.png', fanart=FANART)

            except Exception:
                control.infoDialog(control.lang(32022).encode('utf-8'), NAME, ICON, 5000)

    elif '|nextpage' in url:
        # url looks like '<search url>|Referer=<link>|nextpage'
        url, referer_link, _ = url.split('|')
        referer_link = referer_link.split('=', 1)[1]
        headers = {'Referer': referer_link,
                   'X-Requested-With': 'XMLHttpRequest'}
        first = client.request(referer_link, headers=headers)
        xbmc.sleep(10)
        html = client.request(url, headers=headers)
        posts = json.loads(html)['results']
        posts = [(i['post_name'], i['post_title'], i['post_content'], i['domain']) for i in posts if i]
        for movieUrl, title, infos, domain in posts:
            base = BASE_URL if 'old' not in domain else OLD_URL
            movieUrl = urljoin(base, movieUrl) if not movieUrl.startswith('http') else movieUrl
            title = six.ensure_str(title, 'utf-8')
            infos = infos.replace('\\', '')
            try:
                img = client.parseDOM(infos, 'img', ret='src')[0]
                img = img.replace('.ru', '.to')
            except Exception:
                img = ICON

            try:
                fan = client.parseDOM(infos, 'img', ret='src')[1]
            except Exception:
                fan = FANART

            try:
                desc = re.search(r'>(Plot:.+?)</p>', infos, re.DOTALL).group(0)
            except Exception:
                desc = 'N/A'

            desc = Sinopsis(desc)
            name = '[B][COLORgold]{0}[/COLOR][/B]'.format(title)
            mode = 'GetPack' if re.search(r'\s+S\d+\s+', name) else 'GetLinks'
            addon.add_directory(
                {'mode': mode, 'url': movieUrl, 'img': img, 'plot': desc},
                {'title': name, 'plot': desc},
                [(control.lang(32007).encode('utf-8'),
                  'RunPlugin(plugin://plugin.video.releaseBB/?mode=settings)',),
                 (control.lang(32008).encode('utf-8'),
                  'RunPlugin(plugin://plugin.video.releaseBB/?mode=ClearCache)',),
                 (control.lang(32009).encode('utf-8'),
                  'RunPlugin(plugin://plugin.video.releaseBB/?mode=setviews)',)],
                img=img, fanart=fan)

        # Next page: bump pindex and refresh the 'rad' token (see note above).
        pindex = int(re.search(r'pindex=(\d+)&', url).group(1)) + 1
        np_url = re.sub(r'&pindex=\d+&', '&pindex={0}&'.format(pindex), url)
        rand = random.randint(33333333333333333, 99999999999999999)
        np_url = re.sub(r'&rad=0\.\d+$', '&rad=0.{0}'.format(rand), np_url)
        addon.add_directory(
            {'mode': 'search_bb', 'url': np_url + '|Referer={0}|nextpage'.format(referer_link)},
            {'title': control.lang(32010).encode('utf-8')},
            img=IconPath + 'next_page.png', fanart=FANART)

    else:
        # ``url`` is the (already quoted or plain) search phrase.
        try:
            url = quote_plus(url)
            referer_link = 'http://search.proxybb.com?s={0}'.format(url)
            headers = {'Referer': referer_link,
                       'X-Requested-With': 'XMLHttpRequest'}
            xbmc.sleep(10)
            s_url = 'http://search.proxybb.com/Home/GetPost?phrase={0}&pindex=1&content=true&type=Simple&rad=0.{1}'
            s_url = s_url.format(url, random.randint(33333333333333333, 99999999999999999))
            html = client.request(s_url, headers=headers)
            posts = json.loads(html)['results']
            posts = [(i['post_name'], i['post_title'], i['post_content'], i['domain']) for i in posts if i]
            for movieUrl, title, infos, domain in posts:
                base = BASE_URL if 'old' not in domain else OLD_URL
                movieUrl = urljoin(base, movieUrl) if not movieUrl.startswith('http') else movieUrl
                title = six.ensure_str(title, 'utf-8')
                infos = infos.replace('\\', '')
                try:
                    img = client.parseDOM(infos, 'img', ret='src')[0]
                    img = img.replace('.ru', '.to')
                except Exception:
                    img = ICON

                try:
                    fan = client.parseDOM(infos, 'img', ret='src')[1]
                except Exception:
                    fan = FANART

                try:
                    desc = re.search(r'>(Plot:.+?)</p>', infos, re.DOTALL).group(0)
                except Exception:
                    desc = 'N/A'

                desc = Sinopsis(desc)
                name = '[B][COLORgold]{0}[/COLOR][/B]'.format(title)

                mode = 'GetPack' if re.search(r'\s+S\d+\s+', name) else 'GetLinks'
                addon.add_directory(
                    {'mode': mode, 'url': movieUrl, 'img': img, 'plot': desc},
                    {'title': name, 'plot': desc},
                    [(control.lang(32007).encode('utf-8'),
                      'RunPlugin(plugin://plugin.video.releaseBB/?mode=settings)',),
                     (control.lang(32008).encode('utf-8'),
                      'RunPlugin(plugin://plugin.video.releaseBB/?mode=ClearCache)',),
                     (control.lang(32009).encode('utf-8'),
                      'RunPlugin(plugin://plugin.video.releaseBB/?mode=setviews)',)],
                    img=img, fanart=fan)

            # Next page: bump pindex and refresh the 'rad' token (see note above).
            pindex = int(re.search(r'pindex=(\d+)&', s_url).group(1)) + 1
            np_url = re.sub(r'&pindex=\d+&', '&pindex={0}&'.format(pindex), s_url)
            rand = random.randint(33333333333333333, 99999999999999999)
            np_url = re.sub(r'&rad=0\.\d+$', '&rad=0.{0}'.format(rand), np_url)
            addon.add_directory(
                {'mode': 'search_bb', 'url': np_url + '|Referer={0}|nextpage'.format(referer_link)},
                {'title': control.lang(32010).encode('utf-8')},
                img=IconPath + 'next_page.png', fanart=FANART)

        except Exception:
            control.infoDialog(control.lang(32022).encode('utf-8'), NAME, ICON, 5000)

    control.content(int(sys.argv[1]), 'videos')
    control.directory(int(sys.argv[1]))
    view.setView('videos', {'skin.estuary': 55, 'skin.confluence': 500})
Ejemplo n.º 27
0
def parse_uri(uri, default_port=27017, warn=False):
    """Minimal stand-in for pymongo.uri_parser.parse_uri.

    Returns a dict with:
     - nodelist: a tuple of (host, port) pairs
     - database: the database name, or None when the URI names no database.

    Only partial validation is performed: a malformed MongoDB URI may raise
    InvalidURI, but some invalid URIs will pass through silently.

    'mongodb://host1/database' becomes 'host1', 27017, 'database'

    and

    'mongodb://host1' becomes 'host1', 27017, None
    """
    SCHEME = 'mongodb://'

    if not uri.startswith(SCHEME):
        raise InvalidURI('Invalid URI scheme: URI '
                         "must begin with '%s'" % (SCHEME, ))

    scheme_free = uri[len(SCHEME):]
    if not scheme_free:
        raise InvalidURI('Must provide at least one hostname or IP.')

    # Unix domain socket paths embed '/', so split from the right instead.
    if '.sock' in scheme_free:
        host_part, _, path_part = scheme_free.rpartition('/')
        if not host_part:
            host_part, path_part = path_part, ''
        if '/' in host_part:
            raise InvalidURI("Any '/' in a unix domain socket must be"
                             ' URL encoded: %s' % host_part)
        path_part = unquote_plus(path_part)
    else:
        host_part, _, path_part = scheme_free.partition('/')

    if not path_part and '?' in host_part:
        raise InvalidURI("A '/' is required between "
                         'the host list and any options.')

    # str.split handles the single-host case too, so no branch is needed.
    nodelist = []
    for node in host_part.split(','):
        match = _HOST_MATCH.match(node)
        if match is None:
            raise ValueError(
                "Reserved characters such as ':' must be escaped according RFC "
                "2396. An IPv6 address literal must be enclosed in '[' and ']' "
                'according to RFC 2732.')

        hostname = match.group(2)
        # Strip the brackets from an IPv6 literal.
        if hostname.startswith('[') and hostname.endswith(']'):
            hostname = hostname[1:-1]

        port = match.group(4)
        if port:
            try:
                port = int(port)
                if not (0 <= port <= 65535):
                    raise ValueError()
            except ValueError:
                raise ValueError(
                    'Port must be an integer between 0 and 65535:', port)
        else:
            port = default_port

        nodelist.append((hostname, port))

    # Anything before '?' in the path is the database; drop any trailing
    # '.collection' suffix.
    dbase = None
    if path_part and not path_part.startswith('?'):
        dbase = path_part.partition('?')[0]
        if '.' in dbase:
            dbase = dbase.split('.', 1)[0]
        dbase = unquote_plus(dbase)

    return {'nodelist': tuple(nodelist), 'database': dbase}
Ejemplo n.º 28
0
        rlist = tvshows.tvshows().get(url, create_directory=False)
        r = sys.argv[0]+"?action=random&rtype=season"
    from random import randint
    import simplejson as json
    try:
        from resources.lib.modules import control
        rand = randint(1,len(rlist))-1
        for p in ['title','year','imdb','tmdb','season','episode','tvshowtitle','premiered','select']:
            if rtype == "show" and p == "tvshowtitle":
                try: r += '&'+p+'='+urllib_parse.quote_plus(rlist[rand]['originaltitle'])
                except: pass
            else:
                if rtype == "movie":
                    rlist[rand]['title'] = rlist[rand]['originaltitle']
                elif rtype == "episode":
                    rlist[rand]['tvshowtitle'] = urllib_parse.unquote_plus(rlist[rand]['tvshowtitle'])
                try: r += '&'+p+'='+urllib_parse.quote_plus(rlist[rand][p])
                except: pass
        try: r += '&meta='+urllib_parse.quote_plus(json.dumps(rlist[rand]))
        except: r += '&meta={}'
        if rtype == "movie":
            try: control.infoDialog('%s (%s)' % (rlist[rand]['title'], rlist[rand]['year']), control.lang(32536), time=20000)
            except: pass
        elif rtype == "episode":
            try: control.infoDialog('%s - %01dx%02d . %s' % (urllib_parse.unquote_plus(rlist[rand]['tvshowtitle']), int(rlist[rand]['season']), int(rlist[rand]['episode']), rlist[rand]['title']), control.lang(32536), time=20000)
            except: pass
        control.execute('RunPlugin(%s)' % r)
    except:
        from resources.lib.modules import control
        control.infoDialog(control.lang(32537), time=8000)
Ejemplo n.º 29
0
def unescape_html(s):
    """Decode HTML entities and URL quoting from *s*, then apply the custom table."""
    decoded = unquote_plus(html.unescape(s))
    return unescape(decoded, HTML_UNESCAPE_TABLE)
Ejemplo n.º 30
0
        return host
    except BaseException:
        pass


# Parameters decoded from the plugin invocation URL.
params = init.params
mode = params.get('mode')
name = params.get('name')
iconimage = params.get('iconimage')
fanart = params.get('fanart')
description = params.get('description')
url = params.get('url')

# Optional decoding of selected parameters; on any failure the raw
# .get() value above is kept.  Catch Exception rather than BaseException
# so KeyboardInterrupt/SystemExit still propagate.
try:
    url = unquote_plus(params["url"])
except Exception:
    pass
try:
    name = unquote_plus(params["name"])
except Exception:
    pass
try:
    iconimage = unquote_plus(params["iconimage"])
except Exception:
    pass
try:
    mode = int(params["mode"])
except Exception:
    pass
try:
Ejemplo n.º 31
0
def routing(_argv):

    params = dict(urllib_parse.parse_qsl(_argv.replace('?', '')))

    action = params.get('action')

    name = params.get('name')

    title = params.get('title')

    year = params.get('year')

    imdb = params.get('imdb')

    tvdb = params.get('tvdb')

    tmdb = params.get('tmdb')

    season = params.get('season')

    episode = params.get('episode')

    tvshowtitle = params.get('tvshowtitle')

    premiered = params.get('premiered')

    url = params.get('url')

    image = params.get('image')

    meta = params.get('meta')

    select = params.get('select')

    query = params.get('query')

    source = params.get('source')

    content = params.get('content')

    status = params.get('status')

    rtype = params.get('rtype')

    windowedtrailer = params.get('windowedtrailer')
    windowedtrailer = int(windowedtrailer) if windowedtrailer in ('0',
                                                                  '1') else 0

    if action == None:
        from resources.lib.indexers import navigator
        from resources.lib.modules import cache
        cache.cache_version_check()
        navigator.navigator().root()

    elif action == 'furkNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().furk()

    elif action == 'furkMetaSearch':
        from resources.lib.indexers import furk
        furk.furk().furk_meta_search(url)

    elif action == 'furkSearch':
        from resources.lib.indexers import furk
        furk.furk().search()

    elif action == 'furkUserFiles':
        from resources.lib.indexers import furk
        furk.furk().user_files()

    elif action == 'furkSearchNew':
        from resources.lib.indexers import furk
        furk.furk().search_new()

    elif action == 'movieNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().movies()

    elif action == 'movieliteNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().movies(lite=True)

    elif action == 'mymovieNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().mymovies()

    elif action == 'mymovieliteNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().mymovies(lite=True)

    elif action == 'tvNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().tvshows()

    elif action == 'tvliteNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().tvshows(lite=True)

    elif action == 'mytvNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().mytvshows()

    elif action == 'mytvliteNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().mytvshows(lite=True)

    elif action == 'downloadNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().downloads()

    elif action == 'libraryNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().library()

    elif action == 'toolNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().tools()

    elif action == 'searchNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().search()

    elif action == 'viewsNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().views()

    elif action == 'cacheNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().cache_functions()

    elif action == 'logNavigator':
        from resources.lib.indexers import navigator
        navigator.navigator().log_functions()

    elif action == 'clearCache':
        from resources.lib.indexers import navigator
        navigator.navigator().clearCache()

    elif action == 'clearCacheProviders':
        from resources.lib.indexers import navigator
        navigator.navigator().clearCacheProviders()

    elif action == 'clearDebridCheck':
        from resources.lib.indexers import navigator
        navigator.navigator().clearDebridCheck()

    elif action == 'clearCacheSearch':
        from resources.lib.indexers import navigator
        navigator.navigator().clearCacheSearch(select)

    elif action == 'clearAllCache':
        from resources.lib.indexers import navigator
        navigator.navigator().clearCacheAll()

    elif action == 'infoCheck':
        from resources.lib.indexers import navigator
        navigator.navigator().infoCheck('')

    elif action == 'uploadLog':
        from resources.lib.indexers import navigator
        navigator.navigator().uploadLog()

    elif action == 'emptyLog':
        from resources.lib.indexers import navigator
        navigator.navigator().emptyLog()

    elif action == 'viewLog':
        from resources.lib.modules import log_utils
        log_utils.view_log()

    elif action == 'movies':
        from resources.lib.indexers import movies
        movies.movies().get(url)

    elif action == 'moviePage':
        from resources.lib.indexers import movies
        movies.movies().get(url)

    elif action == 'movieWidget':
        from resources.lib.indexers import movies
        movies.movies().widget()

    elif action == 'movieSearch':
        from resources.lib.indexers import movies
        movies.movies().search()

    elif action == 'movieSearchnew':
        from resources.lib.indexers import movies
        movies.movies().search_new()

    elif action == 'movieSearchterm':
        from resources.lib.indexers import movies
        movies.movies().search_term(name)

    elif action == 'movieMosts':
        from resources.lib.indexers import movies
        movies.movies().mosts()

    elif action == 'movieGenres':
        from resources.lib.indexers import movies
        movies.movies().genres()

    elif action == 'movieLanguages':
        from resources.lib.indexers import movies
        movies.movies().languages()

    elif action == 'movieCertificates':
        from resources.lib.indexers import movies
        movies.movies().certifications()

    elif action == 'movieYears':
        from resources.lib.indexers import movies
        movies.movies().years()

    elif action == 'movieDecades':
        from resources.lib.indexers import movies
        movies.movies().decades()

    elif action == 'movieKeywords':
        from resources.lib.indexers import movies
        movies.movies().keywords()

    elif action == 'movieKeywords2':
        from resources.lib.indexers import movies
        movies.movies().keywords2()

    elif action == 'movieCustomLists':
        from resources.lib.indexers import movies
        movies.movies().custom_lists()

    elif action == 'movieUserlists':
        from resources.lib.indexers import movies
        movies.movies().userlists()

    elif action == 'channels':
        from resources.lib.indexers import channels
        channels.channels().get()

    elif action == 'tvshows':
        from resources.lib.indexers import tvshows
        tvshows.tvshows().get(url)

    elif action == 'tvshowPage':
        from resources.lib.indexers import tvshows
        tvshows.tvshows().get(url)

    elif action == 'tvSearch':
        from resources.lib.indexers import tvshows
        tvshows.tvshows().search()

    elif action == 'tvSearchnew':
        from resources.lib.indexers import tvshows
        tvshows.tvshows().search_new()

    elif action == 'tvSearchterm':
        from resources.lib.indexers import tvshows
        tvshows.tvshows().search_term(name)

    elif action == 'tvMosts':
        from resources.lib.indexers import tvshows
        tvshows.tvshows().mosts()

    elif action == 'tvGenres':
        from resources.lib.indexers import tvshows
        tvshows.tvshows().genres()

    elif action == 'tvNetworks':
        from resources.lib.indexers import tvshows
        tvshows.tvshows().networks()

    elif action == 'tvLanguages':
        from resources.lib.indexers import tvshows
        tvshows.tvshows().languages()

    elif action == 'tvCertificates':
        from resources.lib.indexers import tvshows
        tvshows.tvshows().certifications()

    elif action == 'tvUserlists':
        from resources.lib.indexers import tvshows
        tvshows.tvshows().userlists()

    elif action == 'peopleSearch':
        from resources.lib.indexers import people
        people.People().search(content)

    elif action == 'peopleSearchnew':
        from resources.lib.indexers import people
        people.People().search_new(content)

    elif action == 'peopleSearchterm':
        from resources.lib.indexers import people
        people.People().search_term(name, content)

    elif action == 'persons':
        from resources.lib.indexers import people
        people.People().persons(url, content)

    elif action == 'moviePerson':
        from resources.lib.indexers import people
        people.People().persons(url, content='movies')

    elif action == 'tvPerson':
        from resources.lib.indexers import people
        people.People().persons(url, content='tvshows')

    elif action == 'personsSelect':
        from resources.lib.indexers import people
        people.People().getPeople(name, url)

    elif action == 'seasons':
        from resources.lib.indexers import episodes
        episodes.seasons().get(tvshowtitle, year, imdb, tmdb, meta)

    elif action == 'episodes':
        from resources.lib.indexers import episodes
        episodes.episodes().get(tvshowtitle, year, imdb, tmdb, meta, season,
                                episode)

    elif action == 'calendar':
        from resources.lib.indexers import episodes
        episodes.episodes().calendar(url)

    elif action == 'tvWidget':
        from resources.lib.indexers import episodes
        episodes.episodes().widget()

    elif action == 'calendars':
        from resources.lib.indexers import episodes
        episodes.episodes().calendars()

    elif action == 'episodeUserlists':
        from resources.lib.indexers import episodes
        episodes.episodes().userlists()

    elif action == 'refresh':
        from resources.lib.modules import control
        control.refresh()

    elif action == 'queueItem':
        from resources.lib.modules import control
        control.queueItem()

    elif action == 'openSettings':
        from resources.lib.modules import control
        control.openSettings(query)

    elif action == 'artwork':
        from resources.lib.modules import control
        control.artwork()

    elif action == 'addView':
        from resources.lib.modules import views
        views.addView(content)

    elif action == 'moviePlaycount':
        from resources.lib.modules import playcount
        playcount.movies(imdb, query)

    elif action == 'episodePlaycount':
        from resources.lib.modules import playcount
        playcount.episodes(imdb, tmdb, season, episode, query)

    elif action == 'tvPlaycount':
        from resources.lib.modules import playcount
        playcount.tvshows(name, imdb, tmdb, season, query)

    elif action == 'yt_trailer':
        from resources.lib.modules import control, trailer
        if not control.condVisibility('System.HasAddon(plugin.video.youtube)'):
            control.installAddon('plugin.video.youtube')
        trailer.YT_trailer().play(name, url, tmdb, imdb, season, episode,
                                  windowedtrailer)

    elif action == 'tmdb_trailer':
        from resources.lib.modules import control, trailer
        if not control.condVisibility('System.HasAddon(plugin.video.youtube)'):
            control.installAddon('plugin.video.youtube')
        trailer.TMDb_trailer().play(tmdb, imdb, season, episode,
                                    windowedtrailer)

    elif action == 'imdb_trailer':
        from resources.lib.modules import trailer
        trailer.IMDb_trailer().play(imdb, name, tmdb, season, episode,
                                    windowedtrailer)

    elif action == 'traktManager':
        from resources.lib.modules import trakt
        trakt.manager(name, imdb, tmdb, content)

    elif action == 'authTrakt':
        from resources.lib.modules import trakt
        trakt.authTrakt()

    elif action == 'smuSettings':
        try:
            import resolveurl
            resolveurl.display_settings()
        except:
            pass

    elif action == 'oathscrapersettings':
        from resources.lib.modules import control
        control.openSettings('0.0', 'script.module.oathscrapers')

    elif action == 'installOrion':
        from resources.lib.modules import control
        control.installAddon('script.module.orion')
        control.sleep(200)
        control.refresh()

    elif action == 'orionsettings':
        from resources.lib.modules import control
        control.openSettings('0.0', 'script.module.orion')

    elif action == 'download':
        import simplejson as json
        from resources.lib.modules import sources
        from resources.lib.modules import downloader
        try:
            downloader.download(
                name, image,
                sources.sources().sourcesResolve(json.loads(source)[0], True))
        except:
            pass

    elif action == 'play':
        from resources.lib.modules import control
        control.busy()
        from resources.lib.modules import sources
        sources.sources().play(title,
                               year,
                               imdb,
                               tmdb,
                               season,
                               episode,
                               tvshowtitle,
                               premiered,
                               meta,
                               select,
                               unfiltered=False)

    elif action == 'playUnfiltered':
        from resources.lib.modules import control
        control.busy()
        from resources.lib.modules import sources
        sources.sources().play(title,
                               year,
                               imdb,
                               tmdb,
                               season,
                               episode,
                               tvshowtitle,
                               premiered,
                               meta,
                               select,
                               unfiltered=True)

    elif action == 'addItem':
        from resources.lib.modules import sources
        sources.sources().addItem(title)

    elif action == 'playItem':
        from resources.lib.modules import sources
        sources.sources().playItem(title, source)

    elif action == 'alterSources':
        from resources.lib.modules import sources
        sources.sources().alterSources(url, meta)

    elif action == 'clearSources':
        from resources.lib.modules import sources
        sources.sources().clearSources()

    elif action == 'random':
        from sys import argv
        if rtype == 'movie':
            from resources.lib.indexers import movies
            rlist = movies.movies().get(url, create_directory=False)
            r = argv[0] + '?action=play'
        elif rtype == 'episode':
            from resources.lib.indexers import episodes
            rlist = episodes.episodes().get(tvshowtitle,
                                            year,
                                            imdb,
                                            tmdb,
                                            meta,
                                            season,
                                            create_directory=False)
            r = argv[0] + '?action=play'
        elif rtype == 'season':
            from resources.lib.indexers import episodes
            rlist = episodes.seasons().get(tvshowtitle,
                                           year,
                                           imdb,
                                           tmdb,
                                           None,
                                           create_directory=False)
            r = argv[0] + '?action=random&rtype=episode'
        elif rtype == 'show':
            from resources.lib.indexers import tvshows
            rlist = tvshows.tvshows().get(url, create_directory=False)
            r = argv[0] + '?action=random&rtype=season'
        from random import randint
        import simplejson as json
        try:
            from resources.lib.modules import control
            rand = randint(1, len(rlist)) - 1
            for p in [
                    'title', 'year', 'imdb', 'tmdb', 'season', 'episode',
                    'tvshowtitle', 'premiered', 'select'
            ]:
                if rtype == 'show' and p == 'tvshowtitle':
                    try:
                        r += '&' + p + '=' + urllib_parse.quote_plus(
                            rlist[rand]['title'])
                    except:
                        pass
                else:
                    if rtype == 'movie':
                        rlist[rand]['title'] = rlist[rand]['originaltitle']
                    elif rtype == 'episode':
                        rlist[rand]['tvshowtitle'] = urllib_parse.unquote_plus(
                            rlist[rand]['tvshowtitle'])
                    try:
                        r += '&' + p + '=' + urllib_parse.quote_plus(
                            rlist[rand][p])
                    except:
                        pass
            try:
                r += '&meta=' + urllib_parse.quote_plus(json.dumps(
                    rlist[rand]))
            except:
                r += '&meta={}'
            if rtype == 'movie':
                try:
                    control.infoDialog(
                        '%s (%s)' %
                        (rlist[rand]['title'], rlist[rand]['year']),
                        control.lang(32536),
                        time=20000)
                except:
                    pass
            elif rtype == 'episode':
                try:
                    control.infoDialog(
                        '%s - %01dx%02d . %s' %
                        (urllib_parse.unquote_plus(rlist[rand]['tvshowtitle']),
                         int(rlist[rand]['season']), int(
                             rlist[rand]['episode']), rlist[rand]['title']),
                        control.lang(32536),
                        time=20000)
                except:
                    pass
            control.execute('RunPlugin(%s)' % r)
        except:
            from resources.lib.modules import control
            control.infoDialog(control.lang(32537), time=8000)

    elif action == 'movieToLibrary':
        from resources.lib.modules import libtools
        libtools.libmovies().add(name, title, year, imdb, tmdb)

    elif action == 'moviesToLibrary':
        from resources.lib.modules import libtools
        libtools.libmovies().range(url)

    elif action == 'moviesToLibrarySilent':
        from resources.lib.modules import libtools
        libtools.libmovies().silent(url)

    elif action == 'tvshowToLibrary':
        from resources.lib.modules import libtools
        libtools.libtvshows().add(tvshowtitle, year, imdb, tmdb)

    elif action == 'tvshowsToLibrary':
        from resources.lib.modules import libtools
        libtools.libtvshows().range(url)

    elif action == 'tvshowsToLibrarySilent':
        from resources.lib.modules import libtools
        libtools.libtvshows().silent(url)

    elif action == 'updateLibrary':
        from resources.lib.modules import libtools
        libtools.libepisodes().update(query)

    elif action == 'service':
        from resources.lib.modules import libtools
        libtools.libepisodes().service()

    elif action == 'syncTraktStatus':
        from resources.lib.modules import trakt
        trakt.syncTraktStatus()

    elif action == 'changelog':
        from resources.lib.modules import changelog
        changelog.get()

    elif action == 'cleanSettings':
        from resources.lib.modules import control
        control.clean_settings()

    elif action == 'tvcredits':
        from resources.lib.modules import credits
        credits.Credits().get_tv(tmdb, status)

    elif action == 'moviecredits':
        from resources.lib.modules import credits
        credits.Credits().get_movies(tmdb, status)
Ejemplo n.º 32
0
def unescape_html(s):
    """Fully decode an HTML-sourced string.

    Applies, in order: HTML entity decoding (e.g. ``&amp;`` -> ``&``),
    URL percent/plus decoding, and the module's custom
    ``HTML_UNESCAPE_TABLE`` replacements.
    """
    try:
        # HTMLParser.unescape() was deprecated in Python 3.4 and removed in
        # Python 3.9; use the supported html.unescape() when available.
        from html import unescape as _entity_unescape
        s = _entity_unescape(s)
    except ImportError:
        # Python 2 fallback: no stdlib `html` module.
        s = html_parser.HTMLParser().unescape(s)
    s = unquote_plus(s)
    return unescape(s, HTML_UNESCAPE_TABLE)
Ejemplo n.º 33
0
    def __test_stream(self, stream_url):
        """
        Returns True if the stream_url gets a non-failure http status (i.e. <400) back from the server
        otherwise return False

        Intended to catch stream urls returned by resolvers that would fail to playback
        """
        # parse_qsl doesn't work because it splits elements by ';' which can be in a non-quoted UA
        try:
            headers = dict([
                item.split('=')
                for item in (stream_url.split('|')[1]).split('&')
            ])
        except:
            headers = {}
        for header in headers:
            headers[header] = urllib_parse.unquote_plus(headers[header])
        common.logger.log_debug('Setting Headers on UrlOpen: %s' % headers)

        # Install an opener that skips TLS certificate validation: we only
        # need an HTTP status, and many stream hosts use bad/self-signed
        # certs. NOTE(review): install_opener changes process-global state.
        try:
            import ssl
            ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            opener = urllib_request.build_opener(
                urllib_request.HTTPSHandler(context=ssl_context))
            urllib_request.install_opener(opener)
        except:
            pass

        try:
            msg = ''
            request = urllib_request.Request(stream_url.split('|')[0],
                                             headers=headers)
            # only do a HEAD request. gujal
            request.get_method = lambda: 'HEAD'
            #  set urlopen timeout to 15 seconds
            http_code = urllib_request.urlopen(request, timeout=15).getcode()
        except urllib_error.HTTPError as e:
            # The except clause already guarantees e is an HTTPError, so the
            # original redundant isinstance() check (and its unreachable
            # http_code = 600 branch) has been removed.
            http_code = e.code
            if http_code == 405:
                # Some servers reject HEAD with 405 although GET would
                # succeed; treat that as a playable stream.
                http_code = 200
        except urllib_error.URLError as e:
            http_code = 500
            if hasattr(e, 'reason'):
                # treat an unhandled url type as success
                if 'unknown url type' in str(e.reason).lower():
                    return True
                else:
                    msg = e.reason
            if not msg:
                msg = str(e)

        except Exception as e:
            # Synthetic codes: 601 = unexpected local failure; 504 when the
            # exception carried no message (treated as success below).
            http_code = 601
            msg = str(e)
            if msg == "''":
                http_code = 504

        # added this log line for now so that we can catch any logs on streams that are rejected due to test_stream failures
        # we can remove it once we are sure this works reliably
        if int(http_code) >= 400 and int(http_code) != 504:
            common.logger.log_warning(
                'Stream UrlOpen Failed: Url: %s HTTP Code: %s Msg: %s' %
                (stream_url, http_code, msg))

        return int(http_code) < 400 or int(http_code) == 504
Ejemplo n.º 34
0
            "thumb": "{0}search.png".format(_ipath),
            "icon": "{0}search.png".format(_ipath)
        })
        list_item.setInfo(type="Video", infoLabels={"genre": "History"})
        url = "{0}?url={1}&mode={2}".format(sys.argv[0], item["url"],
                                            item["mode"])

        listoflists.append((url, list_item, True))

    ok = xbmcplugin.addDirectoryItems(pluginhandle, listoflists,
                                      len(listoflists))
    return ok


# Plugin entry point: decode the quoted call parameters, then route the
# requested mode to its handler.
params = parameters_string_to_dict(sys.argv[2])
mode = urllib_parse.unquote_plus(params.get('mode', ''))
url = urllib_parse.unquote_plus(params.get('url', ''))
name = urllib_parse.unquote_plus(params.get('name', ''))

# Dispatch table replacing the if/elif chain; an unrecognized mode simply
# does nothing, exactly as before.
_handlers = {
    'listVideos': lambda: listVideos(url),
    'listLive': lambda: listLive(url),
    'listUsers': lambda: listUsers(url),
    'listChannels': lambda: listChannels(),
    'favourites': lambda: favourites(url),
    'addFav': lambda: addFav(),
}
_handler = _handlers.get(mode)
if _handler is not None:
    _handler()
Ejemplo n.º 35
0
def doDownload(url, dest, title, image, headers):
    """Download `url` to `dest` with Kodi progress notifications and resume.

    All arguments arrive URL-quoted (`headers` additionally JSON-encoded)
    and are decoded here before use. Shows a confirmation dialog, then
    streams the response in up-to-1MB chunks, buffering the last few chunks
    so a resumable connection can be re-opened mid-file after errors.
    Terminates via done(title, dest, True/False).
    """
    # `headers` is quote_plus(json.dumps({...})) from the caller.
    headers = json.loads(urllib_parse.unquote_plus(headers))

    url = urllib_parse.unquote_plus(url)

    title = urllib_parse.unquote_plus(title)

    image = urllib_parse.unquote_plus(image)

    dest = urllib_parse.unquote_plus(dest)

    # Bare filename (last path component) for dialog text.
    file = dest.rsplit(os.sep, 1)[-1]

    # Initial request starting at byte offset 0.
    resp = getResponse(url, headers, 0)

    if not resp:
        xbmcgui.Dialog().ok(
            title, dest + '[CR]' + 'Download failed' + '[CR]' +
            'No response from server')
        return

    # Total size in bytes; 0 if the server sent no Content-Length.
    try:
        content = int(resp.headers['Content-Length'])
    except:
        content = 0

    # Server advertises byte-range support -> we can resume after errors.
    try:
        resumable = 'bytes' in resp.headers['Accept-Ranges'].lower()
    except:
        resumable = False

    #print("Download Header")
    #print(resp.headers)
    if resumable:
        print("Download is resumable")

    # Without a known size we cannot track progress or verify completion.
    if content < 1:
        xbmcgui.Dialog().ok(
            title,
            file + '[CR]' + 'Unknown filesize' + '[CR]' + 'Unable to download')
        return

    # Read in 1MB chunks (or the whole file, if smaller).
    size = 1024 * 1024
    mb = content / (1024 * 1024)

    if content < size:
        size = content

    total = 0    # bytes flushed to disk so far (resume offset)
    notify = 0   # next progress percentage at which to show a notification
    errors = 0   # consecutive read errors since last success
    count = 0    # total errors over the whole download (logging only)
    resume = 0   # number of resume attempts so far
    sleep = 0    # back-off (seconds) applied after a read error

    if not xbmcgui.Dialog().yesno(
            title + ' - Confirm Download', file + '[CR]' +
            'Complete file is %dMB' % mb + '[CR]' + 'Continue with download?'):
        return

    print('Download File Size : %dMB %s ' % (mb, dest))

    #f = open(dest, mode='wb')
    f = xbmcvfs.File(dest, 'w')

    chunk = None
    chunks = []  # in-memory buffer; only flushed to disk once >5 chunks deep

    while True:
        # Progress = bytes on disk plus bytes still buffered in `chunks`.
        downloaded = total
        for c in chunks:
            downloaded += len(c)
        percent = min(100 * downloaded / content, 100)
        if percent >= notify:
            xbmcgui.Dialog().notification(
                'Download Progress: ' + str(int(percent)) + '% - ' + title,
                dest, image, 5000, False)

            print(
                'Download percent : %s %s %dMB downloaded : %sMB File Size : %sMB'
                % (str(percent) + '%', dest, mb, downloaded / 1000000,
                   content / 1000000))

            # Notify roughly every 10%.
            notify += 10

        chunk = None
        error = False

        try:
            chunk = resp.read(size)
            if not chunk:
                # EOF: treat as an error unless we are essentially done,
                # in which case flush the buffer and finish.
                if percent < 99:
                    error = True
                else:
                    while len(chunks) > 0:
                        c = chunks.pop(0)
                        f.write(c)
                        del c

                    f.close()
                    print('%s download complete' % (dest))
                    return done(title, dest, True)

        except Exception as e:
            print(str(e))
            error = True
            sleep = 10
            errno = 0

            if hasattr(e, 'errno'):
                errno = e.errno

            # Winsock error codes below — TODO confirm these still surface
            # as e.errno on current platforms.
            if errno == 10035:  # 'A non-blocking socket operation could not be completed immediately'
                pass

            if errno == 10054:  #'An existing connection was forcibly closed by the remote host'
                errors = 10  #force resume
                sleep = 30

            if errno == 11001:  # 'getaddrinfo failed'
                errors = 10  #force resume
                sleep = 30

        if chunk:
            errors = 0
            chunks.append(chunk)
            # Keep at most 5 chunks in memory; oldest goes to disk and
            # advances `total` (the byte offset used when resuming).
            if len(chunks) > 5:
                c = chunks.pop(0)
                f.write(c)
                total += len(c)
                del c

        if error:
            errors += 1
            count += 1
            print('%d Error(s) whilst downloading %s' % (count, dest))
            xbmc.sleep(sleep * 1000)

        # Resume immediately on any error when the server supports ranges,
        # otherwise only after 10 consecutive errors.
        if (resumable and errors > 0) or errors >= 10:
            if (not resumable and resume >= 50) or resume >= 500:
                #Give up!
                print(
                    '%s download canceled - too many error whilst downloading'
                    % (dest))
                return done(title, dest, False)

            resume += 1
            errors = 0
            if resumable:
                # Drop unflushed chunks and re-request from offset `total`.
                chunks = []
                #create new response
                print('Download resumed (%d) %s' % (resume, dest))
                resp = getResponse(url, headers, total)
            else:
                #use existing response
                pass
Ejemplo n.º 36
0
    def get(self, query):
        """Search subztv for subtitles matching `query`.

        `query` is "<title string>/imdb=<id>", where the title part is either
        "Movie Title (YYYY)" (movie) or "Show Title SxxExx" (episode). Scrapes
        the site's listing page, then appends one dict per subtitle row to
        self.list ('name', 'url', 'source', 'rating') and returns it.
        Returns None if scraping fails at any point.
        """
        try:
            query, imdb = query.split('/imdb=')
            # NOTE(review): the character class [\s+\(|\s+] matches any of
            # whitespace/'+'/'('/'|' — likely meant as alternation, but kept
            # as-is to preserve matching behavior.
            match = re.findall(r'^(?P<title>.+)[\s+\(|\s+](?P<year>\d{4})',
                               query)
            cookie = self.s.get(self.baseurl, headers=self.hdr)

            cj = requests.utils.dict_from_cookiejar(cookie.cookies)

            if len(match) > 0:
                # Movie path: "Title (YYYY)".
                title, year = match[0][0], match[0][1]

                if imdb.startswith('tt'):
                    frame = self.baseurl + 'view/{}'.format(imdb)
                    r = self.s.get(frame).text
                    if six.PY2:
                        # Strip non-ASCII so Py2 regex/DOM parsing won't choke.
                        r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                else:
                    # No IMDb id: search by title and pick the exact match.
                    url = self.baseurl + 'search/{}/movies'.format(
                        quote(title))

                    data = self.s.get(url).text
                    data = client.parseDOM(data, 'span', attrs={'class': 'h5'})
                    data = [(client.parseDOM(i, 'a')[0],
                             client.parseDOM(i, 'a', ret='href')[0])
                            for i in data if i]
                    frame = [
                        i[1] for i in data
                        if cleantitle.get(i[0]) == cleantitle.get(title)
                    ][0]

                    r = self.s.get(frame).text
                    if six.PY2:
                        r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                # Anti-scrape token required by the download endpoint.
                secCode = client.parseDOM(r,
                                          'input',
                                          ret='value',
                                          attrs={'id': 'secCode'})[0]
                items = client.parseDOM(r, 'tbody')[0]
                items = client.parseDOM(items, 'tr')

            else:
                # Episode path: "Title SxxExx".
                title, season, episode = re.findall(
                    r'^(?P<title>.+)\s+S(\d+)E(\d+)', query, re.I)[0]
                hdlr = 'season-{}-episode-{}'.format(int(season), int(episode))
                if imdb.startswith('tt'):
                    r = self.s.get(self.baseurl + 'view/{}'.format(imdb)).text
                    frames = client.parseDOM(r, 'a', ret='href')
                    link = [i for i in frames if hdlr in i]

                    if not link:
                        frame = self.baseurl + 'view/{}'.format(imdb)
                    else:
                        frame = link[0]
                else:
                    if len(imdb) > 1:
                        # TVDB id: resolve it to an IMDb id via the TVDB API.
                        # Fixed: original had a stray leading space in the URL.
                        baseurl = 'https://api.thetvdb.com/login'
                        series_url = 'https://api.thetvdb.com/series/%s'
                        greek_api = '7d4261794838bb48a3122381811ecb42'
                        user_key = 'TJXB86PGDBYN0818'
                        # NOTE(review): placeholder value — presumably the
                        # real username was redacted; confirm configuration.
                        username = '******'

                        _headers = {
                            'Content-Type': 'application/json',
                            'Accept': 'application/json',
                            'Connection': 'close'
                        }

                        post = {
                            "apikey": greek_api,
                            "username": username,
                            "userkey": user_key
                        }

                        data = client.request(baseurl,
                                              post=json.dumps(post),
                                              headers=_headers)
                        auth = 'Bearer {}'.format(
                            unquote_plus(json.loads(data)['token']))
                        _headers['Authorization'] = auth

                        series_data = client.request(series_url % imdb,
                                                     headers=_headers)
                        imdb = json.loads(series_data)['data']['imdbId']
                        r = self.s.get(self.baseurl +
                                       'view/{}'.format(imdb)).text
                        frames = client.parseDOM(r, 'a', ret='href')
                        frame = [i for i in frames if hdlr in i][0]
                    else:
                        # No usable id at all: search by title.
                        url = self.baseurl + 'search/{}/tv'.format(
                            quote(title))
                        data = self.s.get(url).text
                        data = client.parseDOM(data,
                                               'span',
                                               attrs={'class': 'h5'})
                        data = [(client.parseDOM(i, 'a')[0],
                                 client.parseDOM(i, 'a', ret='href')[0])
                                for i in data if i]

                        serie_link = [
                            i[1] for i in data
                            if cleantitle.get(i[0]) == cleantitle.get(title)
                        ][0]
                        imdbid = re.findall(r'\/(tt\d+)\/', serie_link)[0]
                        r = self.s.get(self.baseurl +
                                       'view/{}'.format(imdbid)).text
                        frames = client.parseDOM(r, 'a', ret='href')
                        frame = [i for i in frames if hdlr in i][0]

                frame = client.replaceHTMLCodes(frame)
                frame = six.ensure_text(frame, encoding='utf-8')
                r = self.s.get(frame).text
                secCode = client.parseDOM(r,
                                          'input',
                                          ret='value',
                                          attrs={'id': 'secCode'})[0]
                items = client.parseDOM(r, 'tbody')[0]
                items = client.parseDOM(items, 'tr')

        except BaseException:
            return

        for item in items:
            try:
                item = six.ensure_str(item, encoding='utf-8')
                try:
                    imdb = re.search(r'\/(tt\d+)\/', str(frame)).groups()[0]
                except BaseException:
                    imdb = re.search(r'\/(tt\d+)', str(frame)).groups()[0]

                # data: (download token, download count, display name)
                data = re.findall(
                    r'''downloadMe\(['"](\w+-\w+).+?label.+?>(\d+).+?<td>(.+?)</td''',
                    item, re.I | re.DOTALL)[0]
                name = data[2]
                name = client.replaceHTMLCodes(name)

                url = self.baseurl + 'dll/{}/0/{}'.format(data[0], secCode)
                url = client.replaceHTMLCodes(url)
                # Fixed: the ensure_str call was duplicated in the original.
                url = six.ensure_str(url, encoding='utf-8')
                name = six.ensure_str(name)
                down = data[1]
                rating = str(self._rating(down))

                self.list.append({
                    'name':
                    name,
                    'url':
                    '{}|{}|{}|{}|{}'.format(frame, url, cj['PHPSESSID'], name,
                                            imdb),
                    'source':
                    'subztv',
                    'rating':
                    rating
                })

            except BaseException:
                pass

        return self.list