Example #1
def get_content(url):  #5 <div id="content"><div class="container">
    r = client.request(url, headers=headers)
    # data = client.parseDOM(r, 'div', attrs={'class': 'container'})[0]
    # xbmc.log('DATAAAA: %s' % data)
    data = client.parseDOM(r, 'li', attrs={'class': 'webcam'})
    for item in data:
        link = client.parseDOM(item, 'a', ret='href')[0]
        if link == '#':
            continue
        link = client.replaceHTMLCodes(link)
        link = link.encode('utf-8')

        name = client.parseDOM(item, 'span', attrs={'class': 'title'})[0]
        name = client.replaceHTMLCodes(name)
        name = name.encode('utf-8')

        desc = client.parseDOM(item, 'span', attrs={'class': 'description'})[0]
        desc = clear_Title(desc)
        desc = desc.decode('ascii', errors='ignore')

        poster = client.parseDOM(item, 'img', ret='data-original')[0]
        poster = client.replaceHTMLCodes(poster)
        poster = 'https:' + poster if poster.startswith('//') else poster
        poster = poster.encode('utf-8')

        addDir('[B][COLOR white]%s[/COLOR][/B]' % name, link, 100, poster, '',
               desc)

    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
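
All of the snippets on this page lean on client.parseDOM from the add-on's bundled client helper: given an HTML string, a tag name, an optional attrs filter and an optional ret attribute, it returns a list of matching tag contents or attribute values. The regex sketch below is only a rough, self-contained approximation of that call pattern (the real helper shipped with the add-on is more robust):

import re

def parse_dom_sketch(html, tag, attrs=None, ret=None):
    # Loose pattern for an opening <tag ...> whose attributes match `attrs`.
    pattern = '<{}'.format(tag)
    for key, value in (attrs or {}).items():
        pattern += r'[^>]*?{}=["\']{}["\']'.format(key, value)
    if ret:
        # Capture the value of the requested attribute, e.g. ret='href'.
        pattern += r'[^>]*?{}=["\']([^"\']+)["\']'.format(ret)
    else:
        # Capture the inner content of the matched tag.
        pattern += r'[^>]*?>(.*?)</{}>'.format(tag)
    return re.findall(pattern, html, re.I | re.DOTALL)

html = '<li class="webcam"><a href="/cam/1"><span class="title">Beach</span></a></li>'
print(parse_dom_sketch(html, 'a', ret='href'))                   # ['/cam/1']
print(parse_dom_sketch(html, 'span', attrs={'class': 'title'}))  # ['Beach']
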
Example #2
def Get_content(url):  #5
    r = client.request(url, headers=headers)
    data = client.parseDOM(r, 'div', attrs={'class': 'imagen'})
    data = zip(client.parseDOM(data, 'a', ret='href'),
               client.parseDOM(data, 'a', ret='title'),
               client.parseDOM(data, 'img', ret='src'))
    for item in data:
        link, name, icon = item[0], item[1], item[2]
        link = client.replaceHTMLCodes(link)
        link = link.encode('utf-8')

        name = client.replaceHTMLCodes(name)
        name = name.encode('utf-8')
        if 'capitulo' in link:
            addDir('[B][COLOR white]%s[/COLOR][/B]' % name, link, 10, icon,
                   FANART, '')
        else:
            addDir('[B][COLOR white]%s[/COLOR][/B]' % name, link, 8, icon,
                   FANART, '')
    try:
        np = client.parseDOM(r, 'li')
        np = [i for i in np if 'iguiente' in i][0]
        np = client.parseDOM(np, 'a', ret='href')[0]

        page = re.search(r'(\d+)', np, re.DOTALL)
        page = '[COLOR lime]%s[/COLOR]' % page.groups()[0]

        url = urlparse.urljoin(url, np)
        url = client.replaceHTMLCodes(url)

        addDir('[B][COLOR gold]Siguiente (%s) >>>[/COLOR][/B]' % page, url, 5,
               ICON, FANART, '')
    except:
        pass
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
Example #3
def get_content(url):  # 5 <div id="content"><div class="container">
    r = six.ensure_str(client.request(url, headers=headers))
    data = client.parseDOM(r, 'div', attrs={'class': 'container'})[0]
    data = dom.parse_dom(data, 'a', req='href')
    data = [i for i in data if 'subt' in i.content]
    # xbmc.log('DATA22: {}'.format(str(data)))
    for item in data:
        link = item.attrs['href']
        if link == '#':
            continue
        link = client.replaceHTMLCodes(link)

        name = client.parseDOM(item.content, 'img', ret='alt')[0]
        name = client.replaceHTMLCodes(name)

        desc = client.parseDOM(item.content, 'p', attrs={'class': 'subt'})[0]
        desc = clear_Title(desc)

        try:
            poster = client.parseDOM(item.content, 'img', ret='data-src')[0]
        except IndexError:
            poster = client.parseDOM(item.content, 'img', ret='src')[0]
        poster = client.replaceHTMLCodes(poster)
        poster = 'https:' + poster if poster.startswith('//') else poster

        if six.PY2:
            link = link.encode('utf-8')
            name = name.encode('utf-8')
            desc = desc.decode('ascii', errors='ignore')
            poster = poster.encode('utf-8')
        link = '{}/{}'.format(base_url, link)
        addDir('[B][COLOR white]%s[/COLOR][/B]' % name, link, 100, poster, '',
               desc)

    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
Example #4
def get_content(url):  #5 <div class="products-list__item shadow-hover">
    r = client._basic_request(url, headers=headers)
    # data = client.parseDOM(r, 'div', attrs={'class': 'products-view'})[0]
    data = client.parseDOM(r, 'div', attrs={'class': 'products-view__list.+?'})
    xbmc.log('DATAAA: %s' % str(data))
    for item in data:
        if 'FREE' in item:
            link = client.parseDOM(item, 'a', ret='href')[0]
            link = client.replaceHTMLCodes(link)
            link = link.encode('utf-8')

            name = client.parseDOM(item, 'a')[1]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            desc = client.parseDOM(
                item, 'div', attrs={'class': 'product-card__description'})[0]
            desc = clear_Title(desc)
            desc = desc.decode('ascii', errors='ignore')

            univ = client.parseDOM(item,
                                   'div',
                                   attrs={'class':
                                          'product-card__category'})[0]
            un_name, un_link = [
                client.parseDOM(univ, 'a')[0],
                client.parseDOM(univ, 'a', ret='href')[0]
            ]
            un_name = re.sub('<.+?>', '', un_name)
            un_name = un_name.encode('utf-8')

            poster = client.parseDOM(item, 'img', ret='src')[0]
            poster = client.replaceHTMLCodes(poster)
            poster = 'https:' + poster if poster.startswith('//') else poster
            poster = poster.encode('utf-8')

            addDir('[B]%s[/B]' % name, link, 10, poster, '', desc)
            if 'university' not in url:
                addDir('[B]Find lectures from %s[/B]' % un_name, un_link, 5,
                       poster, '', desc)
        else:
            pass

    try:
        np = re.findall('''<li><a href="(.+?)">Next''', r, re.DOTALL)[0]
        page = np.split('/')[:-1][-1]
        page = '[COLOR lime]{}[/COLOR]'.format(page)
        np = client.replaceHTMLCodes(np)
        addDir('[B][COLOR gold]Next Page>>>[/COLOR] [COLOR white]({})[/COLOR][/B]'.format(page),
               np, 5, ICON, '', 'Next Page')
    except:
        pass
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
Example #5
def Get_letras(url): #9
    r = client.request(url, headers=headers)
    r = client.parseDOM(r, 'ul', attrs={'id': 'letras'})[0]
    r = client.parseDOM(r, 'li')
    for item in r:
        name = client.parseDOM(item, 'a')[0]
        name = client.replaceHTMLCodes(name)
        name = name.encode('utf-8')

        url = client.parseDOM(item, 'a', ret='href')[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        addDir('[B][COLOR white]Telenovelas de %s[/COLOR][/B]' % name, url, 5, ICON, FANART, '')
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
Example #6
def get_lectures(name, url, poster, desc):
    r = client._basic_request(url, headers=headers)
    posts = client.parseDOM(r, 'li', attrs={'class': 'class-list__row'})
    for post in posts:
        link = client.parseDOM(post, 'a', ret='href')[0]
        link = client.replaceHTMLCodes(link)
        link = link.encode('utf-8')

        name = client.parseDOM(post, 'a')[0]
        name = client.replaceHTMLCodes(name)
        name = name.encode('utf-8')

        addDir('[B]%s[/B]' % name, link, 100, poster, '', desc)

    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
Example #7
def Episodes(url): #8
    r = client.request(url, headers=headers)
    data = client.parseDOM(r, 'ul', attrs={'id': 'listado'})[0]
    data = client.parseDOM(data, 'li')
    data = list(zip(client.parseDOM(data, 'a', ret='href'),
                    client.parseDOM(data, 'a')))
    get_icon = client.parseDOM(r, 'img', ret='src', attrs={'class': 'transparent'})[0]

    for item in data[::-1]:
        url, title = client.replaceHTMLCodes(item[0]), client.replaceHTMLCodes(item[1])
        url = url.encode('utf-8')
        title = title.encode('utf-8')

        addDir(title, url, 10, get_icon, FANART, '')
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
Example #8
def get_country(url):  #4
    r = client.request(url, headers=headers)
    r = client.parseDOM(r, 'li', attrs={'class': 'dropdown'})[0]
    r = zip(client.parseDOM(r, 'a', attrs={'class': 'menu-item'}),
            client.parseDOM(r, 'a', attrs={'class': 'menu-item'}, ret='href'))
    for name, link in r:
        name = re.sub('<.+?>', '', name).replace('&nbsp;', ' ')
        name = client.replaceHTMLCodes(name)
        name = name.encode('utf-8')

        link = client.replaceHTMLCodes(link)
        link = link.encode('utf-8')
        link = base_url + link if link.startswith('/') else link
        addDir('[B][COLOR white]%s[/COLOR][/B]' % name, link, 5, ICON, FANART,
               '')
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
Example #9
def get_country(url):  # 4
    r = six.ensure_str(client.request(url, headers=headers))
    r = client.parseDOM(r,
                        'div',
                        attrs={'class': 'dropdown mega-dropdown live'})[0]
    r = zip(client.parseDOM(r, 'a'), client.parseDOM(r, 'a', ret='href'))
    for name, link in r:
        name = re.sub('<.+?>', '', name).replace('&nbsp;', ' ')
        name = client.replaceHTMLCodes(name)
        name = '[B][COLOR white]{}[/COLOR][/B]'.format(name)
        link = client.replaceHTMLCodes(link)
        if six.PY2:
            name = name.encode('utf-8')
            link = link.encode('utf-8')
        link = base_url + link if link.startswith('/') else link
        addDir(name, link, 5, ICON, FANART, '')
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
Example #10
def get_new(url):
    r = client.request(url, headers=headers)
    r = client.parseDOM(r, 'div', attrs={'class': 'row'})[0]
    r = zip(client.parseDOM(r, 'a', ret='href'),
            client.parseDOM(r, 'img', ret='src'),
            client.parseDOM(r, 'img', ret='alt'))
    for link, poster, name in r:
        name = client.replaceHTMLCodes(name)
        name = name.encode('utf-8')

        link = client.replaceHTMLCodes(link)
        link = link.encode('utf-8')
        link = 'https:' + link if link.startswith('//') else link

        poster = client.replaceHTMLCodes(poster)
        poster = 'https:' + poster if poster.startswith('//') else poster
        poster = poster.encode('utf-8')

        addDir('[B][COLOR white]%s[/COLOR][/B]' % name, link, 100, poster,
               FANART, '')
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
Example #11
def get_greek_cams():
    link = 'http://www.livecameras.gr/'
    headers = {"User-Agent": client.agent()}
    r = client.request(link, headers=headers)
    r = r.encode('utf-8')
    cams = client.parseDOM(r, 'div', attrs={'class': 'fp-playlist'})[0]
    cams = zip(client.parseDOM(cams, 'a', ret='href'),
               client.parseDOM(cams, 'a', ret='data-title'),
               client.parseDOM(cams, 'img', ret='src'))
    for stream, name, poster in cams:
        name = re.sub('".+?false', '', name)
        name = client.replaceHTMLCodes(name).encode('utf-8')
        stream = 'http:' + stream if stream.startswith('//') else stream
        stream += '|Referer={}'.format(link)
        poster = link + poster if poster.startswith('/') else poster
        addDir('[B][COLOR white]%s[/COLOR][/B]' % name, stream, 100, poster,
               '', 'name')
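
Example #11 appends '|Referer=...' to the playable URL: Kodi commonly accepts request headers tacked onto a stream URL after a pipe, URL-encoded and joined with '&'. A minimal sketch of building such a URL with more than one header (the stream URL and the User-Agent value below are placeholders, not taken from the example):

try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2

stream = 'http://example.com/live/cam.m3u8'  # placeholder stream URL
headers = {'Referer': 'http://www.livecameras.gr/', 'User-Agent': 'Mozilla/5.0'}
# e.g. http://example.com/live/cam.m3u8|Referer=http%3A%2F%2F...&User-Agent=Mozilla%2F5.0
play_url = stream + '|' + urlencode(headers)
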
Example #12
    def get(self, query):
        try:
            query, imdb = query.split('/imdb=')
            match = re.findall(r'^(?P<title>.+)[\s+\(|\s+](?P<year>\d{4})',
                               query)
            if len(match) > 0:
                title, year = match[0][0], match[0][1]
                if imdb.startswith('tt'):
                    url = 'https://yifysubtitles.org/movie-imdb/{}'.format(
                        imdb)
                    r = six.ensure_text(client.request(url))

                else:
                    url = urljoin(self.base_link,
                                  self.search.format(quote_plus(title)))
                    r = six.ensure_text(client.request(url))
                    data = client.parseDOM(r,
                                           'div',
                                           attrs={
                                               'class': 'media-body'
                                           })  # <div class="media-body">
                    for i in data:
                        try:
                            name = client.parseDOM(i, 'h3')[0].encode('utf-8')
                            if not cleantitle.get(title) == cleantitle.get(
                                    client.replaceHTMLCodes(name)):
                                raise Exception()
                            y = re.search(r'">(\d{4})<small>year</small>',
                                          i).groups()[0]
                            if not year == y:
                                raise Exception()
                            url = client.parseDOM(i, 'a', ret='href')[0]
                            url = url.encode('utf-8')
                            url = urljoin(self.base_link, url)
                            r = client.request(url)
                        except BaseException:
                            pass

                data = client.parseDOM(r, 'tr', attrs={'data-id': r'\d+'})
                items = [i for i in data if 'greek' in i.lower()]
                # xbmc.log('$#$MATCH-YIFI-RRR: %s' % items)
                urls = []
                for item in items:
                    try:
                        # rating = client.parseDOM(item, 'span', attrs={'title': 'rating'})[0]
                        name = client.parseDOM(item, 'a')[0]
                        name = re.sub(r'<.+?>', '',
                                      name).replace('subtitle', '')
                        name = client.replaceHTMLCodes(name)

                        url = client.parseDOM(item, 'a', ret='href')[0]
                        url = client.replaceHTMLCodes(url)

                        if six.PY2:
                            url = url.encode('utf-8')
                            name = name.encode('utf-8')
                        urls += [(name, url)]
                    except BaseException:
                        pass
            else:
                return self.list

        except BaseException:
            return

        for i in urls:
            try:
                r = six.ensure_text(
                    client.request(urljoin(self.base_link, i[1])))
                url = client.parseDOM(
                    r,
                    'a',
                    ret='href',
                    attrs={'class': 'btn-icon download-subtitle'})[0]
                url = 'https://yifysubtitles.org/' + url if url.startswith(
                    '/') else url
                self.list.append({
                    'name': i[0],
                    'url': url,
                    'source': 'yifi',
                    'rating': '5'
                })
            except BaseException:
                pass

        return self.list
Example #13
    def get(self, query):
        try:
            query, imdb = query.split('/imdb=')
            match = re.findall('^(?P<title>.+)[\s+\(|\s+](?P<year>\d{4})',
                               query)

            cookie = self.s.get('https://subztv.online/',
                                headers=self.hdr).cookies
            cj = requests.utils.dict_from_cookiejar(cookie)

            if len(match) > 0:

                title, year = match[0][0], match[0][1]

                if imdb.startswith('tt'):
                    frame = 'https://subztv.online/view/%s' % imdb
                    r = self.s.get(frame)
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r.content)
                else:
                    url = 'https://subztv.online/search/%s/movies' % urllib.quote(
                        title)

                    data = self.s.get(url).content
                    data = client.parseDOM(data, 'span', attrs={'class': 'h5'})
                    data = [(client.parseDOM(i, 'a')[0],
                             client.parseDOM(i, 'a', ret='href')[0])
                            for i in data if i]

                    frame = [
                        i[1] for i in data
                        if cleantitle.get(i[0]) == cleantitle.get(title)
                    ][0]

                    r = self.s.get(frame).text
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)

                secCode = client.parseDOM(r,
                                          'input',
                                          ret='value',
                                          attrs={'id': 'secCode'})[0]
                items = client.parseDOM(r, 'tbody')[0]
                items = client.parseDOM(items, 'tr')

            else:
                title, season, episode = re.findall(
                    '^(?P<title>.+)\s+S(\d+)E(\d+)', query, re.I)[0]
                #xbmc.log('$#$MATCH-SUBZ: %s | %s | %s' % (title, season, episode), xbmc.LOGNOTICE)

                season, episode = '%01d' % int(season), '%01d' % int(episode)
                hdlr = 'season-%s-episode-%s' % (season, episode)

                if imdb.startswith('tt'):
                    r = self.s.get('https://subztv.online/view/%s' %
                                   imdb).content
                    # xbmc.log('$#$MATCH-SUBZ-RRR-source: %s' % r)
                    #r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                    frames = client.parseDOM(r, 'a', ret='href')
                    frame = [i for i in frames if hdlr in i][0]
                else:
                    baseurl = 'https://api.thetvdb.com/login'
                    series_url = 'https://api.thetvdb.com/series/%s'
                    greek_api = 'CAYAM6RT1K2SERUE'
                    user_key = '7F5420E18BAD7762'
                    username = '******'

                    _headers = {
                        'Content-Type': 'application/json',
                        'Accept': 'application/json',
                        'Connection': 'close'
                    }

                    post = {
                        "apikey": greek_api,
                        "username": username,
                        "userkey": user_key
                    }

                    # data = requests.post(baseurl, data=json.dumps(post), headers=_headers).json()
                    data = client.request(baseurl,
                                          post=json.dumps(post),
                                          headers=_headers)

                    auth = 'Bearer %s' % urllib.unquote_plus(
                        json.loads(data)['token'])
                    _headers['Authorization'] = auth

                    series_data = client.request(series_url % imdb,
                                                 headers=_headers)
                    imdb = json.loads(series_data)['data']['imdbId']
                    #xbmc.log('$#$MATCH-SUBZ-RRR-IMDB: %s' % imdb)
                    r = self.s.get('https://subztv.online/view/%s' %
                                   imdb).content
                    # xbmc.log('$#$MATCH-SUBZ-RRR-source: %s' % r)
                    #r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                    frames = client.parseDOM(r, 'a', ret='href')
                    frame = [i for i in frames if hdlr in i][0]

                #xbmc.log('$#$MATCH-SUBZ-λινκ: %s' % frame)
                r = self.s.get(frame).text
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                secCode = client.parseDOM(r,
                                          'input',
                                          ret='value',
                                          attrs={'id': 'secCode'})[0]
                items = client.parseDOM(r, 'tbody')[0]
                items = client.parseDOM(items, 'tr')

        except BaseException:
            return

        for item in items:
            try:

                try:
                    imdb = re.search('\/(tt\d+)\/', frame).groups()[0]
                except BaseException:
                    imdb = re.search('\/(tt\d+)', frame).groups()[0]

                data = re.findall(
                    '''downloadMe\(['"](\w+\-\w+).+?label.+?>(\d+).+?<td>(.+?)</td''',
                    str(item), re.I | re.DOTALL)[0]

                name = data[2]
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = 'https://subztv.online/dll/{}/0/{}'.format(
                    data[0], secCode)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                down = data[1]
                rating = self._rating(down)

                self.list.append({
                    'name': name,
                    'url': '%s|%s|%s|%s|%s|%s' % (frame.encode('utf-8'), url,
                                                  cj['__cfduid'], cj['PHPSESSID'],
                                                  name, imdb),
                    'source': 'subztv',
                    'rating': rating
                })

            except BaseException:
                pass

        return self.list
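
Example #13 logs into the old TheTVDB v2 API to turn a TVDB series id into an IMDb id: it POSTs the apikey/username/userkey JSON to /login, then sends the returned token as a Bearer Authorization header when fetching the series record. The commented-out line in the example shows the same login done with requests; a condensed sketch of that flow (the credentials and series id here are placeholders):

import json
import requests

post = {'apikey': 'YOUR_API_KEY', 'username': 'YOUR_USER', 'userkey': 'YOUR_USER_KEY'}
hdrs = {'Content-Type': 'application/json', 'Accept': 'application/json'}

# Log in and promote the returned token to a Bearer Authorization header.
token = requests.post('https://api.thetvdb.com/login',
                      data=json.dumps(post), headers=hdrs).json()['token']
hdrs['Authorization'] = 'Bearer {}'.format(token)

# Look up the series and read its IMDb id, as the scraper above does.
series = requests.get('https://api.thetvdb.com/series/123456', headers=hdrs).json()
imdb_id = series['data']['imdbId']  # '123456' stands in for a real TVDB series id
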
Example #14
    def get(self, query):
        try:
            match = re.findall('(.+?) \((\d{4})\)/imdb=$', query)

            if len(match) > 0:

                title, year = match[0][0], match[0][1]

                query = ' '.join(
                    urllib.unquote_plus(
                        re.sub('%\w\w', ' ',
                               urllib.quote_plus(title))).split())

                url = 'https://subz.xyz/search?q=%s' % urllib.quote_plus(query)

                result = client.request(url)
                result = re.sub(r'[^\x00-\x7F]+', ' ', result)

                url = client.parseDOM(result,
                                      'section',
                                      attrs={'class': 'movies'})[0]
                url = re.findall('(/movies/\d+)', url)
                url = [x for y, x in enumerate(url) if x not in url[:y]]
                url = [urljoin('https://subz.xyz', i) for i in url]
                url = url[:3]

                for i in url:
                    c = cache.get(self.cache, 2200, i)

                    if c is not None:
                        if cleantitle.get(c[0]) == cleantitle.get(
                                title) and c[1] == year:
                            try:
                                item = self.r
                            except:
                                item = client.request(i)
                            break

            else:

                title, season, episode = re.findall(
                    '(.+?) S(\d+)E(\d+)/imdb=$', query)[0]

                season, episode = '%01d' % int(season), '%01d' % int(episode)

                query = ' '.join(
                    urllib.unquote_plus(
                        re.sub('%\w\w', ' ',
                               urllib.quote_plus(title))).split())

                url = 'https://subz.xyz/search?q=%s' % urllib.quote_plus(query)

                result = client.request(url)
                result = re.sub(r'[^\x00-\x7F]+', ' ', result)

                url = client.parseDOM(result,
                                      'section',
                                      attrs={'class': 'tvshows'})[0]
                url = re.findall('(/series/\d+)', url)
                url = [x for y, x in enumerate(url) if x not in url[:y]]
                url = [urljoin('https://subz.xyz', i) for i in url]
                url = url[:3]

                for i in url:
                    c = cache.get(self.cache, 2200, i)

                    if c is not None:
                        if cleantitle.get(c[0]) == cleantitle.get(title):
                            item = i
                            break

                item = '%s/seasons/%s/episodes/%s' % (item, season, episode)
                item = client.request(item)

            item = re.sub(r'[^\x00-\x7F]+', ' ', item)
            items = client.parseDOM(item, 'tr', attrs={'data-id': '.+?'})
        except:
            return

        for item in items:
            try:

                r = client.parseDOM(item, 'td', attrs={'class': '.+?'})[-1]

                url = client.parseDOM(r, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = url.replace("'", "").encode('utf-8')

                name = url.split('/')[-1].strip()
                name = re.sub('\s\s+', ' ', name)
                name = name.replace('_', '').replace('%20', '.')
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                self.list.append({
                    'name': name,
                    'url': url,
                    'source': 'subzxyz',
                    'rating': 5
                })
            except:
                pass

        return self.list
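
The comprehension url = [x for y, x in enumerate(url) if x not in url[:y]] in Example #14 is an order-preserving de-duplication of the scraped links. On Python 3.7+, where dict insertion order is guaranteed, dict.fromkeys gives the same result in linear time; a small sketch:

links = ['/movies/1', '/movies/2', '/movies/1', '/movies/3']

# Order-preserving de-duplication, as written in Example #14 (quadratic).
deduped = [x for y, x in enumerate(links) if x not in links[:y]]

# Equivalent on Python 3.7+, where dicts keep insertion order (linear).
deduped_fast = list(dict.fromkeys(links))

assert deduped == deduped_fast == ['/movies/1', '/movies/2', '/movies/3']
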
Example #15
    def get(self, query):
        try:
            query, imdb = query.split('/imdb=')
            match = re.findall(r'^(?P<title>.+)[\s+\(|\s+](?P<year>\d{4})',
                               query)
            # xbmc.log('MATCH: {}'.format(match))
            cookie = self.s.get(self.baseurl, headers=self.hdr)

            cj = requests.utils.dict_from_cookiejar(cookie.cookies)

            if len(match) > 0:

                title, year = match[0][0], match[0][1]

                if imdb.startswith('tt'):
                    frame = self.baseurl + 'view/{}'.format(imdb)
                    r = self.s.get(frame).text
                    if six.PY2:
                        r = re.sub(r'[^\x00-\x7F]+', ' ', r)

                    # try:
                    #     r = r.decode('utf-8', errors='replace')
                    # except AttributeError:
                    #     pass
                else:
                    url = self.baseurl + 'search/{}/movies'.format(
                        quote(title))

                    data = self.s.get(url).text
                    data = client.parseDOM(data, 'span', attrs={'class': 'h5'})
                    data = [(client.parseDOM(i, 'a')[0],
                             client.parseDOM(i, 'a', ret='href')[0])
                            for i in data if i]
                    frame = [
                        i[1] for i in data
                        if cleantitle.get(i[0]) == cleantitle.get(title)
                    ][0]

                    r = self.s.get(frame).text
                    if six.PY2:
                        r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                    # try:
                    #     r = r.decode('utf-8', errors='replace')
                    # except AttributeError:
                    #     pass
                secCode = client.parseDOM(r,
                                          'input',
                                          ret='value',
                                          attrs={'id': 'secCode'})[0]
                items = client.parseDOM(r, 'tbody')[0]
                # xbmc.log('ITEMS: {}'.format(items))
                items = client.parseDOM(items, 'tr')

            else:
                title, season, episode = re.findall(
                    r'^(?P<title>.+)\s+S(\d+)E(\d+)', query, re.I)[0]
                hdlr = 'season-{}-episode-{}'.format(int(season), int(episode))
                if imdb.startswith('tt'):
                    r = self.s.get(self.baseurl + 'view/{}'.format(imdb)).text
                    # r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                    frames = client.parseDOM(r, 'a', ret='href')
                    link = [i for i in frames if hdlr in i]

                    if not link:
                        frame = self.baseurl + 'view/{}'.format(imdb)
                    else:
                        frame = link[0]
                else:
                    if len(imdb) > 1:
                        baseurl = 'https://api.thetvdb.com/login'
                        series_url = 'https://api.thetvdb.com/series/%s'
                        greek_api = '7d4261794838bb48a3122381811ecb42'
                        user_key = 'TJXB86PGDBYN0818'
                        username = '******'

                        _headers = {
                            'Content-Type': 'application/json',
                            'Accept': 'application/json',
                            'Connection': 'close'
                        }

                        post = {
                            "apikey": greek_api,
                            "username": username,
                            "userkey": user_key
                        }

                        # data = requests.post(baseurl, data=json.dumps(post), headers=_headers).json()
                        data = client.request(baseurl,
                                              post=json.dumps(post),
                                              headers=_headers)
                        auth = 'Bearer {}'.format(
                            unquote_plus(json.loads(data)['token']))
                        _headers['Authorization'] = auth

                        series_data = client.request(series_url % imdb,
                                                     headers=_headers)
                        imdb = json.loads(series_data)['data']['imdbId']
                        r = self.s.get(self.baseurl +
                                       'view/{}'.format(imdb)).text
                        # r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                        frames = client.parseDOM(r, 'a', ret='href')
                        frame = [i for i in frames if hdlr in i][0]
                    else:
                        url = self.baseurl + 'search/{}/tv'.format(
                            quote(title))
                        data = self.s.get(url).text
                        data = client.parseDOM(data,
                                               'span',
                                               attrs={'class': 'h5'})
                        data = [(client.parseDOM(i, 'a')[0],
                                 client.parseDOM(i, 'a', ret='href')[0])
                                for i in data if i]

                        serie_link = [
                            i[1] for i in data
                            if cleantitle.get(i[0]) == cleantitle.get(title)
                        ][0]
                        imdbid = re.findall(r'\/(tt\d+)\/', serie_link)[0]
                        r = self.s.get(self.baseurl +
                                       'view/{}'.format(imdbid)).text
                        frames = client.parseDOM(r, 'a', ret='href')
                        frame = [i for i in frames if hdlr in i][0]

                frame = client.replaceHTMLCodes(frame)
                frame = six.ensure_text(frame, encoding='utf-8')
                r = self.s.get(frame).text
                # r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                secCode = client.parseDOM(r,
                                          'input',
                                          ret='value',
                                          attrs={'id': 'secCode'})[0]
                items = client.parseDOM(r, 'tbody')[0]
                items = client.parseDOM(items, 'tr')
                # xbmc.log('ITEMS: {}'.format(items))

        except BaseException:
            return

        for item in items:
            try:
                item = six.ensure_str(item, encoding='utf-8')
                # xbmc.log('$#$MATCH-SUBZ-ITEM: {}'.format(item))
                try:
                    imdb = re.search(r'\/(tt\d+)\/', str(frame)).groups()[0]
                except BaseException:
                    imdb = re.search(r'\/(tt\d+)', str(frame)).groups()[0]

                data = re.findall(
                    r'''downloadMe\(['"](\w+-\w+).+?label.+?>(\d+).+?<td>(.+?)</td''',
                    item, re.I | re.DOTALL)[0]
                name = data[2]
                name = client.replaceHTMLCodes(name)

                url = self.baseurl + 'dll/{}/0/{}'.format(data[0], secCode)
                url = client.replaceHTMLCodes(url)
                url = six.ensure_str(url, encoding='utf-8')
                name = six.ensure_str(name)
                down = data[1]
                rating = str(self._rating(down))

                self.list.append({
                    'name': name,
                    'url': '{}|{}|{}|{}|{}'.format(frame, url, cj['PHPSESSID'],
                                                   name, imdb),
                    'source': 'subztv',
                    'rating': rating
                })

            except BaseException:
                pass

        return self.list
Example #16
    def get(self, query):
        try:
            query, imdb = query.split('/imdb=')
            match = re.findall(r'^(?P<title>.+)[\s+\(|\s+](?P<year>\d{4})', query)
            # xbmc.log('$#$MATCH-S4F: %s' % match, xbmc.LOGNOTICE)

            if len(match) > 0:
                hdr = {
                    'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3',
                    'Referer': 'https://www.subs4free.info/'}

                title, year = match[0][0], match[0][1]

                query = quote_plus('{} {}'.format(title, year))

                url = urljoin(self.base_link, self.search % query)

                req = requests.get(url, headers=hdr)
                cj = req.cookies
                r = req.text
                # r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                if six.PY2:
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                # try:
                #     r = r.decode('utf-8', errors='replace')
                # except UnicodeEncodeError:
                #     pass
                # xbmc.log('$#$HTML: %s' % r, xbmc.LOGNOTICE)

                urls = client.parseDOM(r, 'div', attrs={'class': 'movie-download'})
                # urls += client.parseDOM(r, 'div', attrs={'class': ' seeMedium'})
                # xbmc.log('$#$URLS-start: %s' % urls, xbmc.LOGNOTICE)
                urls = [i for i in urls if '/greek-sub' in i]
                # urls = [(client.parseDOM(i, 'tr')[0], re.findall(r'<b>(\d+)</b>DLs', i, re.I)[0]) for i in urls if i]
                urls = [(client.parseDOM(i, 'a', ret='href')[0],
                         client.parseDOM(i, 'a', ret='title')[0],
                         re.findall(r'<b>(\d+)</b>DLs', i, re.I)[0]) for i in urls if i]
                # xbmc.log('$#$URLS: %s' % urls, xbmc.LOGNOTICE)
                urls = [(urljoin(self.base_link, i[0]), i[1].split('for ', 1)[1],
                         i[2]) for i in urls if i]
                urls = [(i[0], i[1], i[2]) for i in urls if i]
                # xbmc.log('$#$URLS: %s' % urls, xbmc.LOGNOTICE)


            else:
                hdr = {
                    'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3',
                    'Referer': 'https://www.subs4series.com/'}
                title, hdlr = re.findall(r'^(?P<title>.+)\s+(?P<hdlr>S\d+E\d+)', query, re.I)[0]
                # xbmc.log('$#$MATCH-S4F: %s | %s' % (title, hdlr), xbmc.LOGNOTICE)

                # hdlr = 'S%02dE%02d' % (int(season), int(episode))

                query = quote('{} {}'.format(title, hdlr))

                url = urljoin(self.base_TVlink, self.search % query)

                req = requests.get(url, headers=hdr)

                cj = req.cookies
                r = req.text
                if six.PY2:
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                # try:
                #     r = r.decode('utf-8', errors='replace')
                # except UnicodeEncodeError:
                #     pass
                # xbmc.log('@@URL:%s' % r)

                urls = client.parseDOM(r, 'div', attrs={'class': ' seeDark'})
                urls += client.parseDOM(r, 'div', attrs={'class': ' seeMedium'})
                urls = [i for i in urls if not '/en.gif' in i]
                urls = [(client.parseDOM(i, 'tr')[0], re.findall(r'<B>(\d+)</B>DLs', i, re.I)[0]) for i in urls if i]
                urls = [(client.parseDOM(i[0], 'a', ret='href')[0],
                         client.parseDOM(i[0], 'a', ret='title')[0], i[1]) for i in urls if i]
                urls = [(urljoin(self.base_TVlink, i[0]), re.sub('Greek subtitle[s] for ', '', i[1]),
                         i[2]) for i in urls if i]
                urls = [(i[0], i[1], i[2]) for i in urls if i]

        except BaseException:
            return

        for i in urls:
            try:
                rating = str(self._rating(i[2]))
                name = i[1].replace('_', '').replace('%20', '.')
                name = client.replaceHTMLCodes(name)
                name = six.ensure_str(name, 'utf-8')
                url = i[0]
                url = client.replaceHTMLCodes(url)
                url = six.ensure_str(url, 'utf-8')

                self.list.append({'name': name, 'url': '{}|{}'.format(url, cj['PHPSESSID']),
                                  'source': 's4f', 'rating': rating})
            except BaseException:
                pass

        return self.list
Example #17
    def get(self, query):

        try:
            query, imdb = query.split('/imdb=')
            match = re.findall('^(?P<title>.+)[\s+\(|\s+](?P<year>\d{4})',
                               query)
            #xbmc.log('$#$MATCH-YIFI: %s' % match, xbmc.LOGNOTICE)
            if len(match) > 0:
                title, year = match[0][0], match[0][1]
                if imdb.startswith('tt'):
                    r = client.request(self.base_link + 'movie-imdb/%s' % imdb)

                else:
                    url = urlparse.urljoin(
                        self.base_link, self.search) % urllib.quote_plus(title)
                    r = client.request(url)
                    data = client.parseDOM(r, 'div', attrs={'id':
                                                            'content'})[0]
                    data = client.parseDOM(data,
                                           'li',
                                           attrs={'class': 'movie-wrapper'})
                    for i in data:
                        try:
                            name = client.parseDOM(i,
                                                   'span',
                                                   attrs={'class': 'title'
                                                          })[0].encode('utf-8')
                            if not cleantitle.get(title) == cleantitle.get(
                                    client.replaceHTMLCodes(name)):
                                raise Exception()
                            y = client.parseDOM(
                                i,
                                'span',
                                attrs={'class': 'wrap-enlarge year'})[0]
                            y = re.search('(\d{4})', y).groups()[0]
                            if not year == y: raise Exception()
                            url = client.parseDOM(i, 'a', ret='href')[0]
                            url = url.encode('utf-8')
                            url = urlparse.urljoin(self.base_link, url)
                            r = client.request(url)
                        except BaseException:
                            pass

                data = client.parseDOM(r, 'li', attrs={'data-id': '\d+'})
                items = [i for i in data if 'greek' in i.lower()]
                urls = []
                for item in items:
                    try:
                        rating = client.parseDOM(item,
                                                 'span',
                                                 attrs={'title': 'rating'})[0]
                        name = client.parseDOM(item,
                                               'span',
                                               attrs={'class':
                                                      'subdesc'})[0].replace(
                                                          'subtitle', '')
                        name = client.replaceHTMLCodes(name)
                        name = name.encode('utf-8')

                        url = client.parseDOM(item,
                                              'a',
                                              ret='href',
                                              attrs={'class':
                                                     'subtitle-page'})[0]
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')
                        urls += [(name, url, rating)]

                    except BaseException:
                        pass
            else:
                return self.list

        except BaseException:
            return

        for i in urls:
            try:
                r = client.request(urlparse.urljoin(self.base_link, i[1]))
                url = client.parseDOM(
                    r,
                    'a',
                    ret='href',
                    attrs={'class': 'dl-button blue download-subtitle'})[0]
                self.list.append({
                    'name': i[0],
                    'url': url,
                    'source': 'yifi',
                    'rating': '5'
                })
            except BaseException:
                pass

        return self.list
Example #18
    def get(self, query):
        try:
            query, imdb = query.split('/imdb=')
            match = re.findall('^(?P<title>.+)[\s+\(|\s+](?P<year>\d{4})',
                               query)
            #xbmc.log('$#$MATCH-SUBZ: %s' % match, xbmc.LOGNOTICE)

            if len(match) > 0:

                title, year = match[0][0], match[0][1]
                cj = requests.get('https://subztv.online/rainbow/master-js',
                                  headers=self.hdr).cookies

                if imdb.startswith('tt'):
                    r = requests.get('https://subztv.online/view/%s' % imdb,
                                     headers=self.hdr,
                                     cookies=cj).content
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                else:
                    url = 'https://subztv.online/search/%s/movies' % urllib.quote(
                        title)

                    data = requests.get(url, headers=self.hdr).content
                    data = client.parseDOM(data, 'span', attrs={'class': 'h5'})
                    data = [(client.parseDOM(i, 'a')[0],
                             client.parseDOM(i, 'a', ret='href')[0])
                            for i in data if i]

                    link = [
                        i[1] for i in data
                        if cleantitle.get(i[0]) == cleantitle.get(title)
                    ][0]

                    cj = requests.get(
                        'https://subztv.online/rainbow/master-js',
                        headers=self.hdr).cookies

                    r = requests.get(link, headers=self.hdr,
                                     cookies=cj).content
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)

                secCode = client.parseDOM(r,
                                          'input',
                                          ret='value',
                                          attrs={'id': 'secCode'})[0]
                items = client.parseDOM(r, 'tbody')[0]
                items = client.parseDOM(items, 'tr')

            else:
                title, season, episode = re.findall(
                    '^(?P<title>.+)\s+S(\d+)E(\d+)', query, re.I)[0]
                xbmc.log(
                    '$#$MATCH-SUBZ: %s | %s | %s' % (title, season, episode),
                    xbmc.LOGNOTICE)

                season, episode = '%01d' % int(season), '%01d' % int(episode)
                hdlr = 'season-%s-episode-%s' % (season, episode)
                cj = requests.get('https://subztv.online/rainbow/master-js',
                                  headers=self.hdr).cookies
                xbmc.log('$#$MATCH-SUBZ-ΨΟΟΚΙΕΣ: %s' % hdlr)
                xbmc.log('$#$MATCH-SUBZ-IMDB: %s' % imdb)
                if imdb.startswith('tt'):
                    xbmc.log('$#$MALAKASSSSSS')
                    r = requests.get('https://subztv.online/view/%s' % imdb,
                                     headers=self.hdr).content
                    # xbmc.log('$#$MATCH-SUBZ-RRR-source: %s' % r)
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                    frames = client.parseDOM(r, 'a', ret='href')
                    frame = [i for i in frames if hdlr in i][0]
                    xbmc.log('$#$MATCH-SUBZ-IMDB: %s' % frame)
                else:
                    url = 'https://subztv.online/search/%s/tv' % urllib.quote(
                        title)
                    data = requests.get(url, headers=self.hdr).text
                    data = re.sub(r'[^\x00-\x7F]+', ' ', data)
                    # xbmc.log('$#$MATCH-SUBZ-DATA-source: %s' % data)
                    data = client.parseDOM(data, 'span', attrs={'class': 'h5'})
                    data = [(client.parseDOM(i, 'a')[0],
                             client.parseDOM(i, 'a', ret='href')[0])
                            for i in data if i]
                    xbmc.log('$#$MATCH-SUBZ-DATA-list: %s' % data)
                    try:
                        frame = [i[1] for i in data if hdlr in i[1]][0]
                        xbmc.log('$#$MATCH-SUBZ-TRYYYY: %s' % frame)
                    except BaseException:
                        link = [
                            i[1] for i in data
                            if cleantitle.get(i[0]) == cleantitle.get(title)
                        ][0]
                        xbmc.log('$#$MATCH-SUBZ-EXCEPT: %s' % link)
                        r = requests.get(link, headers=self.hdr,
                                         cookies=cj).text
                        r = re.sub(r'[^\x00-\x7F]+', ' ', r)

                        url = client.parseDOM(
                            r,
                            'section',
                        )
                        url = [i for i in url if 'sessaon' in i][0]
                        url = client.parseDOM(url, 'a', ret='href')
                        frame = [i for i in url if hdlr in i][0]
                        xbmc.log('$#$MATCH-SUBZ-LINKKK: %s' % frame)

                r = requests.get(frame, headers=self.hdr).content
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                secCode = client.parseDOM(r,
                                          'input',
                                          ret='value',
                                          attrs={'id': 'secCode'})[0]
                items = client.parseDOM(r, 'tbody')[0]
                items = client.parseDOM(items, 'tr')

        except BaseException:
            return

        for item in items:
            try:

                data = re.compile(
                    '''downloadMe\(['"](\w+\-\w+).+?label.+?>(\d+).+?<td>(.+?)</td''',
                    re.I | re.DOTALL).findall(str(item))[0]
                name = data[2]

                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = 'https://subztv.online/' + 'dll/' + data[
                    0] + '/0/' + secCode
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                down = data[1]
                rating = self._rating(down)

                self.list.append({
                    'name': name,
                    'url': '%s|%s|%s|%s' % (url, name, cj['PHPSESSID'], imdb),
                    'source': 'subztv',
                    'rating': rating
                })

            except BaseException:
                pass

        return self.list
Example #19
    def get(self, query):
        try:
            match = re.findall('(.+?) \((\d{4})\)/imdb=(.+?)$', query)

            if len(match) > 0:
    
                title, year, imdb = match[0][0], match[0][1], match[0][2]
                cj = requests.get('https://subztv.club/rainbow/master-js', headers=self.hdr).cookies
                
                if imdb.startswith('tt'):
                    r = requests.get('https://subztv.club/view/%s' % imdb, headers=self.hdr, cookies=cj).content
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                else:
                    url = 'https://subztv.club/search/%s/movies' % urllib.quote(title)
                    
                    data = requests.get(url, headers=self.hdr).content
                    data = client.parseDOM(data, 'span', attrs={'class': 'h5'})
                    data = [(client.parseDOM(i, 'a')[0],
                             client.parseDOM(i, 'a', ret='href')[0]) for i in data if i]
    
                    link = [i[1] for i in data if cleantitle.get(i[0]) == cleantitle.get(title)][0]
    
                    cj = requests.get('https://subztv.club/rainbow/master-js', headers=self.hdr).cookies
                    
                    r = requests.get(link, headers=self.hdr, cookies=cj).content
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)

                secCode = client.parseDOM(r,'input', ret='value', attrs={'id':'secCode'})[0]
                items = client.parseDOM(r, 'tbody')[0]
                items = client.parseDOM(items, 'tr')

            else:
                title, season, episode = re.findall('(.+?) S(\d+)E(\d+)/imdb=', query)[0]
    
                season, episode = '%01d' % int(season), '%01d' % int(episode)
                hdlr = 'season-%s-episode-%s' % (season, episode)
                url = 'https://subztv.club/search/%s/tv' % urllib.quote(title)
                data = requests.get(url, headers=self.hdr).content
                data = client.parseDOM(data, 'span', attrs={'class':'h5'})
                data = [(client.parseDOM(i, 'a')[0],
                         client.parseDOM(i, 'a', ret='href')[0])for i in data if i]
                try:
                    url = [i[1] for i in data if hdlr in i[1]][0]
                except:
                    link = [i[1] for i in data if cleantitle.get(i[0]) == cleantitle.get(title)][0]

                    cj = requests.get('https://subztv.club/rainbow/master-js', headers=self.hdr).cookies
                    r = requests.get(link, headers=self.hdr, cookies=cj).content
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)

                    url = client.parseDOM(r, 'section')
                    url = [i for i in url if 'sessaon' in i][0]
                    url = client.parseDOM(url, 'a', ret='href')
                    url = [i for i in url if hdlr in i][0]
                imdb = re.findall('(tt\d+)', url)[0]
                cj = requests.get('https://subztv.club/rainbow/master-js', headers=self.hdr).cookies
    
                r = requests.get(url, headers=self.hdr, cookies=cj).content
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                secCode = client.parseDOM(r,'input', ret='value', attrs={'id':'secCode'})[0]
                items = client.parseDOM(r, 'tbody')[0]
                items = client.parseDOM(items, 'tr')

        except:
            return

        for item in items:
            try:
                
                data = re.compile('''downloadMe\(['"](\w+\-\w+).+?label.+?>(\d+).+?<td>(.+?)</td''', re.I|re.DOTALL).findall(str(item))[0]
                name = data[2]
                
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = 'https://subztv.club/' + 'dll/' + data[0] + '/0/' + secCode
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                down = data[1]
                rating = self._rating(down)

                self.list.append({'name': name, 'url': '%s|%s|%s|%s' % (url, name, cj['PHPSESSID'], imdb), 'source': 'subztv', 'rating': rating})
                
            except:
                pass

        return self.list
Example #20
    def get(self, query):

        try:

            match = re.findall('(.+?) \((\d{4})\)/imdb=', query)

            if len(match) > 0:
                hdr = {
                    'User-Agent':
                    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
                }

                title, year = match[0][0], match[0][1]

                query = urllib.quote_plus(title + ' ' + year)

                url = urlparse.urljoin(self.base_link, self.search % query)

                req = requests.get(url, headers=hdr)
                cj = req.cookies
                r = req.content
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)

                urls = client.parseDOM(r, 'div', attrs={'class': ' seeDark'})
                urls += client.parseDOM(r,
                                        'div',
                                        attrs={'class': ' seeMedium'})
                urls = [i for i in urls if 'com/el.gif' in i]
                urls = [(client.parseDOM(i, 'tr')[0],
                         re.findall('<B>(\d+)</B>DLs', i, re.I)[0])
                        for i in urls if i]
                urls = [(client.parseDOM(i[0], 'a', ret='href')[0],
                         client.parseDOM(i[0], 'a', ret='title')[0], i[1])
                        for i in urls if i]
                urls = [(urlparse.urljoin(self.base_link,
                                          i[0]), i[1].split('for ',
                                                            1)[1], i[2])
                        for i in urls if i]
                urls = [(i[0], i[1], i[2]) for i in urls if i]

            else:
                hdr = {
                    'User-Agent':
                    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3',
                    'Referer': 'http://www.subs4series.com/'
                }
                title, season, episode = re.findall('(.+?) S(\d+)E(\d+)/imdb=',
                                                    query)[0]

                hdlr = 'S%02dE%02d' % (int(season), int(episode))

                query = urllib.quote(title + ' ' + hdlr)

                url = urlparse.urljoin(self.base_TVlink, self.search % query)

                req = requests.get(url, headers=hdr)

                cj = req.cookies
                r = req.content
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                xbmc.log('@@URL:%s' % r)

                urls = client.parseDOM(r, 'div', attrs={'class': ' seeDark'})
                urls += client.parseDOM(r,
                                        'div',
                                        attrs={'class': ' seeMedium'})
                urls = [i for i in urls if not 'com/en.gif' in i]
                urls = [(client.parseDOM(i, 'tr')[0],
                         re.findall('<B>(\d+)</B>DLs', i, re.I)[0])
                        for i in urls if i]
                urls = [(client.parseDOM(i[0], 'a', ret='href')[0],
                         client.parseDOM(i[0], 'a', ret='title')[0], i[1])
                        for i in urls if i]
                urls = [(urlparse.urljoin(self.base_TVlink, i[0]),
                         re.sub('Greek subtitle[s] for ', '', i[1]), i[2])
                        for i in urls if i]
                urls = [(i[0], i[1], i[2]) for i in urls if i]

        except:
            return

        for i in urls:
            try:
                rating = self._rating(i[2])
                name = i[1].replace('_', '').replace('%20', '.')
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = i[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({
                    'name': name,
                    'url': '%s|%s|%s' % (url, cj['PHPSESSID'], cj['__cfduid']),
                    'source': 's4f',
                    'rating': rating
                })
            except:
                pass

        return self.list