Code example #1
 def parse_menu(self, url, meniu, info={}):
     lists = []
     imagine = ''
     if meniu == 'recente' or meniu == 'cauta':
         if meniu == 'cauta':
             from resources.Core import Core
             Core().searchSites({'landsearch': self.__class__.__name__})
         else:
             link = fetchData(url)
             regex_menu = '''<article(.+?)</article'''
             regex_submenu = '''href=['"](.+?)['"].+?title=['"](.+?)['"].+?content=['"]([htp].+?)['"]'''
             regex_search = '''<span class='pager-older-link.+?href=['"](.+?)['"].+?</span'''
             if link:
                 # iterate article blocks under a distinct name so the 'meniu'
                 # switch stays intact for the 'Next' entry appended below
                 for articol in re.compile(regex_menu,
                                           re.DOTALL).findall(link):
                     match = re.findall(regex_submenu, articol, re.DOTALL)
                     for legatura, nume, imagine in match:
                         nume = htmlparser.HTMLParser().unescape(
                             nume.decode('utf-8')).encode('utf-8')
                         info = {
                             'Title': nume,
                             'Plot': nume,
                             'Poster': imagine
                         }
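                         # pull season/episode numbers out of Romanian titles
                         # such as "Sezonul 2 Episodul 5"; the season group is optional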
                         szep = re.findall(
                             '(?:sezo[a-zA-Z\s]+(\d+).+?)?epi[a-zA-Z\s]+(\d+)',
                             nume, re.IGNORECASE | re.DOTALL)
                         if szep:
                             try:
                                 if re.search('–|-|~', nume):
                                     all_name = re.split(
                                         r'–|-|:|~', nume, 1)
                                     title = all_name[0]
                                     title2 = all_name[1]
                                 else:
                                     title = nume
                                     title2 = ''
                                 title, year = xbmc.getCleanMovieTitle(
                                     title)
                                 title2, year2 = xbmc.getCleanMovieTitle(
                                     title2)
                                 title = title if title else title2
                                 year = year if year else year2
                                 if year: info['Year'] = year
                                 if szep[0][1] and not szep[0][0]:
                                     info['Season'] = '01'
                                 else:
                                     info['Season'] = str(szep[0][0])
                                 info['Episode'] = str(szep[0][1])
                                 info['TvShowTitle'] = (re.sub(
                                     '(?:sezo[a-zA-Z\s]+\d+.+?)?epi[a-zA-Z\s]+\d+.+?$',
                                     '',
                                     title,
                                     flags=re.IGNORECASE
                                     | re.DOTALL)).strip()
                             except:
                                 pass
                         lists.append(
                             (nume, legatura, imagine, 'get_links', info))
                 match = re.compile(regex_search, re.DOTALL).findall(link)
                 if match:
                     nexturl = match[0]
                     lists.append(
                         ('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'get_links':
         sources = []
         link = fetchData(url)
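         # collect every iframe embed on the page; entries still containing the
         # JS template fragment '+f.id+' are placeholders rather than real links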
         regex_lnk = '''(?:item title="(.+?)".+?)?<iframe.+?src="((?:[htt]|[//]).+?)"'''
         match_lnk = re.findall(regex_lnk, link, re.IGNORECASE | re.DOTALL)
         for nume, match in match_lnk:
             if not '+f.id+' in match: sources.append((nume, match))
         for host, link1 in get_links(sources):
             lists.append(
                 (host, link1, '', 'play', info, url)
             )  #addLink(host, link1, thumb, name, 10, striphtml(match_nfo[0]))
     elif meniu == 'liste':
         link = fetchData(url)
         regex_menu = '''(?s)<table (.+?)</table'''
         regex_submenu = '''(?:(?s)<li.+?href="(.+?)">(.+?)<(?:.+?src="(.+?)")?|td>.+?href="(.+?)">(.+?)<)'''
         regex2_submenu = '''(?s)data-label="(.+?)"><a.+?href="(.+?)"(?:.+?src="(.+?)")?'''
         for meniu in re.compile(regex_menu, re.DOTALL).findall(link):
             match = re.compile(regex_submenu).findall(meniu)
             for legatura, nume, imagine, legatura2, nume2 in match:
                 if not imagine: imagine = self.thumb
                 if not legatura and not nume:
                     legatura = legatura2
                     nume = nume2
                 nume = htmlparser.HTMLParser().unescape(
                     striphtml(nume.decode('utf-8'))).encode('utf-8')
                 if legatura.endswith(".html"): switch = 'get_links'
                 elif re.search('/search/', legatura): switch = 'recente'
                 else: switch = 'liste'
                 if nume and not nume.isspace():
                     lists.append((nume, legatura.replace('"', ''), imagine,
                                   switch, info))
         for meniu in re.compile(regex_menu, re.DOTALL).findall(link):
             match = re.compile(regex2_submenu).findall(meniu)
             for nume, legatura, imagine in match:
                 if not imagine: imagine = self.thumb
                 nume = htmlparser.HTMLParser().unescape(
                     striphtml(nume.decode('utf-8'))).encode('utf-8')
                 if legatura.endswith(".html"): switch = 'get_links'
                 elif re.search('/search/', legatura): switch = 'recente'
                 else: switch = 'liste'
                 if not nume.isspace():
                     lists.append((nume, legatura.replace('"', ''), imagine,
                                   switch, info))
     elif meniu == 'online':
         link = fetchData(url)
         regex_menu = '''(?s)<table (.+?)</table'''
         regex_submenu = '''(?s)<li.+?href="(.+?)">(.+?)<'''
         for meniu in re.compile(regex_menu, re.DOTALL).findall(link):
             match = re.compile(regex_submenu).findall(meniu)
             for legatura, nume in match:
                 nume = htmlparser.HTMLParser().unescape(
                     striphtml(nume.decode('utf-8'))).encode('utf-8')
                 if re.search('sezonul|episodul', legatura):
                     switch = 'get_links'
                 elif re.search('/search/', legatura):
                     switch = 'recente'
                 else:
                     switch = 'liste'
                 lists.append(
                     (nume, legatura.replace('"',
                                             ''), self.thumb, switch, info))
     return lists
Code example #2
    def parse_menu(self, url, meniu, info={}, keyw=None):
        lists = []
        #log('link: ' + link)
        imagine = ''
        if meniu == 'recente':
            link = fetchData(url, base_url + '/')
            regex_submenu = '''class="post.+?href=['"](.+?)['"].+?title">(.+?)<.+?(?:imdb).+?([\d.]+)?.+?views.+?(\d+).+?src="(.+?)"'''
            if link:
                match = re.compile(regex_submenu,
                                   re.IGNORECASE | re.DOTALL).findall(link)
                if len(match) > 0:
                    for legatura, nume, imdb, views, imagine in match:
                        try:
                            nume = htmlparser.HTMLParser().unescape(
                                nume.decode('utf-8')).encode('utf-8').strip()
                        except:
                            nume = nume.strip()
                        info = {
                            'Title': nume,
                            'Plot': nume,
                            'Poster': imagine,
                            'Rating': imdb
                        }
                        lists.append(
                            (nume, legatura, imagine, 'get_links', info))
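                # pagination: if a navigation block exists, build the next page URL
                # by incrementing /page/N or appending page/2 to the current URL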
                match = re.compile('"navigation', re.IGNORECASE).findall(link)
                if len(match) > 0:
                    if '/page/' in url:
                        new = re.compile('/page/(\d+)').findall(url)
                        nexturl = re.sub('/page/(\d+)',
                                         '/page/' + str(int(new[0]) + 1), url)
                    else:
                        if '/?s=' in url:
                            nextpage = re.compile('\?s=(.+?)$').findall(url)
                            nexturl = '%s%s?s=%s' % (
                                base_url, ('page/2/' if str(url).endswith('/')
                                           else '/page/2/'), nextpage[0])
                        else:
                            nexturl = url + "/page/2"
                    lists.append(('Next', nexturl, self.nextimage, meniu, {}))
        elif meniu == 'cauta':
            if url == 'post':

                if keyw:
                    url = self.get_search_url(keyw)
                    link = fetchData(url)
                else:
                    link = None
                    from resources.Core import Core
                    Core().searchSites({'landsearch': self.__class__.__name__})
            else:
                link = fetchData(url)
            regex = '''post-.+?href="(.+?)".+?>(.+?)<.+?summary">(.+?)</div'''
            if link:
                match = re.compile(regex,
                                   re.IGNORECASE | re.DOTALL).findall(link)
                if len(match) > 0:
                    for legatura, nume, descriere in match:
                        imagine = self.thumb
                        nume = htmlparser.HTMLParser().unescape(
                            nume.decode('utf-8')).encode('utf-8').strip()
                        descriere = htmlparser.HTMLParser().unescape(
                            striphtml(descriere).decode('utf-8')).encode(
                                'utf-8').strip()
                        info = {
                            'Title': nume,
                            'Plot': descriere,
                            'Poster': imagine
                        }
                        lists.append(
                            (nume, legatura, imagine, 'get_links', info))
                match = re.compile('"navigation', re.IGNORECASE).findall(link)
                if len(match) > 0:
                    if '/page/' in url:
                        new = re.compile('/page/(\d+)').findall(url)
                        nexturl = re.sub('/page/(\d+)',
                                         '/page/' + str(int(new[0]) + 1), url)
                    else:
                        if '/?s=' in url:
                            nextpage = re.compile('\?s=(.+?)$').findall(url)
                            nexturl = '%s%s?s=%s' % (
                                base_url, ('page/2/' if str(url).endswith('/')
                                           else '/page/2/'), nextpage[0])
                        else:
                            nexturl = url + "/page/2"
                    lists.append(('Next', nexturl, self.nextimage, meniu, {}))

        elif meniu == 'get_links':
            link = fetchData(url)
            links = []
            regex_lnk = '''<iframe.+?src="((?:[htt]|[//]).+?)"'''
            regex_infos = '''movie-description">(.+?)</p'''
            reg_id = '''data-singleid="(.+?)"'''
            reg_server = '''data-server="(.+?)"'''
            match_nfo = re.compile(regex_infos,
                                   re.IGNORECASE | re.DOTALL).findall(link)
            match_id = re.findall(reg_id, link, re.IGNORECASE | re.DOTALL)
            match_server = re.findall(reg_server, link,
                                      re.IGNORECASE | re.DOTALL)
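            # resolve each advertised server through the theme's WordPress admin-ajax
            # endpoint (action 'samara_video_lazyload'), keeping the first iframe URL
            # from each response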
            try:
                mid = list(set(match_id))[0]
                mserver = list(set(match_server))
                for code in mserver:
                    try:
                        get_stupid_links = fetchData(
                            '%s/wp-admin/admin-ajax.php' % base_url,
                            data={
                                'action': 'samara_video_lazyload',
                                'server': code,
                                'singleid': mid
                            })
                        match_lnk = re.findall(regex_lnk, get_stupid_links,
                                               re.IGNORECASE | re.DOTALL)
                        links.append(match_lnk[0])
                    except:
                        pass
            except:
                pass
            try:
                info = eval(str(info))
                info['Plot'] = (striphtml(match_nfo[0]).strip())
            except:
                pass
            for host, link1 in get_links(links):
                lists.append((host, link1, '', 'play', info, url))
        elif meniu == 'genuri':
            link = fetchData(url)
            regex_cats = '''"cat-item.+?href=['"](.+?)['"](?:>|.+?title.+?">)(.+?)<'''
            if link:
                match = re.compile(regex_cats, re.IGNORECASE | re.MULTILINE
                                   | re.DOTALL).findall(link)
                if len(match) > 0:
                    for legatura, nume in sorted(match, key=self.getKey):
                        lists.append(
                            (nume, legatura.replace('"',
                                                    ''), '', 'recente', info)
                        )  #addDir(nume, legatura.replace('"', ''), 6, movies_thumb, 'recente')
        return lists
Code example #3
File: filmehdnet.py  Project: drkman1/gruprepo
 def parse_menu(self, url, meniu, info={}):
     lists = []
     imagine = ''
     if meniu == 'recente' or meniu == 'cauta':
         if meniu == 'cauta':
             from resources.Core import Core
             Core().searchSites({'landsearch': self.__class__.__name__})
         else: 
             link = fetchData(url)
             regex_submenu = '''class="imgleft".+?href="(.+?)".+?src="(.+?)".+?href.+?>(.+?)<'''
             if link:
                 match = re.compile(regex_submenu, re.DOTALL).findall(link)
                 for legatura, imagine, nume in match:
                     nume = htmlparser.HTMLParser().unescape(nume.decode('utf-8')).encode('utf-8')
                     info = {'Title': nume,'Plot': nume,'Poster': imagine}
                     if 'serial-tv' in link or 'miniserie-tv' in link:
                         try:
                             if re.search('–|-|~', nume):
                                 all_name = re.split(r'–|-|:|~', nume,1)
                                 title = all_name[0]
                                 title2 = all_name[1]
                             else:
                                 # no separator: use the whole name as the title
                                 title = nume
                                 title2 = ''
                             title, year = xbmc.getCleanMovieTitle(title)
                             title2, year2 = xbmc.getCleanMovieTitle(title2)
                             title = title if title else title2
                             year = year if year else year2
                             info['Year'] = year
                             info['TVShowTitle'] = title
                         except:pass
                     lists.append((nume, legatura, imagine, 'get_all', info))
                 match = re.compile('class=\'wp-pagenavi', re.IGNORECASE).findall(link)
                 if len(match) > 0:
                     if '/page/' in url:
                         new = re.compile('/page/(\d+)').findall(url)
                         nexturl = re.sub('/page/(\d+)', '/page/' + str(int(new[0]) + 1), url)
                     else:
                         if '/?s=' in url:
                             nextpage = re.compile('\?s=(.+?)$').findall(url)
                             nexturl = '%s%s?s=%s' % (base_url, ('page/2/' if str(url).endswith('/') else '/page/2/'), nextpage[0])
                         else: nexturl = url + "/page/2"
                     lists.append(('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'get_all':
         link = fetchData(url)
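         # each server tab ("tabs_desc_<n>") may carry an episode label inside a
         # <center> block plus a lazily loaded data-src embed URL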
         regex_lnk = '''(?:id="tabs_desc_.+?_(.+?)".+?)?(?:<center>(.+?)</center>.+?)?data-src=['"]((?:[htt]|[//]).+?)['"]'''
         regex_infos = '''Descriere film.+?p>(.+?)</p'''
         match_lnk = re.findall(regex_lnk, link, re.IGNORECASE | re.DOTALL)
         match_nfo = re.findall(regex_infos, link, re.IGNORECASE | re.DOTALL)
         info = eval(str(info))
         try:
             info['Plot'] = (striphtml(match_nfo[0]).strip())
         except: pass
         for server, name, legatura in match_lnk:
             if server: lists.append(('Server %s' % server,legatura,'','nimic', info, url))
             if not legatura.startswith('http'):
                 legatura = '%s%s' % (base_url, legatura.replace('&amp;', '&'))
                 name = striphtml(name)
                 if info.get('TVShowTitle'):
                     try:
                         szep = re.findall('sezo[a-zA-Z\s]+(\d+)\s+epi[a-zA-Z\s]+(\d+)', name, re.IGNORECASE)
                         if szep:
                             info['Season'] = str(szep[0][0])
                             info['Episode'] = str(szep[0][1])
                     except: pass
                 if name: lists.append((name,legatura,'','get_links', str(info), url, '1'))
     elif meniu == 'get_links':
         link = fetchData(url)
         regex_lnk = '''<iframe(?:.+?)?src=['"]((?:[htt]|[//]).+?)['"]'''
         match_lnk = re.compile(regex_lnk, re.IGNORECASE | re.DOTALL).findall(link)
         from resources import Core
         core = Core.Core()
         core.executeAction({
             'info': quote(info),
             'favorite': 'check',
             'site': 'filmehdnet',
             'landing': quote(url),
             'nume': 'nume',
             'switch': 'play',
             'link': quote(match_lnk[0]),
             'action': 'OpenSite',
             'watched': 'check',
             'subtitrare': ''
         })
     elif meniu == 'genuri':
         link = fetchData(url)
         cats = []
         regex_menu = '''<ul[\s]+class="sub-menu(.+?)</li></ul></div> '''
         regex_submenu = '''<li.+?a href="(.+?)">(.+?)<'''
         for meniu in re.compile(regex_menu, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(link):
             match = re.compile(regex_submenu, re.DOTALL).findall(meniu)
             for legatura, nume in match:
                 nume = clean_cat(htmlparser.HTMLParser().unescape(nume.decode('utf-8')).encode('utf-8')).capitalize()
                 cats.append((legatura, nume))
             cats.append(('http://filmehd.net/despre/filme-romanesti', 'Romanesti'))
         for legatura, nume in sorted(cats, key=self.getKey):
             lists.append((nume,legatura.replace('"', ''),self.thumb,'recente', info))
     elif meniu == 'ani':
         import datetime
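         # one menu entry per year, from the current year back to 1930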
         an = datetime.datetime.now().year
         while (an > 1929):
             legatura = base_url + '/despre/filme-' + str(an)
             lists.append((str(an),legatura,self.thumb,'recente', info))
             an -= 1
     return lists
Code example #4
    def parse_menu(self, url, meniu, info={}):
        lists = []
        imagine = ''
        if meniu == 'recente' or meniu == 'cauta':
            if meniu == 'cauta':
                from resources.Core import Core
                Core().searchSites({'landsearch': self.__class__.__name__})
            else:
                link = fetchData(url)
                gid = re.search('gid="(.+?)"', link)
                regex_submenu = '''data-movie-id="(.+?)".+?href="(.+?)".+?data-url="(.+?)".+?(?:eps">(.+?)</span.+?)?(?:quality"(?:[a-zA-Z\n\s#=":]+)?>(.+?)<.+?)?data-original="(.+?)".+?info">(.+?)</span'''
                if link:
                    match = re.compile(regex_submenu, re.DOTALL).findall(link)
                    for mid, legatura, infolink, season, calitate, imagine, nume in match:
                        nume = (htmlparser.HTMLParser().unescape(
                            striphtml(nume).decode('utf-8')).encode('utf-8')
                                ).strip()
                        info = {'Title': nume, 'Plot': nume, 'Poster': imagine}
                        lists.append(
                            (nume, '%ssplitthishere%ssplitthishere%s' %
                             (mid, legatura, gid.group(1) if gid else 'nimic'),
                             imagine, 'get_all', info))
        elif meniu == 'seriale' or meniu == 'filme' or meniu == 'gen':
            tip = 'tip'
            new_url = base_url + '/ajax/filtru.php'
            if meniu == 'gen':
                tip = 'genul'
                gen = url.split('splitthishere')
                try:
                    genul = gen[0].rsplit('/', 1)[-1]
                except:
                    genul = gen[0]
                tipmode = genul
                url = gen[1]
            elif meniu == 'seriale':
                tipmode = 'tv'
            elif meniu == 'filme':
                tipmode = 'film'
            data = {tip: tipmode, 'offset': url}
            link = fetchData(new_url, data=data)
            regex_submenu = '''data-movie-id="(.+?)".+?href="(.+?)".+?data-url="(.+?)".+?(?:eps">(.+?)</span.+?)?(?:quality"(?:[a-zA-Z\n\s#=":]+)?>(.+?)<.+?)?data-original="(.+?)".+?info">(.+?)</span'''
            if link:
                match = re.compile(regex_submenu, re.DOTALL).findall(link)
                for mid, legatura, infolink, season, calitate, imagine, nume in match:
                    nume = (htmlparser.HTMLParser().unescape(
                        striphtml(nume).decode('utf-8')).encode('utf-8')
                            ).strip()
                    info = {'Title': nume, 'Plot': nume, 'Poster': imagine}
                    lists.append((nume, '%ssplitthishere%s' % (mid, legatura),
                                  imagine, 'get_all', info))
                nexturl = int(url) + 48
                if meniu == 'gen':
                    lists.append(
                        ('Next', '%ssplitthishere%s' % (genul, nexturl),
                         self.nextimage, meniu, {}))
                else:
                    lists.append(('Next', nexturl, self.nextimage, meniu, {}))
        elif meniu == 'get_all':
            import base64
            urls = url.split('splitthishere')
            try:
                gid_url = fetchData(urls[1], base_url)
            except:
                gid_url = ''
            gid = re.search('gid="(.+?)"', gid_url) if gid_url else ''
            if gid: gid = gid.group(1)
            else: gid = 'nimic'
            info = json.loads(info)
            titlu = info.get('Title')
            #fetchData('%s/ajax/so.php?mid=%s' % (base_url, urls[0]), headers={'Host': 'filme-online.to'})
            new_url = '%s/ajax/mep.php?id=%s' % (base_url, urls[0])
            link = fetchData(new_url, rtype='json')
            regex_servers = r'''div id="sv-(.+?)</div></div>'''
            if link:
                servers = re.findall(regex_servers, link.get('html'))
                ts = link.get('ts')
                try:
                    updata = re.findall('updata[\s=]+"(.+?)"',
                                        link.get('html'))[0]
                except:
                    updata = '0'
                for server in servers:
                    server_number = re.findall(
                        r'''"ep-.+?data-id=[\\'"]+(.+?)[\\'"]+.+?epNr=[\\'"]+(.+?)[\\'"]+.+?so=[\\'"]+(.+?)[\\'"]+[\s\w-]+=[\\'"]+(.+?)[\\'"]+.+?data-tip="(.+?)".+?data-index=[\\'"]+(.+?)[\\'"]+.+?data-srvr=[\\'"]+(.+?)[\\'"]+.+?ep-item[\sso\d\\">]+(.+?)<''',
                        server,
                        flags=re.I)
                    for data_id, ep_nr, data_so, data_server, tip, data_index, data_srvr, name in server_number:
                        #if data_server == '5' or data_server == '6':
                        if data_server == '5': numeserver = '1'
                        elif data_server == '6': numeserver = '2'
                        elif data_server == 'G1': numeserver = '3'
                        elif data_server == '4': numeserver = '4'
                        else: numeserver = data_server
                        if numeserver:
                            #if ep_nr <> '0' and not '/tv/' in urls[1]:
                            if '/tv/' in urls[1]:
                                nume = titlu.split(' - ')
                                nameone = nume[0]
                                try:
                                    sezon = str(
                                        re.findall('season(?:[\s]+)?(\d+)',
                                                   nume[1], re.IGNORECASE)[0])
                                except:
                                    sezon = ''
                                episod = ep_nr
                                info['Title'] = name
                                info['Season'] = sezon
                                info['Episode'] = episod
                                info['TVShowTitle'] = nameone
                            else:
                                nameone = None
                            link1 = '%ssplitthishere%ssplitthishere%ssplitthishere%ssplitthishere%ssplitthishere%ssplitthishere%ssplitthishere%ssplitthishere%ssplitthishere%ssplitthishere%ssplitthishere%ssplitthishere%s' % (
                                urls[0], data_id, data_so, data_server,
                                urls[1], gid, ep_nr, data_index, ts, updata,
                                base64.urlsafe_b64encode(url), tip, data_srvr)
                            lists.append(
                                ('[COLOR lime]Server - %s:[/COLOR] %s %s' %
                                 (numeserver, nameone if nameone else titlu,
                                  name), link1, '', 'get_links', str(info),
                                 '1'))
        elif meniu == 'get_links':
            import base64
            x = "iQDWcsGqN"
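            # port of the site's client-side token scheme: x is the hard-coded salt,
            # r() sums character codes, functie() mixes two strings into a short hex
            # digest, and ops() folds the request parameters into the 'lid' value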

            def r(t):
                i = 0
                n = 0
                while (i < len(t)):
                    n += ord(t[i]) + i
                    i += 1
                return n

            def functie(t, i):
                e = 0
                n = 0
                while (n < max(len(t), len(i))):
                    e += ord(i[n]) if n < len(i) else 0
                    e += ord(t[n]) if n < len(t) else 0
                    n += 1
                return hex(int(e)).lstrip('0x')

            def ops(eid, up, ts, srvr):
                s = r(x)
                h = {}
                c = {}
                o = {
                    'id': eid,
                    'server': str(srvr),
                    'ts': ts
                }
                for n, p in o.items():
                    print(functie(x + n, str(p)))
                    s += r(functie(x + n, str(p)))
                return s

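            # tokens delivered with a leading '.' are obfuscated with a simple
            # character rotation; decodetoken() reverses the shift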
            def decodetoken(t):
                t = t[1:]
                i = -18
                e = []
                n = 0
                l = len(t)
                while (n < l):
                    c = ord(t[n])
                    if c >= 97 and c <= 122:
                        e.append((c - 71 + i) % 26 + 97)
                    else:
                        if c >= 65 and c <= 90:
                            e.append((c - 39 + i) % 26 + 65)
                        else:
                            e.append(c)
                    n += 1
                return ''.join(map(unichr, e))

            link_parts = url.split('splitthishere')
            #log(link_parts)
            mid = link_parts[0]
            eid = link_parts[1]
            so = link_parts[2]
            server = link_parts[3]
            referer = link_parts[4]
            epnr = link_parts[6]
            epindex = link_parts[7]
            ts = link_parts[8]
            up = link_parts[9]
            landing = base64.urlsafe_b64decode(
                link_parts[10]) if len(link_parts) > 10 else url
            tip = link_parts[11] if len(link_parts) > 11 else 'tv'
            srvr = link_parts[12] if len(link_parts) > 12 else (
                '24' if tip == 'embed' else '34')
            try:
                lid = '&lid=%s' % (ops(eid, up, ts, srvr))
            except:
                lid = ''
            gid = ('&gid=%s' %
                   link_parts[5]) if not link_parts[5] == 'nimic' else ''
            mstime = lambda: int(round(time.time() * 1000))
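            # mtoken.php returns heavily obfuscated (aaencode-style) JavaScript that
            # carries the _x/_y/_z/_ip request tokens; try a plain regex first, then
            # fall back to evaluating the unpacked script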
            if not tip == 'embed':
                #log(landing)
                if '/tv/' in landing or server == 'F2' or server == 'G4' or server == 'G1' or server == '4':
                    #&_=1506415634918
                    url_tokens = '%s/ajax/mtoken.php?eid=%s&mid=%s%s&ts=%s&up=%s&so=%s&epNr=%s&srvr=%s&_=%s' % (
                        base_url, eid, mid, lid, ts, up, so, epnr, srvr,
                        mstime())
                else:
                    url_tokens = '%s/ajax/mtoken.php?eid=%s&mid=%s&so=%s&server=%s&epNr=%s' % (
                        base_url, eid, mid, so, server, epnr)
                #log(url_tokens)
                headers = {
                    'Host':
                    'filme-online.to',
                    'Accept':
                    'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01'
                }
                tokens = fetchData(url_tokens, headers=headers)
                #log(tokens)
                x = re.search('''_x=['"]([^"']+)''', tokens)
                if x: x = x.group(1)
                y = re.search('''_y=['"]([^"']+)''', tokens)
                if y: y = y.group(1)
                if not x or not y:
                    try:
                        script = '(' + tokens.split("(_$$)) ('_');")[0].split(
                            "/* `$$` */")[-1].strip()
                        script = script.replace('(__$)[$$$]', '\'"\'')
                        script = script.replace('(__$)[_$]', '"\\\\"')
                        script = script.replace('(o^_^o)', '3')
                        script = script.replace('(c^_^o)', '0')
                        script = script.replace('(_$$)', '1')
                        script = script.replace('($$_)', '4')
                        script = script.replace('+', '|x|')
                        vGlobals = {
                            "__builtins__": None,
                            '__name__': __name__,
                            'str': str,
                            'Exception': Exception
                        }
                        vLocals = {'param': None}
                        exec(CODE % script) in vGlobals, vLocals
                        data = vLocals['param'].decode('string_escape')
                        x = re.search('''_x=['"]([^"']+)''', data).group(1)
                        y = re.search('''_y=['"]([^"']+)''', data).group(1)
                    except:
                        x = ''
                        y = ''
                ip = re.search('''_ip(?:[\s+])?=(?:[\s+])?['"]([^"']+)''',
                               tokens)
                ip = ip.group(1) if ip else ''
                z = re.search('''_z(?:[\s+])?=(?:[\s+])?['"]([^"']+)''',
                              tokens)
                z = z.group(1) if z else ''
                if y.startswith("."): y = decodetoken(y)
                if x.startswith("."): x = decodetoken(x)
            if tip == 'embed':
                if '/film/' in landing: gen = 'film'
                else: gen = 'tv'
                if so == '1' or so == '5': lid = '&lid=undefined'
                url_source = '%s/ajax/movie_embed.php?eid=%s%s%s&up=0&mid=%s%s&epNr=%s&type=%s&server=%s&epIndex=%s&so=%s' % (
                    base_url, eid, lid, ('&ts=%s' % ts) if ts else '', mid,
                    gid, epnr, gen, server, epindex, so)
            else:
                url_source = '%s/ajax/msources.php?eid=%s&x=%s&y=%s&z=%s&ip=%s&mid=%s%s&lang=rum&epIndex=%s&server=%s&so=%s&epNr=%s&srvr=%s' % (
                    'https://www.filme-online.to', eid, x, y, z, ip, mid, gid,
                    epindex, server, so, epnr, srvr)
            one_urls = fetchData(url_source,
                                 referer,
                                 rtype='json',
                                 headers={'Host': 'www.filme-online.to'})
            #log(url_source)
            selm = -1
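            # selm tracks the chosen source variant; -1 means nothing playable was selected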
            if one_urls:
                if tip == 'embed':
                    try:
                        playlink = one_urls.get('src')
                        sublink = None
                        selm = 0
                    except:
                        pass
                else:
                    try:
                        dialogb = xbmcgui.Dialog()
                        tracks = one_urls.get('playlist')[0].get('tracks')
                        if len(tracks) > 1:
                            sel = dialogb.select(
                                "Alege subtitrarea",
                                [sel_s.get('label') for sel_s in tracks])
                        else:
                            sel = 0
                        sublink = tracks[sel].get('file')
                        sublink = '%s%s' % (base_url,
                                            sublink) if sublink.startswith(
                                                '/') else sublink
                    except:
                        sublink = None
                    #try:
                    dialogb = xbmcgui.Dialog()
                    msources = one_urls.get('playlist')[0].get('sources')
                    if msources:
                        if isinstance(msources, list):
                            if len(msources) > 1:
                                selm = dialogb.select(
                                    "Alege varianta",
                                    [sel_m.get('label') for sel_m in msources])
                            else:
                                selm = 0
                            playlink = msources[selm].get('file')
                        else:
                            playlink = msources.get('file')
                            selm = 0
                        if playlink:
                            if playlink.endswith('?i=s'):
                                playlink = playlink.replace('?i=s', '')
                        else:
                            playlink = ''
                        if re.search('appspot|blogspot|googleapis', playlink):
                            playlink = playlink + '|User-Agent=%s' % quote(
                                randomagent())
                    else:
                        playlink = None
                data = json.loads(info)
                if data.get('TVShowTitle'):
                    viewmark = url
                    playname = '%s %s' % (data.get('TVShowTitle'),
                                          data.get('Title'))
                else:
                    viewmark = landing
                    playname = data.get('Title')
                if not sublink:
                    playname = playname + ' Fara subtitrare pe site'
                if playlink and selm != -1:
                    from resources import Core
                    core = Core.Core()
                    core.executeAction({
                        'info':
                        quote(info),
                        'favorite':
                        'check',
                        'site':
                        'filmeonlineto',
                        'landing':
                        quote(viewmark),
                        'nume':
                        playname,
                        'switch':
                        'play',
                        'link':
                        quote(playlink),
                        'action':
                        'OpenSite',
                        'watched':
                        'check',
                        'subtitrare':
                        quote(sublink) if sublink else ''
                    })
                else:
                    #log(playlink)
                    #log(selm)
                    xbmc.executebuiltin(
                        'Notification(%s,%s)' %
                        (xbmcaddon.Addon().getAddonInfo('name'),
                         'Nu s-a găsit link'))
                    #xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=False)
                #lists.append(('Play %s' % (playname),playlink,'','play', info, viewmark, sublink))
        elif meniu == 'genuri':
            link = fetchData(base_url)
            regex = '''title="genuri"(.+?)</div'''
            regex_cats = '''href="(.+?)">(.+?)<'''
            if link:
                for cats in re.findall(regex, link, re.DOTALL | re.IGNORECASE):
                    match = re.findall(regex_cats, cats, re.DOTALL)
                    if len(match) > 0:
                        for legatura, nume in sorted(match, key=self.getKey):
                            legatura = '%ssplitthishere0' % legatura
                            lists.append((nume, legatura, '', 'gen', info))
        return lists
Code example #5
 def parse_menu(self, url, meniu, info={}):
     lists = []
     imagine = ''
     if meniu == 'recente' or meniu == 'cauta':
         if meniu == 'cauta':
             from resources.Core import Core
             Core().searchSites({'landsearch': self.__class__.__name__})
         else:
             link = fetchData(url, url)
             if not re.search(">Nothing Found", link):
                 regex_menu = '''<article.+?href="(.+?)".+?(?:src="(.+?)".+?"Title">(.+?)<.+?(?:"Info">(.+?)</.+?)?"description">(.+?))?</articl'''
                 if link:
                     match = re.findall(regex_menu, link,
                                        re.DOTALL | re.IGNORECASE)
                     for legatura, imagine, nume, detalii, descriere in match:
                         if not "&paged=" in legatura:
                             nume = htmlparser.HTMLParser().unescape(
                                 striphtml(nume).decode('utf-8')).encode(
                                     'utf-8')
                             descriere = " ".join(
                                 htmlparser.HTMLParser().unescape(
                                     striphtml(descriere).decode(
                                         'utf-8')).encode('utf-8').split())
                             info = {
                                 'Title': nume,
                                 'Plot': descriere,
                                 'Poster': imagine
                             }
                             lists.append((nume, legatura, imagine,
                                           'get_links', info))
                     match = re.compile('pagenavi',
                                        re.IGNORECASE).findall(link)
                     if len(match) > 0:
                         if '/page/' in url:
                             new = re.compile('/page/(\d+)').findall(url)
                             nexturl = re.sub(
                                 '/page/(\d+)',
                                 '/page/' + str(int(new[0]) + 1), url)
                         else:
                             if '/?s=' in url:
                                 nextpage = re.compile(
                                     '\?s=(.+?)$').findall(url)
                                 nexturl = '%s%s?s=%s' % (base_url, (
                                     'page/2/' if str(url).endswith('/')
                                     else '/page/2/'), nextpage[0])
                             else:
                                 nexturl = url + "/page/2"
                         lists.append(
                             ('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'get_links':
         import base64
         second = []
         link = fetchData(url)
         regex_lnk = '''(?:">(Episodul.+?)<.+?)?<iframe.+?src="((?:[htt]|[//]).+?)"'''
         regex_lnk2 = '''(?:">(Episodul.+?)<.+?)?atob\("(.+?)"'''
         regex_infos = '''kalin".+?<p>(.+?)</p'''
         regex_tag = '''category tag">(.+?)<'''
         match_lnk = re.findall(regex_lnk, link, re.IGNORECASE | re.DOTALL)
         match_lnk2 = re.findall(regex_lnk2, link,
                                 re.IGNORECASE | re.DOTALL)
         match_nfo = re.findall(regex_infos, link,
                                re.IGNORECASE | re.DOTALL)
         match_tag = re.findall(regex_tag, link, re.IGNORECASE | re.DOTALL)
         try:
             info = eval(str(info))
             info['Plot'] = (striphtml(match_nfo[0]).strip())
             info['Genre'] = ', '.join(match_tag)
         except:
             pass
         infos = eval(str(info))
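         # some embeds arrive base64-encoded inside JS atob("...") calls; decode
         # those and merge them with the plain iframe matches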
         try:
             for nume2, coded in match_lnk2:
                 second.append((nume2, base64.b64decode(coded)))
             second = second + match_lnk
         except:
             second = match_lnk
         for nume, link1 in second:
             try:
                 host = link1.split('/')[2].replace('www.', '').capitalize()
                 try:
                     year = re.findall("\((\d+)\)", infos.get('Title'))
                     infos['Year'] = year[0]
                 except:
                     pass
                 try:
                     infos['TvShowTitle'] = re.sub(" (?:–|\().+?\)", "",
                                                   info.get('Title'))
                     try:
                         infos['Season'] = str(
                             re.findall("sezonul (\d+) ", info.get('Title'),
                                        re.IGNORECASE)[0])
                     except:
                         infos['Season'] = '01'
                     infos['Episode'] = str(
                         re.findall("episodul (\d+)$", nume,
                                    re.IGNORECASE)[0])
                     infos['Title'] = '%s S%sE%s' % (
                         infos['TvShowTitle'], infos['Season'].zfill(2),
                         infos['Episode'].zfill(2))
                     infos['Plot'] = infos['Title'] + ' ' + info['Plot']
                 except:
                     pass
                 if nume:
                     lists.append(('[COLOR lime]%s[/COLOR]' % nume, 'nimic',
                                   '', '', {}))
                 lists.append((host, link1, '', 'play', str(infos), url))
             except:
                 pass
     elif meniu == 'genuri':
         link = fetchData(url)
         regex_cats = '''categories-2"(.+?)</ul'''
         regex_cat = '''href="(.+?)">(.+?)<'''
         if link:
             for cat in re.findall(regex_cats, link,
                                   re.IGNORECASE | re.DOTALL):
                 match = re.findall(regex_cat, cat,
                                    re.IGNORECASE | re.DOTALL)
                 if len(match) > 0:
                     for legatura, nume in sorted(match, key=self.getKey):
                         if not 'fa-home' in nume:
                             nume = clean_cat(
                                 htmlparser.HTMLParser().unescape(
                                     nume.decode('utf-8')).encode(
                                         'utf-8')).capitalize()
                             lists.append((nume, legatura.replace('"', ''),
                                           '', 'recente', info))
     return lists
Code example #6
File: fsonlineorg.py  Project: drkman1/gruprepo
 def parse_menu(self, url, meniu, info={}):
     lists = []
     #log('link: ' + link)
     imagine = ''
     if meniu == 'recente' or meniu == 'cauta':
         if meniu == 'cauta':
             from resources.Core import Core
             Core().searchSites({'landsearch': self.__class__.__name__})
         else: 
             link = fetchData(url, base_url+ '/')
             regex = '''<div id="m.+?class="item".+?href="(.+?)".+?src="(.+?)".+?alt="(.+?)"(?:.+?"icon-star">(.+?)</span.+?"ttx">(.+?)<.+?"typepost">(.+?)<.+?"year">(.+?)<)?'''
             if link:
                 match = re.findall(regex, link, re.DOTALL)
                 for legatura, imagine, nume, rating, descriere, tip, an in match:
                     rating = striphtml(rating)
                     descriere = htmlparser.HTMLParser().unescape(descriere.decode('utf-8')).encode('utf-8').strip()
                     nume = htmlparser.HTMLParser().unescape(nume.decode('utf-8')).encode('utf-8').strip()
                     imagine = imagine.strip()
                     info = {'Title': nume,
                         'Plot': descriere,
                         'Rating': rating,
                         'Poster': imagine,
                         'Year': an}
                     numelista = '%s (%s)' % (nume, an) if an else nume
                     if tip == 'serial' or re.search('/seriale/', legatura): lists.append((numelista + ' - Serial', legatura, imagine, 'seriale', str(info)))
                     elif tip == 'film': lists.append((numelista,legatura,imagine,'get_links', str(info)))
                 match = re.compile('"paginador"', re.IGNORECASE).findall(link)
                 if len(match) > 0:
                     if '/page/' in url:
                         new = re.compile('/page/(\d+)').findall(url)
                         nexturl = re.sub('/page/(\d+)', '/page/' + str(int(new[0]) + 1), url)
                     else:
                         if '/?s=' in url:
                             nextpage = re.compile('\?s=(.+?)$').findall(url)
                             nexturl = '%s%s?s=%s' % (base_url, ('page/2/' if str(url).endswith('/') else '/page/2/'), nextpage[0])
                         else: nexturl = url + "/page/2"
                     lists.append(('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'get_links':
         from resources.lib import requests
         from resources.lib.requests.packages.urllib3.exceptions import InsecureRequestWarning
         requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
         s = requests.Session()
         #http://www.filmeserialeonline.org/wp-content/themes/grifus/includes/single/second.php
         second = "http://www.filmeserialeonline.org/wp-content/themes/grifus/loop/second.php"
         third = 'http://www.filmeserialeonline.org/wp-content/themes/grifus/includes/single/second.php'
         reg_id = '''id[\:\s]+(\d+)\}'''
         headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0', 'Referer': url}
         first = s.get(url, headers=headers)
         mid = re.findall(reg_id, first.content)[0].strip()
         dataid = {'id': mid}
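         # the player markup is only served after a priming POST carrying a
         # hard-coded captcha-style 'call' token; episode pages and film pages use
         # different second-stage endpoints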
         data1 = {'call': '03AHhf_52tCb5gUikGtjLeSMufA-2Hd3hcejVejJrPldhT-fjSepWRZdKTuQ0YjvPiph7-zcazBsIoVtGAwi_C3JsOFH74_TvXq2rRRQ4Aev59zTCFHFIAOOyxuOHRyIKIy4AZoxalLMegYUL5-J6LBvFZvFuTeKa6h3oNLISO4J0qw0fZSGrEhN02Hlbtnmdilj-nRUrMUCpPLWnZaV8eB8iatMaOg6FEqayxdJ1oF8AaFlOoVOnRrw_WWPu0cH97VkreacJNaQqh0qz-5yB1tbFD0GVOHLtU7Bd6DvUf_24hTxFsCszvjPD_hltYNxTrSOj49_lpTs279NghbyVvz-yVFfC-3mU-bQ'}
         s.post(second, data=data1, headers=headers)
         if re.search('/episodul/', url):
             g = s.post(second, data=dataid, headers=headers)
         else:  g = s.post(third, data=dataid, headers=headers)
         reg = '''<iframe(?:.+?)?src="(?:[\s+])?((?:[htt]|[//]).+?)"'''
         match_lnk = re.findall(reg, g.content, re.IGNORECASE | re.DOTALL)
         for host, link1 in get_links(match_lnk):
             if re.search('youtube.com', host, flags=re.IGNORECASE):
                 lists.append(('Trailer youtube',link1,'','play', info, url))
             else:
                 lists.append((host,link1,'','play', info, url))
     elif meniu == 'genuri':
         link = fetchData(base_url)
         regex_cats = '''"categorias">(.+?)</div'''
         regex_cat = '''href="(.+?)"(?:\s)?>(.+?)<.+?n>(.+?)<'''
         gen = re.findall(regex_cats, link, re.IGNORECASE | re.DOTALL)
         if url == 'seriale': match = re.findall(regex_cat, gen[1], re.DOTALL)
         else: match = re.findall(regex_cat, gen[0], re.DOTALL)
         for legatura, nume, cantitate in match:
             nume = clean_cat(htmlparser.HTMLParser().unescape(nume.decode('utf-8')).encode('utf-8')).capitalize()
             lists.append((nume,legatura,'','recente', info))
                     #for legatura, nume in sorted(match, key=self.getKey)
     elif meniu == 'seriale':
         link = fetchData(url)
         #log('link: ' + str(link))
         regex = '''(?:"se-q".+?title">(.*?)</span.+?)?"numerando">(.+?)<.+?class="episodiotitle.+?href="(.+?)"(?:[\s]+)?>(.+?)<.+?"date">(.+?)<'''
         match = re.findall(regex, link, re.DOTALL | re.IGNORECASE)
         info = eval(info)
         title = info.get('Title')
         #log(link)
         plot = info.get('Plot')
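         # "numerando" holds the numbering as "<season>x<episode>", e.g. "2x05"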
         for sezon, numerotare, link, nume, data in match:
             epis = numerotare.split('x')
             try:
                 infos = info
                 infos['Season'] = epis[0].strip()
                 infos['Episode'] = epis[1].strip()
                 infos['TVShowTitle'] = title
                 infos['Title'] = '%s S%02dE%02d' % (title, int(epis[0].strip()), int(epis[1].strip()))
                 infos['Plot'] = '%s S%02dE%02d - %s' % (title, int(epis[0].strip()), int(epis[1].strip()), plot)
             except: pass
             if sezon: lists.append(('[COLOR lime]%s[/COLOR]' % sezon,'nolink','','nimic', {}))
             lists.append((nume,link,'','get_links', str(info)))
     elif meniu == 'ani':
         link = fetchData(url)
         regex_cats = '''"filtro_y">.+?Anul(.+?)</div'''
         regex_cat = '''href="(.+?)"(?:\s)?>(.+?)<'''
         an = re.compile(regex_cats, re.DOTALL).findall(link)
         match = re.compile(regex_cat, re.DOTALL).findall(an[0])
         for legatura, nume in match:
             lists.append((nume,legatura,'','recente', info))
     return lists
Code example #7
 def parse_menu(self, url, meniu, info={}):
     lists = []
     #log('link: ' + link)
     imagine = ''
     if meniu == 'recente' or meniu == 'cauta':
         if meniu == 'cauta':
             from resources.Core import Core
             Core().searchSites({'landsearch': self.__class__.__name__})
         else:
             link = fetchData(url)
             regex_menu = '''"thumb".+?href="(.+?)"\s+title="(.+?)".+?src="(.+?)"'''
             if link:
                 match = re.findall(regex_menu, link,
                                    re.DOTALL | re.IGNORECASE)
                 for legatura, nume, imagine in match:
                     nume = htmlparser.HTMLParser().unescape(
                         striphtml(nume).decode('utf-8')).encode('utf-8')
                     info = {'Title': nume, 'Plot': nume, 'Poster': imagine}
                     lists.append(
                         (nume, legatura, imagine, 'get_links', info))
                 match = re.compile('class=\'wp-pagenavi',
                                    re.IGNORECASE).findall(link)
                 if len(match) > 0:
                     if '/page/' in url:
                         new = re.compile('/page/(\d+)').findall(url)
                         nexturl = re.sub('/page/(\d+)',
                                          '/page/' + str(int(new[0]) + 1),
                                          url)
                     else:
                         if '/?s=' in url:
                             nextpage = re.compile('\?s=(.+?)$').findall(
                                 url)
                             nexturl = '%s%s?s=%s' % (base_url, (
                                 'page/2/' if str(url).endswith('/') else
                                 '/page/2/'), nextpage[0])
                         else:
                             nexturl = url + "/page/2"
                     lists.append(
                         ('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'get_links':
         news = []
         link = fetchData(url)
         regex2_lnk = '''"entry-embed".+?src="((?:[htt]|[//]).+?)"'''
         regex_infos = '''kalin".+?<p>(.+?)</p'''
         regex_tag = '''category tag">(.+?)<'''
         match2_lnk = re.findall(regex2_lnk, link,
                                 re.IGNORECASE | re.DOTALL)
         match_nfo = re.findall(regex_infos, link,
                                re.IGNORECASE | re.DOTALL)
         match_tag = re.findall(regex_tag, link, re.IGNORECASE | re.DOTALL)
         try:
             info = eval(str(info))
             info['Plot'] = (striphtml(match_nfo[0]).strip())
             info['Genre'] = ', '.join(match_tag)
         except:
             pass
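         # every "entry-embed" URL is an intermediate page; fetch it and extract
         # the real hoster iframe from the response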
         try:
             for new_link in match2_lnk:
                 regex_lnk = '''<iframe.+?src="((?:[htt]|[//]).+?)"'''
                 match_lnk = re.findall(regex_lnk, fetchData(new_link),
                                        re.IGNORECASE | re.DOTALL)[0]
                 news.append(match_lnk)
         except:
             pass
         for host, link1 in get_links(news):
             lists.append((host, link1, '', 'play', info, url))
     elif meniu == 'genuri':
         link = fetchData(url)
         regex_cats = '''"meniu_categorii"(.+?)</ul>'''
         regex_cat = '''href="(.+?)">(.+?)<'''
         if link:
             for cat in re.findall(regex_cats, link,
                                   re.IGNORECASE | re.DOTALL):
                 match = re.findall(regex_cat, cat,
                                    re.IGNORECASE | re.DOTALL)
                 if len(match) > 0:
                     for legatura, nume in sorted(match, key=self.getKey):
                         nume = clean_cat(htmlparser.HTMLParser().unescape(
                             nume.decode('utf-8')).encode(
                                 'utf-8')).capitalize()
                         lists.append((nume, legatura.replace('"', ''), '',
                                       'recente', info))
     return lists
Code example #8
File: pefilme.py  Project: drkman1/gruprepo
 def parse_menu(self, url, meniu, info={}):
     lists = []
     if meniu == 'recente' or meniu == 'cauta':
         if meniu == 'cauta':
             from resources.Core import Core
             Core().searchSites({'landsearch': self.__class__.__name__})
         else:
             link = fetchData(url, base_url + '/')
             regex_menu = '''post".+?href=['"](.+?)['"].+?imdb">(.+?)<.+?data-src=['"](.+?)['"].+?alt="(.+?)"'''
             if link:
                 match = re.compile(regex_menu, re.DOTALL).findall(link)
                 for legatura, imdb, imagine, nume in match:
                     nume = htmlparser.HTMLParser().unescape(
                         nume.decode('utf-8')).encode('utf-8')
                     info = {
                         'Title': nume,
                         'Plot': nume,
                         'Poster': imagine,
                         'Rating': imdb
                     }
                     if 'serial-tv' in link or 'miniserie-tv' in link or 'sezonul-' in link:
                         try:
                             if re.search('–|-|~', nume):
                                 all_name = re.split(r'–|-|:|~', nume, 1)
                                 title = all_name[0]
                                 title2 = all_name[1]
                             else:
                                 title2 = ''
                             title, year = xbmc.getCleanMovieTitle(title)
                             title2, year2 = xbmc.getCleanMovieTitle(title2)
                             title = title if title else title2
                             year = year if year else year2
                             info['Year'] = year
                             info['TVShowTitle'] = title
                         except:
                             pass
                     lists.append(
                         (nume, legatura, imagine, 'get_links', info))
                 match = re.compile('"current"',
                                    re.IGNORECASE).findall(link)
                 if len(match) > 0:
                     if '/page/' in url:
                         new = re.compile('/page/(\d+)').findall(url)
                         nexturl = re.sub('/page/(\d+)',
                                          '/page/' + str(int(new[0]) + 1),
                                          url)
                     else:
                         if '/?s=' in url:
                             nextpage = re.compile('\?s=(.+?)$').findall(
                                 url)
                             nexturl = '%s%s?s=%s' % (base_url, (
                                 'page/2/' if str(url).endswith('/') else
                                 '/page/2/'), nextpage[0])
                         else:
                             nexturl = url + "/page/2"
                     lists.append(
                         ('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'get_links':
         link = fetchData(url)
         cats = []
         links = []
         reg_info = '''"infos"(.+?)</div'''
         reg_cat = '''href.+?>(.+?)<'''
         reg_descriere = '''desfilm">(.+?)<(?:div|/div)'''
         reg_servers = '''class="tab"(.+?)</ul'''
         reg_each = '''href="(.+?)"(?:.+?)?>(.+?)</'''
         reg_frame = '''(?:.+?<strong>(.+?)</strong>.+?)?.+?<iframe(?:.+?)?src=['"]((?:[htt]|[//]).+?)['"]'''
         if link:
             import base64
             for coded in re.findall(reg_servers, link, re.DOTALL):
                 match = re.findall(reg_each, coded)
                 for legatura, nume in match:
                     try:
                         legatura = base64.b64decode(
                             re.findall('/\?(.+?)$', legatura)[0])
                         legatura = re.findall(reg_frame, legatura,
                                               re.IGNORECASE | re.DOTALL)
                         for nume, legatura in legatura:
                             links.append((nume, legatura))
                     except:
                         pass
             info = eval(str(info))
             try:
                 descriere = htmlparser.HTMLParser().unescape(
                     striphtml(
                         re.findall(reg_descriere, link,
                                    re.IGNORECASE | re.DOTALL)[0]).decode(
                                        'utf-8')).encode('utf-8').strip()
                 info['Plot'] = descriere
             except:
                 pass
             try:
                 for cat in re.findall(reg_info, link,
                                       re.IGNORECASE | re.DOTALL):
                     cats = re.findall(reg_cat, cat,
                                       re.IGNORECASE | re.DOTALL)
                 info['Genre'] = ', '.join(cats)
             except:
                 pass
             plot = info['Plot']
             for host, link1 in get_links(links):
                 infos = info
                 if infos.get('TVShowTitle'):
                     try:
                         szep = re.findall(
                             '(?:sezo[a-zA-Z\s]+(\d+).+?)?epi[a-zA-Z\s]+(\d+)',
                             host, re.IGNORECASE)
                         if szep:
                             if not szep[0][0] and szep[0][1]:
                                 infos['Season'] = '01'
                             else:
                                 infos['Season'] = str(szep[0][0])
                             infos['Episode'] = str(szep[0][1])
                             infos['Plot'] = '%s %s' % (host, plot)
                     except:
                         pass
                 lists.append((host, link1, '', 'play', str(infos), url))
     elif meniu == 'genuri':
         link = fetchData(url)
         regex_cats = '''="navbar2(.+?)</ul'''
         regex_cat = '''href=["'](.*?)['"].+?>(.+?)<'''
         if link:
             for cat in re.compile(regex_cats, re.IGNORECASE | re.MULTILINE
                                   | re.DOTALL).findall(link):
                 match = re.compile(regex_cat, re.DOTALL).findall(cat)
                 for legatura, nume in match:
                     if not re.search(
                             'porno|xxx', nume, flags=re.IGNORECASE):
                         lists.append((nume, legatura.replace('"', ''), '',
                                       'recente', info))
     elif meniu == 'tags':
         link = fetchData(url)
         regex_cats = '''etichete"(.+?)</ul'''
         regex_cat = '''href=["'](.*?)['"].+?ll>(.+?)</a> -(.+?)<'''
         if link:
             for cat in re.compile(regex_cats, re.IGNORECASE | re.MULTILINE
                                   | re.DOTALL).findall(link):
                 match = re.compile(regex_cat, re.DOTALL).findall(cat)
                 for legatura, nume, numar in match:
                     nume = nume + numar
                     lists.append(
                         (nume, legatura.replace('"',
                                                 ''), '', 'recente', info))
     return lists
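
The get_links branch above hides each hoster behind an href whose query string is base64 of the real <iframe> markup. A minimal sketch of that round trip, written for Python 2 like the examples themselves (the hoster URL is made up):

import base64
import re

reg_frame = '''<iframe(?:.+?)?src=['"]((?:[htt]|[//]).+?)['"]'''

payload = base64.b64encode('<iframe src="http://hoster.example/embed/abc"></iframe>')
href = 'http://example.com/player/?' + payload
coded = re.findall(r'/\?(.+?)$', href)[0]   # payload after '/?'
html = base64.b64decode(coded)              # back to iframe HTML
print(re.findall(reg_frame, html, re.IGNORECASE | re.DOTALL))
# prints: ['http://hoster.example/embed/abc']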
Code example #9
    def parse_menu(self, url, meniu, info={}):
        lists = []
        imagine = ''
        if meniu == 'recente' or meniu == 'cauta':
            if meniu == 'cauta':
                from resources.Core import Core
                Core().searchSites({'landsearch': self.__class__.__name__})
            else:
                if ('%s/episod' % base_url) in url:
                    link = fetchData(url)
                    regex_all = '''<td class="bb">.+?href=".+?>(.+?)<.+?href=".+?>(.+?)<.+?src="(.+?)".+?href="(.+?)".+?>(.+?)<.+?p>(.+?)</p.+?"dd"><center>(.+?)<.+?"ee"><center>(.+?)<.+?'''
                    match = re.findall(regex_all, link,
                                       re.IGNORECASE | re.DOTALL)
                    for serial, e_pisod, imagine, legatura, nume, descriere, add_data, traducator in match:
                        serial = htmlparser.HTMLParser().unescape(
                            striphtml(serial).decode('utf-8')).encode('utf-8')
                        e_pisod = htmlparser.HTMLParser().unescape(
                            striphtml(e_pisod).decode('utf-8')).encode('utf-8')
                        nume = htmlparser.HTMLParser().unescape(
                            striphtml(nume).decode('utf-8')).encode('utf-8')
                        descriere = htmlparser.HTMLParser().unescape(
                            striphtml(descriere).decode('utf-8')).encode(
                                'utf-8')
                        imagine = imagine.strip()
                        try:
                            imagine = re.findall('url=(.+?)$', imagine)[0]
                        except:
                            pass
                        pisod = re.findall('sezonul-(\d+)?.+?episodul-(\d+)?',
                                           e_pisod, re.IGNORECASE | re.DOTALL)
                        info = {
                            'Title': '%s S%s-E%s %s' % (serial.strip(), pisod[0][0],
                                                        pisod[0][1], nume.strip()),
                            'Poster': imagine,
                            'Plot': descriere.strip(),
                            'TVShowTitle': serial.strip(),
                            'Season': pisod[0][0],
                            'Episode': pisod[0][1]
                        }
                        nume = '%s: %s : %s' % (serial.strip(), e_pisod,
                                                nume.strip())
                        lists.append(
                            (nume, legatura, imagine, 'get_links', info))
                else:
                    link = fetchData(url)
                    regex_all = '''<div id="mt-(.+?year">.+?</span>)(?:.+?<span class="calidad2">(.+?)</span>)?'''
                    regex_info = '''src="(.+?)".+?boxinfo.+?href="(.+?)".+?"tt">(.+?)</.+?"ttx">(.+?)</.+?"year">(.+?)<'''
                    regex_n_v = '''(?:IMDB|TMDb):(.+?)</s.+?type.+?>(.+?)<'''
                    if link:
                        for bloc, tip in re.findall(
                                regex_all, link,
                                re.IGNORECASE | re.MULTILINE | re.DOTALL):
                            match = re.findall(regex_info, bloc, re.DOTALL)
                            voturi = re.findall(regex_n_v, bloc,
                                                re.IGNORECASE | re.DOTALL)
                            if voturi:
                                rating = striphtml(
                                    voturi[0][0]).split("/")[0].strip()
                                votes = striphtml(
                                    voturi[0][0]).split("/")[1].strip()
                                post = voturi[0][1]
                            else:
                                rating = None
                                votes = None
                                post = ''
                            for imagine, legatura, nume, descriere, an in match:
                                imagine = imagine.strip()
                                try:
                                    imagine = re.findall(
                                        'url=(.+?)$', imagine)[0]
                                except:
                                    pass
                                nume = htmlparser.HTMLParser().unescape(
                                    striphtml(nume).decode('utf-8')).encode(
                                        'utf-8')
                                descriere = htmlparser.HTMLParser().unescape(
                                    striphtml(descriere).decode(
                                        'utf-8')).encode('utf-8')
                                info = {
                                    'Title': nume,
                                    'Poster': imagine,
                                    'Plot': descriere.strip(),
                                    'Year': an,
                                    'Rating': '%s' % rating,
                                    'Votes': '%s' % votes,
                                    'PlotOutline': '%s' % (descriere.strip())
                                }
                                nume = (nume + ' - ' + tip) if tip else nume
                                if 'eri' in tip or 'epis' in post or '/seriale/' in legatura:
                                    lists.append((nume, legatura, imagine,
                                                  'get_episoade', info))
                                else:
                                    lists.append((nume, legatura, imagine,
                                                  'get_links', info))
                match = re.search('"pagination"|"paginador"',
                                  link,
                                  flags=re.IGNORECASE)
                if match:
                    if '/page/' in url:
                        new = re.findall('/page/(\d+)', url)
                        nexturl = re.sub('/page/(\d+)',
                                         '/page/' + str(int(new[0]) + 1), url)
                    else:
                        if '/?s=' in url:
                            nextpage = re.findall('\?s=(.+?)$', url)
                            nexturl = '%s%s?s=%s' % (
                                base_url, ('page/2/' if str(url).endswith('/')
                                           else '/page/2/'), nextpage[0])
                        else:
                            nexturl = url + "/page/2"
                    lists.append(('Next', nexturl, self.nextimage, meniu, {}))
        elif meniu == 'get_episoade':
            link = fetchData(url)
            regex_all = '''numerando">(\d+ x \d+)<.+?href="(.+?)">(.+?)<.+?date">(.+?)<'''
            episod = re.compile(regex_all, re.IGNORECASE | re.MULTILINE
                                | re.DOTALL).findall(link)
            info = eval(str(info))
            title = info['Title']
            for numero, legatura, nume, data in episod:
                nume = htmlparser.HTMLParser().unescape(
                    nume.decode('utf-8')).encode('utf-8')
                ep_data = numero.split(' x ')
                info['TVShowTitle'] = title
                info['Title'] = '%s S%02dE%02d %s' % (
                    title.decode('utf-8').encode('utf-8'), int(
                        ep_data[0]), int(ep_data[1]), nume.strip())
                info['Season'] = ep_data[0]
                info['Episode'] = ep_data[1]
                lists.append(
                    (striphtml(str(numero) + ' ' + nume + ' - ' +
                               data).replace("\n",
                                             ""), legatura, '', 'get_links',
                     str(info)))
        elif meniu == 'get_links':
            link = fetchData(url)
            links = []
            regex_lnk = '''<(?:iframe.+?|script(?:\s)?)src=[\'"]((?:[htt]|[//]).+?(?!\.js))["\']'''
            regex_lnk2 = '''type=\'text/javascript\'>(?:\s)?str=['"](.+?)["']'''
            match_lnk2 = re.compile(regex_lnk2,
                                    re.IGNORECASE | re.DOTALL).findall(link)
            match_lnk = re.compile(regex_lnk,
                                   re.IGNORECASE | re.DOTALL).findall(link)
            for host, link1 in get_links(match_lnk):
                if not link1.endswith('.js'):
                    lists.append((host, link1, '', 'play', info, url))
            for coded in match_lnk2:
                try:
                    link_script = re.findall(regex_lnk,
                                             unquote(coded.replace('@', '%')),
                                             re.IGNORECASE | re.DOTALL)[0]
                    host = link_script.split('/')[2].replace('www.',
                                                             '').capitalize()
                    lists.append(
                        (host, link_script, '', 'play', str(info), url))
                except:
                    pass
        elif meniu == 'genuri':
            link = fetchData(base_url)
            regex_cats = '''"categorias">(.+?)</div'''
            regex_cat = '''href="(.+?)"(?:\s)?>(.+?)<.+?n>(.+?)<'''
            gen = re.findall(regex_cats, link, re.IGNORECASE | re.DOTALL)
            if url == 'seriale':
                match = re.findall(regex_cat, gen[1], re.DOTALL)
            else:
                match = re.findall(regex_cat, gen[0], re.DOTALL)
            for legatura, nume, cantitate in match:
                nume = clean_cat(htmlparser.HTMLParser().unescape(
                    nume.decode('utf-8')).encode('utf-8')).capitalize()
                lists.append((nume, legatura, '', 'recente', info))
        elif meniu == 'ani' or meniu == 'aniseriale':
            link = fetchData(url)
            regex_cats = '''"filtro_y">.+?anul(.+?)</div'''
            regex_cat = '''href="(.+?)"(?:\s)?>(.+?)<.+?n>(.+?)<'''
            an = re.compile(regex_cats, re.IGNORECASE | re.MULTILINE
                            | re.DOTALL).findall(link)
            if meniu == 'ani':
                match = re.compile(regex_cat, re.DOTALL).findall(an[0])
            else:
                match = re.compile(regex_cat, re.DOTALL).findall(an[1])
            for legatura, nume, cantitate in match:
                lists.append(('%s - %s' % (nume, cantitate), legatura, '',
                              'recente', info))
        elif meniu == 'calitate':
            link = fetchData(url)
            regex_cats = '''"filtro_y">.+?calita(.+?)</div'''
            regex_cat = '''href="(.+?)"(?:\s)?>(.+?)<.+?n>(.+?)<'''
            for cat in re.findall(regex_cats, link, re.IGNORECASE | re.DOTALL):
                match = re.findall(regex_cat, cat, re.DOTALL)
                for legatura, nume, cantitate in match:
                    nume = htmlparser.HTMLParser().unescape(
                        striphtml(nume).decode('utf-8')).encode('utf-8')
                    lists.append(('%s - %s' % (nume, cantitate), legatura, '',
                                  'recente', info))

        return lists
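
Almost every scraper in this listing builds its 'Next' entry the same way: bump the number in an existing /page/N segment, otherwise start at page 2, keeping any ?s= search query. A standalone sketch of that logic (base_url and the sample URLs are illustrative, and the search branch is slightly simplified compared to the inline versions):

import re

def next_page_url(url, base_url='http://example.com'):
    # Already paginated: replace /page/N with /page/N+1.
    if '/page/' in url:
        n = int(re.findall(r'/page/(\d+)', url)[0])
        return re.sub(r'/page/(\d+)', '/page/%d' % (n + 1), url)
    # Search results keep the ?s= query after the page segment.
    if '/?s=' in url:
        query = re.findall(r'\?s=(.+?)$', url)[0]
        return '%s/page/2/?s=%s' % (base_url, query)
    # Plain listing: start at page 2.
    return url.rstrip('/') + '/page/2'

print(next_page_url('http://example.com/filme/page/3'))  # .../filme/page/4
print(next_page_url('http://example.com/?s=test'))       # .../page/2/?s=test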
Code example #10
 def parse_menu(self, url, meniu, info={}):
     lists = []
     if meniu == 'recente':
         link = fetchData(url)
         regex = '''<li>(?:<strong>)?<a href=['"](.+?)['"].+?>(.+?)</li'''
         match = re.findall(regex, link, re.IGNORECASE | re.DOTALL)
         if len(match) > 0:
             for legatura, nume in match:
                 nume = htmlparser.HTMLParser().unescape(striphtml(nume).decode('utf-8')).encode('utf-8')
                 info = {'Title': nume,'Plot': nume,'Poster': self.thumb}
                 lists.append((nume,legatura,'','get_links', info))
     elif meniu == 'get_links':
         link = fetchData(url)
         nume = ''
         regex_lnk = '''(?:((?:episodul|partea|sursa)[\s]\d+).+?)?<iframe.+?src=['"]((?:[htt]|[//]).+?)["']'''
         regex_seriale = '''(?:<h3>.+?strong>(.+?)<.+?href=['"](.+?)['"].+?)'''
         regex_infos = '''detay-a.+?description">(.+?)</div'''
         match_lnk = re.findall(regex_lnk, link, re.IGNORECASE | re.DOTALL)
         match_srl = re.compile(regex_seriale, re.IGNORECASE | re.DOTALL).findall(link)
         match_nfo = re.compile(regex_infos, re.IGNORECASE | re.DOTALL).findall(link)
         try:
             info = eval(str(info))
             info['Plot'] = (striphtml(match_nfo[0]).strip())
         except: pass
         for host, link1 in get_links(match_lnk):
             lists.append((host,link1,'','play', info, url))
     elif meniu == 'by_genre' or meniu == 'cauta':
         if meniu == 'cauta':
             from resources.Core import Core
             Core().searchSites({'landsearch': self.__class__.__name__})
         else: 
             link = fetchData(url)
             regex_all = '''id="post-(.+?)(?:</div>){2}'''
             r_link = '''href=['"](.+?)['"].+?title.+?categ'''
             r_name = '''title.+?per.+?>(.+?)<.+?categ'''
             r_genre = '''category tag">(.+?)<'''
             r_autor = '''author">(.+?)<'''
             r_image = '''author".+?src="(.+?)"'''
             if link:
                 match = re.findall(regex_all, link, re.IGNORECASE | re.DOTALL)
                 for movie in match:
                     legatura = re.findall(r_link, movie, re.IGNORECASE | re.DOTALL)
                     if legatura:
                         legatura = legatura[0]
                         nume = re.findall(r_name, movie, re.IGNORECASE | re.DOTALL)[0]
                         gen = [', '.join(re.findall(r_genre, movie, re.IGNORECASE | re.DOTALL))]
                         autor = re.findall(r_autor, movie, re.IGNORECASE | re.DOTALL)[0]
                         imagine = re.findall(r_image, movie, re.IGNORECASE | re.DOTALL)[0]
                         nume = htmlparser.HTMLParser().unescape(striphtml(nume).decode('utf-8')).encode('utf-8').strip()
                         info = {'Title': nume,'Plot': '%s \nTraducator: %s' % (nume, autor),'Poster': imagine, 'Genre': gen}
                         lists.append((nume, legatura, imagine, 'get_links', info))
                 match = re.compile('"post-nav', re.IGNORECASE).findall(link)
                 if len(match) > 0:
                     if '/page/' in url:
                         new = re.compile('/page/(\d+)').findall(url)
                         nexturl = re.sub('/page/(\d+)', '/page/' + str(int(new[0]) + 1), url)
                     else:
                         if '/?s=' in url:
                             nextpage = re.compile('\?s=(.+?)$').findall(url)
                             nexturl = '%s/page/2/?s=%s' % (base_url, nextpage[0])
                         else: 
                             nexturl = '%s%s' % (url, 'page/2/' if str(url).endswith('/') else '/page/2/')
                     lists.append(('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'genuri':
         link = fetchData(url)
         regex_cat = '''class="cat-item.+?href=['"](.+?)['"][\s]?>(.+?)<'''
         if link:
             match = re.findall(regex_cat, link, re.IGNORECASE | re.DOTALL)
             if len(match) > 0:
                 for legatura, nume in match:
                     nume = clean_cat(htmlparser.HTMLParser().unescape(nume.decode('utf-8')).encode('utf-8')).capitalize()
                     lists.append((nume,legatura.replace('"', ''),'','by_genre', info))
     return lists
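
These parse_menu variants round-trip info through eval(str(info)) because the dict may arrive as a string from Kodi's plugin parameters. ast.literal_eval does the same job while refusing anything that is not a plain literal; a minimal sketch of the safer swap (not what the addon itself uses):

import ast

info_as_string = str({'Title': 'Nume film', 'Year': '2016'})
# literal_eval only parses Python literals, so a crafted parameter
# string cannot execute code the way eval() could.
info = ast.literal_eval(info_as_string)
print(info['Title'])  # Nume film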
           
Code example #11
File: filme3dnet.py  Project: drkman1/gruprepo
 def parse_menu(self, url, meniu, info={}, keyw=None):
     lists = []
     #log('link: ' + link)
     imagine = ''
     if meniu == 'recente':
         link = fetchData(url)
         regex_menu = r'''movie_box"(.+?)postbottom"'''
         rurl = r'''href=['"](.+?)?['"]'''
         rimagine = r'''src=['"](.+?)?['"]'''
         rnume = r'''movie-desc.+?alt.+?/>(.+?)?<sp'''
         rcategorii = r'''Cat.+?>(.+?)?<sp'''
         rdescriere = r'''line;">(.+?)?</div'''
         rcalitate = r'''class=['"](.+?)['"]>\1<'''
         if link:
             for movie in re.findall(regex_menu, link, re.IGNORECASE | re.MULTILINE | re.DOTALL):
                 if 'news-id' in movie:
                     legatura = re.findall(rurl, movie, re.DOTALL)[0]
                     imagine = re.findall(rimagine, movie, re.DOTALL)[0]
                     nume = re.findall(rnume, movie, re.DOTALL)[0]
                     nume = htmlparser.HTMLParser().unescape(striphtml(nume).decode('utf-8')).encode('utf-8').strip()
                     categorii = striphtml(re.findall(rcategorii, movie, re.DOTALL)[0]).strip()
                     try: calitate = re.findall(rcalitate, movie, re.DOTALL)[0]
                     except: calitate = ''
                     try: descriere = re.findall(rdescriere, movie, re.DOTALL)[0]
                     except: descriere = nume
                     descriere = htmlparser.HTMLParser().unescape(striphtml(descriere).decode('utf-8')).encode('utf-8').strip()
                     descriere = "-".join(descriere.split("\n"))
                     nume = '%s [COLOR yellow]%s[/COLOR]' % (nume, calitate)
                     info = {'Title': nume,'Plot': descriere,'Poster': imagine, 'Genre': categorii}
                     lists.append((nume, legatura, imagine, 'get_links', info))
             match = re.compile('pagenavi"', re.IGNORECASE).findall(link)
             if len(match) > 0:
                 if '/page/' in url:
                     new = re.compile('/page/(\d+)').findall(url)
                     nexturl = re.sub('/page/(\d+)', '/page/' + str(int(new[0]) + 1), url)
                 else:
                     if '/?s=' in url:
                         nextpage = re.compile('\?s=(.+?)$').findall(url)
                         nexturl = '%s%s?s=%s' % (base_url, ('page/2/' if str(url).endswith('/') else '/page/2/'), nextpage[0])
                     else: nexturl = url + "/page/2"
                 lists.append(('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'by_genre' or meniu == 'cauta':
         if meniu == 'cauta':
             if url == 'post' and keyw:
                 data = {'do': 'search', 'subaction': 'search', 'story': keyw}
                 link = fetchData(base_url, base_url + '/?s=' + keyw, data)
             else:
                 link = None
                 from resources.Core import Core
                 Core().searchSites({'landsearch': self.__class__.__name__})
         else: link = fetchData(url)
         regex_menu = r'''short_post">(.+?)more"'''
         regex_movie = r'''href=['"](.+?)?['"\s*][\s]?>(.+?)<.+?src=['"](.+?)['"].+?calitate.+?>(.+?)<.+?cat.+?:(.+?)</div'''
         if link:
             for movie in re.findall(regex_menu, link, re.IGNORECASE | re.MULTILINE | re.DOTALL):
                 for legatura, nume, imagine, calitate, categorii in re.findall(regex_movie, movie, re.IGNORECASE | re.DOTALL):
                     nume = htmlparser.HTMLParser().unescape(striphtml(nume).decode('utf-8')).encode('utf-8').strip()
                     categorii = striphtml(categorii).strip()
                     calitate = calitate.rstrip()
                     nume = '%s [COLOR yellow]%s[/COLOR]' % (nume, calitate)
                     info = {'Title': nume,'Plot': nume + categorii,'Poster': imagine, 'Genre': categorii}
                     lists.append((nume, legatura, imagine, 'get_links', info))
             match = re.compile('pagenavi"', re.IGNORECASE).findall(link)
             if len(match) > 0:
                 if '/page/' in url:
                     new = re.compile('/page/(\d+)').findall(url)
                     nexturl = re.sub('/page/(\d+)', '/page/' + str(int(new[0]) + 1), url)
                 else:
                     if '/?s=' in url:
                         nextpage = re.compile('\?s=(.+?)$').findall(url)
                         nexturl = '%s%s?s=%s' % (base_url, ('page/2/' if str(url).endswith('/') else '/page/2/'), nextpage[0])
                     else: nexturl = url + "/page/2"
                 lists.append(('Next', nexturl, self.nextimage, meniu, {}))
         
     elif meniu == 'get_links':
         link = fetchData(url)
         regex_lnk = '''<iframe.+?src="((?:[htt]|[//]).+?)"'''
         regex_infos = '''"description">(.+?)</'''
         match_lnk = re.compile(regex_lnk, re.IGNORECASE | re.DOTALL).findall(link)
         match_nfo = re.compile(regex_infos, re.IGNORECASE | re.DOTALL).findall(link)
         try:
             info = eval(str(info))
             info['Plot'] = (striphtml(match_nfo[0]).strip())
         except: pass
         for host, link1 in get_links(match_lnk):
             lists.append((host,link1,'','play', info, url))
     elif meniu == 'genuri':
         link = fetchData(url)
         regex_cats = '''class="genres"(.+?)</ul'''
         regex_cat = '''href=['"](.+?)['"].+?">(.+?)<'''
         if link:
             for cat in re.findall(regex_cats, link, re.IGNORECASE | re.DOTALL):
                 match = re.findall(regex_cat, cat, re.DOTALL)
                 for legatura, nume in match:
                     if nume != 'Diverse':
                         legatura = base_url + legatura
                         lists.append((nume, legatura, '', 'by_genre', info))
     return lists
           
Code example #12
 def parse_menu(self, url, meniu, info={}):
     lists = []
     link = fetchData(url)
     if meniu == 'recente' or meniu == 'cauta':
         if meniu == 'cauta':
             from resources.Core import Core
             Core().searchSites({'landsearch': self.__class__.__name__})
         else:
             link = fetchData(url, base_url + '/')
             regex_menu = '''<article(.+?)</art'''
             regex_submenu = '''href=['"](.+?)['"].+?title=['"](.+?)['"].+?src=['"](.+?)['"]'''
             if link:
                 for movie in re.compile(
                         regex_menu, re.IGNORECASE | re.MULTILINE
                         | re.DOTALL).findall(link):
                     match = re.compile(regex_submenu,
                                        re.DOTALL).findall(movie)
                     for legatura, nume, imagine in match:
                         nume = htmlparser.HTMLParser().unescape(
                             nume.decode('utf-8')).encode('utf-8')
                         info = {
                             'Title': nume,
                             'Plot': nume,
                             'Poster': imagine
                         }
                         lists.append(
                             (nume, legatura, imagine, 'get_links', info))
                 match = re.compile('"pagination"',
                                    re.IGNORECASE).findall(link)
                 match2 = re.compile('nav-previous',
                                     re.IGNORECASE).findall(link)
                 if len(match) > 0 or len(match2) > 0:
                     if '/page/' in url:
                         new = re.compile('/page/(\d+)').findall(url)
                         nexturl = re.sub('/page/(\d+)',
                                          '/page/' + str(int(new[0]) + 1),
                                          url)
                     else:
                         if '/?s=' in url:
                             nextpage = re.compile('\?s=(.+?)$').findall(
                                 url)
                             nexturl = '%s%s?s=%s' % (base_url, (
                                 'page/2/' if str(url).endswith('/') else
                                 '/page/2/'), nextpage[0])
                         else:
                             nexturl = url + "/page/2"
                     lists.append(
                         ('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'get_links':
         link = fetchData(url)
         nume = ''
         regex_lnk = '''(?:type=\'text/javascript\'>(?:\s+)?str=['"](.+?)["']|(?:(S\d+E\d+).+?)?<iframe.+?src=['"]((?:[htt]|[//]).+?)["'])'''
         regex_seriale = '''(?:<h3>.+?strong>(.+?)<.+?href=['"](.+?)['"].+?)'''
         regex_infos = '''sinopsis(.+?)<div'''
         regex_content = '''<article(.+?)</articl'''
         match_content = re.findall(regex_content, link,
                                    re.IGNORECASE | re.DOTALL)
         if len(match_content) > 0:
             match_lnk = re.compile(regex_lnk,
                                    re.IGNORECASE | re.DOTALL).findall(link)
             match_nfo = re.compile(regex_infos,
                                    re.IGNORECASE | re.DOTALL).findall(link)
             match_srl = re.compile(regex_seriale,
                                    re.IGNORECASE | re.DOTALL).findall(link)
         else:
             match_lnk = []
             match_nfo = []
             match_srl = []
         infos = eval(str(info))
         try:
             if len(match_nfo) > 0:
                 infos['Plot'] = htmlparser.HTMLParser().unescape(
                     striphtml(match_nfo[0]).strip().decode(
                         'utf-8')).encode('utf-8')
         except:
             pass
         titleorig = infos['Title']
         for numerotare, linknumerotare, linknumerotareunu in match_lnk:
             if not numerotare:
                 szep = re.findall('S(\d+)E(\d+)', linknumerotare,
                                   re.IGNORECASE | re.DOTALL)
                 if szep:
                     episod = linknumerotare
                     linknumerotare = linknumerotareunu
                     try:
                         if re.search('–|-|~', titleorig):
                             all_name = re.split(r'–|-|:|~', titleorig, 1)
                             title = all_name[1]
                             title2 = all_name[0]
                         else:
                             title = titleorig
                             title2 = ''
                         title, year = xbmc.getCleanMovieTitle(title)
                         title2, year2 = xbmc.getCleanMovieTitle(title2)
                         title = title if title else title2
                         year = year if year else year2
                         if year: infos['Year'] = year
                         if szep[0][1] and not szep[0][0]:
                             infos['Season'] = '01'
                         else:
                             infos['Season'] = str(szep[0][0])
                         infos['Episode'] = str(szep[0][1])
                         infos['TvShowTitle'] = title
                     except:
                         pass
             else:
                 numerotare = re.findall(
                     '<(?:iframe|script).+?src=[\'"]((?:[htt]|[//]).+?)["\']',
                     unquote(numerotare.replace('@', '%')),
                     re.IGNORECASE | re.DOTALL)[0]
                 try:
                     if re.search('–|-|~', titleorig):
                         all_name = re.split(r'–|-|:|~', titleorig, 1)
                         title = all_name[1]
                         title2 = all_name[0]
                     else:
                         title = titleorig
                         title2 = ''
                     title, year = xbmc.getCleanMovieTitle(title)
                     title2, year2 = xbmc.getCleanMovieTitle(title2)
                     title = title if title else title2
                     year = year if year else year2
                     if year: infos['Year'] = year
                     infos['Title'] = title
                 except:
                     pass
                 linknumerotare = numerotare
             if 'goo.gl' in linknumerotare:
                 lists.append((('%s: Hqq.tv' %
                                numerotare) if numerotare else 'Hqq.tv',
                               linknumerotare, '', 'play', str(infos), url))
             else:
                 try:
                     host = linknumerotare.split('/')[2].replace(
                         'www.', '').capitalize()
                     if not numerotare: host = episod + ': ' + host
                     lists.append((host, linknumerotare, '', 'play',
                                   str(infos), url))
                 except:
                     host = linknumerotareunu.split('/')[2].replace(
                         'www.', '').capitalize()
                     lists.append((host, linknumerotareunu, '', 'play',
                                   str(infos), url))
         for n_serial, l_serial in match_srl:
             if not n_serial.isspace():
                 if 'https://www.portalultautv.com/filme-erotice-online/' not in n_serial:
                     lists.append(
                         (n_serial, l_serial, '', 'get_links', info))
     elif meniu == 'genuri':
         link = fetchData(url)
         regex_cats = '''"menu"(.+?)</div'''
         regex_cat = '''href=["'](.*?)['"\s]>(.+?)<'''
         if link:
             for cat in re.compile(regex_cats, re.IGNORECASE | re.MULTILINE
                                   | re.DOTALL).findall(link):
                 match = re.compile(regex_cat, re.DOTALL).findall(cat)
                 for legatura, nume in match:
                     lists.append(
                         (nume, legatura.replace('"',
                                                 ''), '', 'recente', info)
                     )  #addDir(nume, legatura.replace('"', ''), 6, movies_thumb, 'recente')
     return lists
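
The str='...' blocks decoded above are iframes that have been URL-encoded with every '%' swapped for '@', which is why the scraper runs unquote(coded.replace('@', '%')) before the iframe regex. A self-contained round trip of that de-obfuscation (the embed URL is hypothetical):

import re
from urllib import quote, unquote  # urllib.parse on Python 3

embed = '<iframe src="http://hoster.example/e/xyz"></iframe>'
coded = quote(embed, safe='').replace('%', '@')   # what the page ships
html = unquote(coded.replace('@', '%'))           # what the scraper does
print(re.findall(r'<iframe.+?src=[\'"](.+?)[\'"]', html)[0])
# prints: http://hoster.example/e/xyz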
Code example #13
 def parse_menu(self, url, meniu, info={}):
     lists = []
     imagine = ''
     if meniu == 'recente' or meniu == 'cauta':
         if meniu == 'cauta':
             from resources.Core import Core
             Core().searchSites({'landsearch': self.__class__.__name__})
         else:
             link = fetchData(url)
             regex_menu = '''<article(.+?)</article'''
             regex_submenu = '''href="(.+?)".+?src="(.+?)".+?mark">(.+?)<.+?excerpt">(.+?)</div'''
             if link:
                 for movie in re.compile(regex_menu, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(link):
                     match = re.compile(regex_submenu, re.DOTALL).findall(movie)
                     for legatura, imagine, nume, descriere in match:
                         nume = (htmlparser.HTMLParser().unescape(striphtml(nume).decode('utf-8')).encode('utf-8')).strip()
                         descriere = (htmlparser.HTMLParser().unescape(striphtml(descriere).decode('utf-8')).encode('utf-8')).strip()
                         info = {'Title': nume,'Plot': descriere,'Poster': imagine}
                         szep = re.findall('(?:sezo[a-zA-Z\s]+(\d+).+?)?epi[a-zA-Z\s]+(\d+)', nume, re.IGNORECASE | re.DOTALL)
                         if szep:
                             try:
                                 if re.search('–|-|~', nume):
                                     all_name = re.split(r'–|-|:|~', nume,1)
                                     title = all_name[0]
                                     title2 = all_name[1]
                                 else: 
                                     title = nume
                                     title2 = ''
                                 title, year = xbmc.getCleanMovieTitle(title)
                                 title2, year2 = xbmc.getCleanMovieTitle(title2)
                                 title = title if title else title2
                                 year = year if year else year2
                                 if year: info['Year'] = year
                                 if szep[0][1] and not szep[0][0]: info['Season'] = '01'
                                 else: info['Season'] = str(szep[0][0])
                                 info['Episode'] = str(szep[0][1])
                                 info['TvShowTitle'] = (re.sub('(?:sezo[a-zA-Z\s]+\d+.+?)?epi[a-zA-Z\s]+\d+', '', title, flags=re.IGNORECASE | re.DOTALL)).strip()
                             except: pass
                         lists.append((nume, legatura, imagine, 'get_links', str(info)))
                 match = re.compile('"nav-links"', re.IGNORECASE).findall(link)
                 if len(match) > 0:
                     if '/page/' in url:
                         new = re.compile('/page/(\d+)').findall(url)
                         nexturl = re.sub('/page/(\d+)', '/page/' + str(int(new[0]) + 1), url)
                     else:
                         if '/?s=' in url:
                             nextpage = re.compile('\?s=(.+?)$').findall(url)
                             nexturl = '%s%s?s=%s' % (base_url, ('page/2/' if str(url).endswith('/') else '/page/2/'), nextpage[0])
                         else: nexturl = url + "/page/2"
                     lists.append(('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'get_links':
         link = fetchData(url)
         if re.search('content-protector-captcha', link):
             cpc = re.findall('content-protector-captcha.+?value="(.+?)"', link, re.DOTALL)
             cpt = re.findall('content-protector-token.+?value="(.+?)"', link, re.DOTALL)
             cpi = re.findall('content-protector-ident.+?value="(.+?)"', link, re.DOTALL)
             cpp = re.findall('content-protector-password.+?value="(.+?)"', link, re.DOTALL)
             cpsx = '348'
             cpsy = '220'
             data = {'content-protector-captcha': cpc[0],
                     'content-protector-token': cpt[0],
                     'content-protector-ident': cpi[0],
                     'content-protector-submit.x': cpsx,
                     'content-protector-submit.y': cpsy,
                     'content-protector-password': cpp[0]}
             link = fetchData(url, data=data)
         coded_lnk = '''type=[\'"].+?text/javascript[\'"]>(?:\s+)?str=['"](.+?)["']'''
         regex_lnk = '''<iframe.+?src="((?:[htt]|[//]).+?)"'''
         regex_infos = '''"description">(.+?)</'''
         match_coded = re.compile(coded_lnk, re.IGNORECASE | re.DOTALL).findall(link)
         match_lnk = re.compile(regex_lnk, re.IGNORECASE | re.DOTALL).findall(link)
         match_nfo = re.compile(regex_infos, re.IGNORECASE | re.DOTALL).findall(link)
         try:
             info = eval(str(info))
             info['Plot'] = (striphtml(match_nfo[0]).strip())
         except: pass
         regex_sub_oload = '''"captions" src="(.+?)"'''
         regex_sub_vidoza = '''tracks[:\s]+(.+?])'''
         for host, link1 in get_links(match_lnk):
             lists.append((host,link1,'','play', info, url))
         try:
             list_link = []
             for one_code in match_coded:
                 decoded = re.findall('<(?:iframe|script).+?src=[\'"]((?:[htt]|[//]).+?)["\']', unquote(one_code.replace('@','%')), re.IGNORECASE | re.DOTALL)[0]
                 list_link.append(decoded)
             for host, link1 in get_links(list_link):
                 lists.append((host,link1,'','play', info, url))
         except: pass
             
     elif meniu == 'categorii':
         cats = ['Seriale Indiene', 'Seriale Turcesti', 'Seriale Straine', 'Emisiuni TV', 'Seriale Romanesti']
         for cat in cats:
             lists.append((cat, base_url, self.thumb, 'titluri', {'categorie': cat}))
     elif meniu == 'titluri':
         info = eval(str(info))
         link = fetchData(url)
         regex_cats = '''%s</a>(.+?)</ul''' % info.get('categorie')
         regex_cat = '''href="(.+?)"(?:\s+)?>(.+?)<'''
         if link:
             for cat in re.findall(regex_cats, link, re.IGNORECASE | re.DOTALL):
                 match = re.findall(regex_cat, cat, re.IGNORECASE | re.DOTALL)
                 if len(match) > 0:
                     for legatura, nume in sorted(match, key=self.getKey):
                         nume = clean_cat(htmlparser.HTMLParser().unescape(nume.decode('utf-8')).encode('utf-8')).capitalize()
                         lists.append((nume,legatura.replace('"', ''),'','recente', info))
     return lists
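
The (?:sezo...)?epi... pattern that recurs throughout these examples pulls season and episode numbers out of Romanian titles such as 'Sezonul 2 Episodul 5', and the callers default the season to '01' when only an episode is present. A quick standalone check of that behaviour:

import re

pattern = r'(?:sezo[a-zA-Z\s]+(\d+).+?)?epi[a-zA-Z\s]+(\d+)'

for title in ('Serial Sezonul 2 Episodul 5', 'Serial Episodul 7'):
    szep = re.findall(pattern, title, re.IGNORECASE | re.DOTALL)
    season = szep[0][0] if szep[0][0] else '01'   # same default as the parsers
    print('%s -> S%s E%s' % (title, season, szep[0][1]))
# Serial Sezonul 2 Episodul 5 -> S2 E5
# Serial Episodul 7 -> S01 E7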
           
Code example #14
File: zfilmeonline.py  Project: drkman1/gruprepo
 def parse_menu(self, url, meniu, info={}):
     lists = []
     link = fetchData(url)
     if meniu == 'recente' or meniu == 'cauta':
         if meniu == 'cauta':
             from resources.Core import Core
             Core().searchSites({'landsearch': self.__class__.__name__})
         else: 
             link = fetchData(url)
             regex_menu = '''class="item">(.+?(?:.+?calidad.+?</sp.+?|</div.+?)).+?</di'''
             regex_submenu = '''href=['"](.+?)['"].+?src=['"](.+?)['"](?:.+?star">(.+?)</sp.+?year">(.+?)<sp.+?"year">(\d+)<.+?| alt="(.+?)")'''
             if link:
                 for movie in re.compile(regex_menu, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(link):
                     match = re.compile(regex_submenu, re.DOTALL).findall(movie)
                     for legatura, imagine, rating, nume, an, numealt in match:
                         nume = htmlparser.HTMLParser().unescape(striphtml(nume).decode('utf-8')).encode('utf-8').strip()
                         if not numealt: info = {'Title': nume,'Plot': nume,'Poster': imagine,'Year':str(an), 'Rating': striphtml(rating).strip()}
                         else: 
                             nume = numealt
                             info = {'Title': nume,'Plot': nume,'Poster': imagine}
                         lists.append((nume, legatura, imagine, 'get_links', info))
                 match = re.compile('"paginado"', re.IGNORECASE).findall(link)
                 if len(match) > 0:
                     if '/page/' in url:
                         new = re.compile('/page/(\d+)').findall(url)
                         nexturl = re.sub('/page/(\d+)', '/page/' + str(int(new[0]) + 1), url)
                     else:
                         if '/?s=' in url:
                             nextpage = re.compile('\?s=(.+?)$').findall(url)
                             nexturl = '%s%s?s=%s' % (base_url, ('page/2/' if str(url).endswith('/') else '/page/2/'), nextpage[0])
                         else: nexturl = url + "/page/2"
                     lists.append(('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'get_links':
         link = fetchData(url)
         links = []
         nume = ''
         regex_base = '''var[\s*]s[\d\s=]+\'(.+?)\''''
         reg_coded = '''var s(?:\d+) = \'(.+?)\''''
         reg_server = '''<iframe.+?src=[\'"]((?:[htt]|[//]).+?)["\']'''
         regex_lnk = '''type=\'text/javascript\'> str=['"](.+?)["']'''
         regex_seriale = '''(?:<h3>.+?strong>(.+?)<.+?href=['"](.+?)['"].+?)'''
         regex_infos = '''<div itemprop="description".+?>(.+?)</div'''
         match_lnk = re.compile(regex_lnk, re.IGNORECASE | re.DOTALL).findall(link)
         match_nfo = re.findall(regex_infos, link, re.IGNORECASE | re.DOTALL)
         match2_lnk = re.compile(reg_server, re.IGNORECASE | re.DOTALL).findall(link)
         try:
             info = eval(str(info))
             info['Plot'] = (striphtml(match_nfo[0]).strip())
         except: pass
         for coded in match_lnk:
             try:
                 link_script = re.findall(reg_server, unquote(coded.replace('@','%')), re.IGNORECASE | re.DOTALL)[0]
                 host = link_script.split('/')[2].replace('www.', '').capitalize()
                 lists.append((host,link_script,'','play', str(info), url))
             except: pass
         for host, link1 in get_links(match2_lnk):
             lists.append((host,link1,'','play', str(info), url))
         
     elif meniu == 'genuri':
         link = fetchData(url)
         regex_cat = '''class="cat-item.+?href=['"](.+?)['"\s]>(.+?)<'''
         if link:
             match = re.findall(regex_cat, link, re.IGNORECASE | re.DOTALL)
             if len(match) > 0:
                 for legatura, nume in match:
                     lists.append((nume,legatura.replace('"', ''),'','recente', info))
     return lists
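
The host label in these lists comes from link.split('/')[2].replace('www.', '').capitalize(), which assumes the URL always carries a scheme. urlparse covers the schemeless case too; a small sketch (Python 2 import, matching the surrounding code; the domain is hypothetical):

from urlparse import urlparse  # urllib.parse on Python 3

def host_label(link):
    # Fall back to the first path segment when there is no scheme.
    netloc = urlparse(link).netloc or link.split('/')[0]
    return netloc.replace('www.', '').capitalize()

print(host_label('http://www.vidoza.example/embed/abc'))  # Vidoza.example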
           
Code example #15
File: flixanity.py  Project: drkman1/gruprepo
    def parse_menu(self, url, meniu, info={}):
        lists = []
        #log('link: ' + link)
        imagine = ''
        if meniu == 'recente' or meniu == 'cauta':
            if meniu == 'cauta':
                from resources.Core import Core
                Core().searchSites({'landsearch': self.__class__.__name__})
            else:
                link = fetchData(url)
                #log(link)
                if '/new-episodes' in url:
                    regex_menu = '''cardbox.+?src=['"](.+?)['"].+?(?:.+?"info">(.+?)</div)?.+?data-type=['"](.+?)['"].+?href=['"](.+?)['"](?:>(.+?)<)?'''
                else:
                    regex_menu = '''cardbox.+?src=['"](.+?)['"].+?data-type=['"](.+?)['"].+?href=['"](.+?)['"](?:>(.+?)<)?'''
                regex_episod = '''href.+?>(.+?)<.+?season(.+?)episode(.+?)<.+?<p>(.+?)<'''
                if link:
                    match = re.compile(regex_menu,
                                       re.DOTALL | re.IGNORECASE).findall(link)
                    if '/new-episodes' in url:
                        if len(match) > 0:
                            for imagine, detalii_serial, tip, legatura, nume in match:
                                nume = htmlparser.HTMLParser().unescape(
                                    striphtml(nume).decode('utf-8')).encode(
                                        'utf-8')
                                info = {
                                    'Title': nume,
                                    'Plot': nume,
                                    'Poster': imagine
                                }
                                if tip.strip() == 'show':
                                    if detalii_serial:
                                        serial = re.findall(
                                            regex_episod, detalii_serial,
                                            re.DOTALL | re.IGNORECASE)
                                        if len(serial) > 0:
                                            for tvshowtitle, sezon, episod, data in serial:
                                                #log(episod)
                                                info['TVShowTitle'] = tvshowtitle.strip()
                                                info['Title'] = tvshowtitle.strip()
                                                info['Season'] = str('%02d' % int(
                                                    str(sezon.strip()))) if sezon else ''
                                                info['Episode'] = str('%02d' % int(
                                                    str(episod.strip()))) if episod else ''
                                                info['Plot'] = '%s - Sezon %s episod %s apărut in %s' % (
                                                    tvshowtitle.strip(), sezon.strip(),
                                                    episod.strip(), data.strip())
                                            lists.append((
                                                '%s - Sezon %s episod %s: %s' %
                                                (tvshowtitle.strip(),
                                                 sezon.strip(), episod.strip(),
                                                 data.strip()), legatura,
                                                imagine, 'get_links', info))
                    else:
                        for imagine, tip, legatura, nume in match:
                            nume = htmlparser.HTMLParser().unescape(
                                striphtml(nume).decode('utf-8')).encode(
                                    'utf-8')
                            info = {
                                'Title': nume,
                                'Plot': nume,
                                'Poster': imagine
                            }
                            if tip.strip() == 'show':
                                lists.append(('%s - Serial' % nume, legatura,
                                              imagine, 'get_seasons', info))
                            else:
                                lists.append((nume, legatura, imagine,
                                              'get_links', info))
                    match = re.compile('class="next-page',
                                       re.IGNORECASE).findall(link)
                    if len(match) > 0:
                        page = re.findall('/(\d+)(?:/)?$', url)
                        if len(page) > 0:
                            nexturl = re.sub('/(\d+)(?:/)?$',
                                             '/%s' % (int(page[0]) + 1), url)
                        else:
                            nexturl = '%s/2' % url
                        lists.append(
                            ('Next', nexturl, self.nextimage, meniu, {}))
        elif meniu == 'get_links':
            import base64
            import time
            from resources.lib import requests
            from resources.lib.requests.packages.urllib3.exceptions import InsecureRequestWarning
            requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
            s = requests.Session()
            r = s.get(url)
            c = r.cookies
            i = c.items()
            #log(r.content)
            cooked = ''
            for name, value in i:
                if name == '__cfduid':
                    cooked = '%s=%s' % (name, value)
                else:
                    cooked = ''
            s.headers.update({
                'User-Agent': randomagent(),
                'Cookie': cooked,
                'Content-Type':
                'application/x-www-form-urlencoded; charset=UTF-8',
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Referer': url,
                'Host': 'flixanity.io',
                'X-Requested-With': 'XMLHttpRequest'
            })
            u = '%s%s' % (base_url, '/ajax/gonlflhyad.php')
            try:
                v = re.findall('(https:.*?redirector.*?)[\'\"]', r.content)
                for j in v:
                    lists.append(('Gvideo', j, '', 'play', info, url))
            except:
                pass
            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
            elid = quote(base64.encodestring(str(int(time.time()))).strip())
            #log(r.content)
            token = re.findall("var\s+tok\s*=\s*'([^']+)", r.content)
            if token:
                host1 = ''
                token = token[0]
                idEl = re.findall('elid\s*=\s*"([^"]+)', r.content)[0]
                post = {
                    'action': action,
                    'idEl': idEl,
                    'token': token,
                    'elid': elid,
                    'nopop': ''
                }
                r = s.post(u, data=post)
                r = str(r.json())
                regex_lnk = '''type':[\su]+'(.+?)'.+?<iframe.+?src="((?:[htt]|[//]).+?)"'''
                match_lnk = re.findall(regex_lnk, r, re.DOTALL | re.IGNORECASE)
                #log(match_lnk)
                for host1, link2 in match_lnk:
                    if 'blogspot' in host1 or 'googleusercontent.com' in host1 or 'fbcdn.net' in link2:
                        lists.append((host1, link2, '', 'play', info, url))
                    elif 'llnwi.net' in link2:
                        headers = {'User-Agent': randomagent()}
                        result = requests.head(link2,
                                               headers=headers,
                                               allow_redirects=False,
                                               timeout=10)
                        link2 = result.headers['Location']
                        lists.append((host1, link2, '', 'play', info, url))
                    elif 'vidcdn.pro' in link2:
                        try:
                            from urlparse import urlparse
                            parsed = urlparse(link2)
                            headers = {'User-Agent': randomagent()}
                            result = requests.head(link2,
                                                   headers=headers,
                                                   allow_redirects=False,
                                                   timeout=10,
                                                   verify=False)
                            link2 = result.headers['Location']
                            domain = '{uri.scheme}://{uri.netloc}'.format(
                                uri=parsed)
                            link2 = domain + link2
                        except:
                            pass
                        lists.append((host1, link2, '', 'play', info, url))
                if 'googleusercontent.com' not in host1:
                    for host, link1 in get_links(match_lnk):
                        lists.append((host, link1, '', 'play', info, url))
        elif meniu == 'genuri_seriale' or meniu == 'genuri':
            link = fetchData(url)
            regex = '''categories"(.+?)</select'''
            regex_link = '''value="(.+?)">(.+?)<'''
            if link:
                for content in (re.findall(regex, link,
                                           re.IGNORECASE | re.DOTALL)):
                    result = re.findall(regex_link, content,
                                        re.IGNORECASE | re.DOTALL)
                    if len(result) > 0:
                        for legatura, nume in result:
                            if meniu == 'genuri_seriale':
                                if not re.search('all tv shows',
                                                 nume,
                                                 flags=re.IGNORECASE):
                                    lists.append(
                                        (nume, legatura, '', 'recente', info))
                            else:
                                if not re.search('all movies',
                                                 nume,
                                                 flags=re.IGNORECASE):
                                    lists.append(
                                        (nume, '%s/favorites' % legatura, '',
                                         'recente', info))
        elif meniu == 'get_seasons':
            link = fetchData(url)
            regex = '''seasons(.+?)</div'''
            sub_regex = '''href="(.+?)".+?>(.+?)<'''
            if link:
                for content in (re.findall(regex, link,
                                           re.IGNORECASE | re.DOTALL)):
                    result = re.findall(sub_regex, content,
                                        re.IGNORECASE | re.DOTALL)
                    if len(result) > 0:
                        for legatura, nume in result:
                            lists.append(('Sezon %s' % nume, legatura, '',
                                          'get_episodes', info))
        elif meniu == 'get_episodes':
            link = fetchData(url, rtype='1')
            #log(link)
            regex = '''id="episodes(.+?)</article>'''
            sub_regex = '''episode.+?href="(.+?)".+?>(.+?)<.+?(?:.+?data-e="(.+?)")?.+?(?:data-s="(.+?)")?'''
            if link:
                for content in (re.findall(regex, link,
                                           re.IGNORECASE | re.DOTALL)):
                    result = re.findall(sub_regex, content,
                                        re.IGNORECASE | re.DOTALL)
                    if len(result) > 0:
                        for legatura, nume, episod, sezon in result:
                            infos = eval(str(info))
                            infos['TVShowTitle'] = re.sub(
                                '\((.+?)\)', '', infos['Title']).strip()
                            infos['Title'] = re.sub('(s.*e.*: )',
                                                    '',
                                                    nume,
                                                    flags=re.IGNORECASE)
                            infos['Season'] = str('%02d' %
                                                  int(sezon)) if sezon else ''
                            infos['Episode'] = str(
                                '%02d' % int(episod)) if episod else ''
                            infos['Plot'] = infos['TVShowTitle'] + ' ' + nume
                            lists.append(
                                (nume, legatura, '', 'get_links', infos))
        elif meniu == 'cautare':
            import time
            import random
            import math
            import base64
            from resources.lib import requests
            from resources.lib.requests.packages.urllib3.exceptions import InsecureRequestWarning
            requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
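            # the search endpoint expects a 25-character pseudo-random 'set'
            # token built from the alphabet below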
            e = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
            r = 0
            t = ''
            s = requests.Session()
            r = s.get(base_url)
            while 25 > r:
                t += e[math.floor(random.random() * len(e))]
                r += 1
            s_url = 'https://api.flixanity.io/api/v1/0A6ru35yevokjaqbb3'
            q = quote(url)
            limit = '100'
            timestamp = str(int(time.time()))
            verifiedCheck = re.findall("var\s+tok\s*=\s*'([^']+)",
                                       r.content)[0]
            s_set = t
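            # 'rt' and 'sl' are fixed values lifted from the site's own search
            # request; they are presumably checked server-side and may rotate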
            rt = 'rPAOhkSTcEzSyJwHWwzwthPWVieITfDEKnhEVyVSOOIvHcaiVE'
            sl = '9fc895fbb0b23f1c0fb8e5a5fe02f7b5'
            data = {
                'q': q,
                'limit': limit,
                'timestamp': timestamp,
                'verifiedCheck': verifiedCheck,
                'set': s_set,
                'rt': rt,
                'sl': sl
            }
            s.headers.update({
                'Host': 'api.flixanity.io',
                'User-Agent': randomagent(),
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Accept-Language': 'en,en-US;q=0.7,en;q=0.3',
                'Accept-Encoding': 'gzip, deflate, br',
                'Content-Type': 'application/x-www-form-urlencoded',
                'Referer': '%s/' % base_url,
                'Origin': base_url
            })
            try:
                x = s.post(s_url, data=data)
                result = x.json()
            except:
                result = None
            if result:
                for det in result:
                    legatura = '%s%s' % (base_url, det.get('permalink'))
                    imagine = '%s%s' % (base_url, det.get('image'))
                    nume = '%s (%s)' % (det.get('title'), str(det.get('year')))
                    info = {
                        'Title': nume,
                        'Plot': nume,
                        'Poster': imagine,
                        'Year': det.get('year')
                    }
                    tip = det.get('type')
                    if tip != 'actor':
                        if tip == 'movie':
                            lists.append(
                                (nume, legatura, imagine, 'get_links', info))
                        elif tip == 'show':
                            lists.append(
                                (nume, legatura, imagine, 'get_seasons', info))

        return lists
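
The 'cautare' branch above is the densest part of this example: it scrapes a `tok` value from the landing page, generates a random `set` token, and POSTs both to the site's JSON search API. A minimal standalone sketch of that flow, assuming `requests` is available; `BASE_URL` is a placeholder, and the endpoint path plus the `rt`/`sl` constants are copied verbatim from the example and may no longer be accepted by the server:

import re
import time
import random
import string
import requests

BASE_URL = 'https://flixanity.example'  # placeholder
API_URL = 'https://api.flixanity.io/api/v1/0A6ru35yevokjaqbb3'

def search(query):
    s = requests.Session()
    page = s.get(BASE_URL)
    # equivalent of the math.floor/random.random loop in the example
    token = ''.join(random.choice(string.ascii_letters) for _ in range(25))
    tok = re.findall(r"var\s+tok\s*=\s*'([^']+)", page.text)[0]
    data = {
        'q': query,
        'limit': '100',
        'timestamp': str(int(time.time())),
        'verifiedCheck': tok,
        'set': token,
        'rt': 'rPAOhkSTcEzSyJwHWwzwthPWVieITfDEKnhEVyVSOOIvHcaiVE',
        'sl': '9fc895fbb0b23f1c0fb8e5a5fe02f7b5',
    }
    return s.post(API_URL, data=data).json()
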
コード例 #16
0
ファイル: filmebunenet.py プロジェクト: drkman1/gruprepo
 def parse_menu(self, url, meniu, info={}):
     lists = []
     #log('link: ' + link)
     imagine = ''
     if meniu == 'recente' or meniu == 'cauta':
         if meniu == 'cauta':
             from resources.Core import Core
             Core().searchSites({'landsearch': self.__class__.__name__})
         else: 
             link = fetchData(url)
             regex_menu = '''class="titlu">(.+?)<div class="clear"></div>'''
             regex_submenu = '''href=['"](.+?)['"].+?title="(.+?)".+?src="(.+?)".+?"descriere">(.+?)</div'''
             if link:
                 for movie in re.compile(regex_menu, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(link):
                     match = re.compile(regex_submenu, re.DOTALL).findall(movie)
                     for legatura, nume, imagine, descriere in match:
                         nume = htmlparser.HTMLParser().unescape(nume.decode('utf-8')).encode('utf-8')
                         descriere = htmlparser.HTMLParser().unescape(striphtml(descriere).decode('utf-8')).encode('utf-8').strip()
                         descriere = "-".join(descriere.split("\n"))
                         info = {'Title': nume,'Plot': descriere,'Poster': imagine}
                         lists.append((nume, legatura, imagine, 'get_links', info))
                 match = re.compile('"paginare"', re.IGNORECASE).findall(link)
                 if len(match) > 0:
                     if '/page/' in url:
                         new = re.compile('/page/(\d+)').findall(url)
                         nexturl = re.sub('/page/(\d+)', '/page/' + str(int(new[0]) + 1), url)
                     else:
                         if '/?s=' in url:
                             nextpage = re.compile('\?s=(.+?)$').findall(url)
                             nexturl = '%s%s?s=%s' % (base_url, ('page/2/' if str(url).endswith('/') else '/page/2/'), nextpage[0])
                         else: nexturl = url + "/page/2"
                     lists.append(('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'get_links':
         link = fetchData(url)
         links = []
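         # 'regex_lnk' grabs direct iframe embeds; 'reg_server' collects the
         # data-src mirror pages, each of which is fetched again so its own
         # iframe source can be extracted into 'links'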
         regex_lnk = '''<iframe.+?src="((?:[htt]|[//]).+?)"'''
         regex_infos = '''"description">(.+?)</'''
         reg_server = '''data-src="(.+?)"'''
         match_lnk = re.compile(regex_lnk, re.IGNORECASE | re.DOTALL).findall(link)
         match_nfo = re.compile(regex_infos, re.IGNORECASE | re.DOTALL).findall(link)
         match_server = re.findall(reg_server, link, re.IGNORECASE | re.DOTALL)
         try:
             mserver = list(set(match_server))
             for code in mserver:
                 try:
                     get_stupid_links = fetchData(code)
                     match_lnk = re.findall(regex_lnk, get_stupid_links, re.IGNORECASE | re.DOTALL)
                     links.append(match_lnk[0])
                 except: pass
         except: pass
         try:
             info = eval(str(info))
             info['Plot'] = (striphtml(match_nfo[0]).strip())
         except: pass
         for host, link1 in get_links(links):
             lists.append((host,link1,'','play', info, url))#addLink(host, link1, thumb, name, 10, striphtml(match_nfo[0]))
     elif meniu == 'genuri':
         link = fetchData(url)
         regex_cats = '''"cat-item.+?href=['"](.+?)['"][\s*]>(.+?)<'''
         if link:
             match = re.compile(regex_cats, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(link)
             if len(match) > 0:
                 for legatura, nume in sorted(match, key=self.getKey):
                     lists.append((nume,legatura.replace('"', ''),'','recente', info))#addDir(nume, legatura.replace('"', ''), 6, movies_thumb, 'recente')
     return lists
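
The pagination logic in the 'recente' branch above reduces to a small pure function: bump an existing /page/N segment, or insert /page/2 (keeping any ?s= search query in place). A sketch under that reading; the helper name next_page_url is hypothetical:

import re

def next_page_url(url, base_url):
    # already paginated: /page/N -> /page/N+1
    m = re.search(r'/page/(\d+)', url)
    if m:
        return re.sub(r'/page/(\d+)', '/page/%d' % (int(m.group(1)) + 1), url)
    # search results keep their ?s= query after the inserted page segment
    if '/?s=' in url:
        query = re.search(r'\?s=(.+?)$', url).group(1)
        middle = 'page/2/' if url.endswith('/') else '/page/2/'
        return '%s%s?s=%s' % (base_url, middle, query)
    return url + '/page/2'
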
コード例 #17
0
 def parse_menu(self, url, meniu, info={}):
     lists = []
     imagine = ''
     if meniu == 'recente' or meniu == 'cauta':
         if meniu == 'cauta':
             from resources.Core import Core
             Core().searchSites({'landsearch': self.__class__.__name__})
         else:
             if url == base_url: url = '%s/yes.html' % base_url
             link = fetchData(url)
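             # one pattern per result card: page link, data-url, optional
             # episode count ('eps'), optional quality tag, poster
             # (data-original) and display name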
             regex_submenu = '''"ml-item".+?href="(.+?)".+?data-url="(.+?)".+?(?:eps">(.+?)</span.+?)?(?:quality"(?:[a-zA-Z\n\s#=":]+)?>(.+?)<.+?)?data-original="(.+?)".+?info">(.+?)</span'''
             if link:
                 match = re.compile(regex_submenu, re.DOTALL).findall(link)
                 for legatura, infolink, season, calitate, imagine, nume in match:
                     nume = (htmlparser.HTMLParser().unescape(
                         striphtml(nume).decode('utf-8')).encode('utf-8')
                             ).strip()
                     info = {'Title': nume, 'Plot': nume, 'Poster': imagine}
                     sezon = re.search('season\s+(\d+)',
                                       nume,
                                       flags=re.IGNORECASE)
                     if sezon or season:
                         try:
                             numea = re.sub(
                                 '\s+-\s+season\s+\d+',
                                 '',
                                 nume,
                                 flags=re.IGNORECASE) if sezon else nume
                         except:
                             numea = re.sub('(?i)\s+-\s+season\s+\d+', '',
                                            nume) if sezon else nume
                         info['Title'] = numea
                         info['TVShowTitle'] = numea
                         info['Season'] = str(
                             sezon.group(1)) if sezon else '1'
                     lists.append(('%s - %s' %
                                   (nume, calitate if calitate else '%s' %
                                    ('Serial' if season else '')), legatura,
                                   imagine, 'get_all', info))
                 match = re.compile('"pagination',
                                    re.IGNORECASE).findall(link)
                 if len(match) > 0:
                     if '/page-' in url:
                         new = re.compile('/page-(\d+)').findall(url)
                         nexturl = re.sub('/page-(\d+)',
                                          '/page-' + str(int(new[0]) + 1),
                                          url)
                     else:
                         if '/genre/' in url or '/country/' in url or '/search/' in url:
                             nexturl = re.sub('\.html', '/page-2.html', url)
                         else:
                             nexturl = re.sub(
                                 '\.html',
                                 '/latest/all/all/all/all/all/page-2.html',
                                 url)
                     lists.append(
                         ('Next', nexturl, self.nextimage, meniu, {}))
     elif meniu == 'get_all':
         lid = re.search('-(\d+)\.', url).group(1)
         oldurl = url
         url = re.sub('\.html', '/watching.html', url)
         new_url = '%s/ajax/v4_movie_episodes/%s' % (base_url, lid)
         link = fetchData(new_url, rtype='json')
         #log(link)
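         # 'regex' lists every episode entry (index, server, id, element id,
         # title) in the returned HTML fragment; 'regex_embed' finds which
         # server id is the embed-type player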
         regex_embed = '''li id="sv.+?data-id="(.+?)".+?server-item(.+?)"'''
         regex = '''ep-item.+?data-index="(.+?)".+?data-server="(.+?)".+?data-id="(.+?)".+?id="(.+?)".+?title="(.+?)"'''
         if link:
             match = re.findall(regex, link.get('html'), re.DOTALL)
             check_embed = re.findall(regex_embed, link.get('html'),
                                      re.DOTALL)
             emb_server = ''
             for serv, typ in check_embed:
                 if typ.strip() == 'embed':
                     emb_server = serv
                     break
             #log(link.get('html'))
             for dataindex, dataserver, dataid, episodeid, nume in match:
                 embed = 'no'
                 if dataserver == emb_server: embed = 'yes'
                 infos = json.loads(info)
                 try:
                     nume = (htmlparser.HTMLParser().unescape(
                         striphtml(nume).decode('utf-8')).encode('utf-8')
                             ).strip()
                 except:
                     nume = nume.strip()
                 episod = re.search('episode\s+(\d+)',
                                    nume,
                                    flags=re.IGNORECASE)
                 if episod:
                     infos['Episode'] = str(episod.group(1))
                     infos['Title'] = '%s S%s E%s' % (
                         infos['Title'], infos['Season'], infos['Episode'])
                     infos['Plot'] = '%s Episode %s' % (infos['Plot'],
                                                        infos['Episode'])
                 else:
                     infos['Episode'] = ''
                 lists.append((
                     'Server %s - %s' % (dataserver, nume),
                     '%ssplitthishere%ssplitthishere%ssplitthishere%ssplitthishere%ssplitthishere%s'
                     % (dataindex, dataserver, dataid, lid, url, embed), '',
                     # JSON so that json.loads() in 'get_links' can parse it
                     'get_links', json.dumps(infos), '1'))
             #log(lists)
     elif meniu == 'get_links':
         link_parts = url.split('splitthishere')
         if len(link_parts) > 5: embed = link_parts[5]
         else: embed = 'no'
         if embed == 'no':
             url_tokens = '%s/ajax/movie_token?eid=%s&mid=%s' % (
                 base_url, link_parts[2], link_parts[3])
             headers = {
                 'Host':
                 'yesmovies.to',
                 'Accept':
                 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01'
             }
             tokens = fetchData(url_tokens, headers=headers)
             #log('tokens: ' + tokens)
             x = re.search('''_x=['"]([^"']+)''', tokens)
             if x: x = x.group(1)
             y = re.search('''_y=['"]([^"']+)''', tokens)
             if y: y = y.group(1)
             if not x or not y:
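                 # fallback: _x/_y ship inside an obfuscated script; the
                 # replaces below reduce its character-building idioms to
                 # literals so the CODE template can evaluate it and the
                 # decoded parameters can be re-extracted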
                 try:
                     script = '(' + tokens.split("(_$$)) ('_');")[0].split(
                         "/* `$$` */")[-1].strip()
                     script = script.replace('(__$)[$$$]', '\'"\'')
                     script = script.replace('(__$)[_$]', '"\\\\"')
                     script = script.replace('(o^_^o)', '3')
                     script = script.replace('(c^_^o)', '0')
                     script = script.replace('(_$$)', '1')
                     script = script.replace('($$_)', '4')
                     script = script.replace('+', '|x|')
                     vGlobals = {
                         "__builtins__": None,
                         '__name__': __name__,
                         'str': str,
                         'Exception': Exception
                     }
                     vLocals = {'param': None}
                     exec(CODE % script) in vGlobals, vLocals
                     data = vLocals['param'].decode('string_escape')
                     x = re.search('''_x=['"]([^"']+)''', data).group(1)
                     y = re.search('''_y=['"]([^"']+)''', data).group(1)
                 except:
                     x = ''
                     y = ''
             ip = re.search('''_ip(?:[\s+])?=(?:[\s+])?['"]([^"']+)''',
                            tokens)
             ip = ip.group(1) if ip else ''
             z = re.search('''_z(?:[\s+])?=(?:[\s+])?['"]([^"']+)''',
                           tokens)
             z = z.group(1) if z else ''
             url_source = '%s/ajax/movie_sources/%s?x=%s&y=%s' % (
                 base_url, link_parts[2], x, y)
         elif embed == 'yes':
             url_source = '%s/ajax/movie_embed/%s' % (base_url,
                                                      link_parts[2])
         #log('url_source; ' + url_source)
         one_urls = fetchData(url_source,
                              link_parts[4],
                              rtype='json',
                              headers={'Host': 'yesmovies.to'})
         selm = -1
         # defaults so the checks below never hit undefined names
         playlink = None
         sublink = None
         if one_urls:
             try:
                 embed = 'yes' if one_urls.get('embed') else embed
             except:
                 pass
             if embed == 'yes':
                 try:
                     playlink = one_urls.get('src')
                     sublink = None
                     selm = 0
                 except:
                     pass
             else:
                 try:
                     dialogb = xbmcgui.Dialog()
                     tracks = one_urls.get('playlist')[0].get('tracks')
                     if len(tracks) > 1:
                         sel = dialogb.select(
                             "Alege subtitrarea",
                             [sel_s.get('label') for sel_s in tracks])
                     else:
                         sel = 0
                     sublink = tracks[sel].get('file')
                     sublink = '%s%s' % (base_url,
                                         sublink) if sublink.startswith(
                                             '/') else sublink
                 except:
                     sublink = None
                 #try:
                 dialogb = xbmcgui.Dialog()
                 msources = one_urls.get('playlist')[0].get('sources')
                 if msources:
                     if isinstance(msources, list):
                         if len(msources) > 1:
                             selm = dialogb.select(
                                 "Alege varianta",
                                 [sel_m.get('label') for sel_m in msources])
                         else:
                             selm = 0
                         playlink = msources[selm].get('file')
                     else:
                         playlink = msources.get('file')
                         selm = 0
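                     # Kodi convention: headers appended after '|' in the play
                     # URL are sent by the player with each request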
                     if playlink and 'googleusercontent.com' not in playlink:
                         playlink = playlink + '|User-Agent=%s&Referer=%s&Origin=%s' % (
                             quote(randomagent()), quote(
                                 link_parts[4]), quote(base_url))
                 else:
                     playlink = None
             data = json.loads(info)
             #log('episode: ' + data.get('Episode'))
             if data.get('TVShowTitle'):
                 viewmark = url
                 playname = '%s %s' % (data.get('TVShowTitle'),
                                       data.get('Title'))
             else:
                 viewmark = url
                 playname = data.get('Title')
             if not sublink:
                 playname = playname + ' Fara subtitrare pe site'  # "no subtitles on the site"
             #log('playlink: ' + str(playlink))
             if playlink and selm != -1:
                 from resources import Core
                 core = Core.Core()
                 core.executeAction({
                     'info':
                     quote(info),
                     'favorite':
                     'check',
                     'site':
                     'filmeonlineto',
                     'landing':
                     quote(viewmark),
                     'nume':
                     playname,
                     'switch':
                     'play',
                     'link':
                     quote(playlink),
                     'action':
                     'OpenSite',
                     'watched':
                     'check',
                     'subtitrare':
                     quote(sublink) if sublink else ''
                 })
             else:
                 xbmc.executebuiltin(
                     'Notification(%s,%s)' %
                     (xbmcaddon.Addon().getAddonInfo('name'),
                      'Nu s-a găsit link'))  # "No link found"
             #xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=False)
             #lists.append(('Play %s' % (playname),playlink,'','play', info, viewmark, sublink))
     elif meniu == 'genuri':
         link = fetchData('%s/yes.html' % base_url)
         regex = '''title="Genre"(.+?)</div'''
         regex_cats = '''href="(.+?)"(?:.+?)?>(.+?)<'''
         if link:
             for cats in re.findall(regex, link, re.DOTALL | re.IGNORECASE):
                 match = re.findall(regex_cats, cats)
                 if len(match) > 0:
                     for legatura, nume in sorted(match, key=self.getKey):
                         lists.append((nume, legatura, '', 'recente', info))
     elif meniu == 'tari':
         link = fetchData('%s/yes.html' % base_url)
         regex = '''title="Country"(.+?)</div'''
         regex_cats = '''href="(.+?)"(?:.+?)?>(.+?)<'''
         if link:
             for cats in re.findall(regex, link, re.DOTALL | re.IGNORECASE):
                 match = re.findall(regex_cats, cats)
                 if len(match) > 0:
                     for legatura, nume in sorted(match, key=self.getKey):
                         lists.append((nume, legatura, '', 'recente', info))
     return lists
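
Example #17 round-trips six request parameters through a single Kodi plugin-URL field by joining them with the sentinel 'splitthishere' in 'get_all' and splitting them back in 'get_links'. The same idea as a pack/unpack pair; the helper names are hypothetical:

DELIM = 'splitthishere'

def pack_params(*parts):
    # the values must survive as one URL parameter, so join them with a
    # sentinel that cannot occur in any of them
    return DELIM.join(str(p) for p in parts)

def unpack_params(packed):
    return packed.split(DELIM)

# pack_params(dataindex, dataserver, dataid, lid, url, embed) builds the
# field appended in 'get_all'; unpack_params(url) restores the six parts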