Example #1
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
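        # url packs the show's metadata as a query string; find the season page, then return the matching episode link.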
        try:
            if not url:
                return

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            url = self.__search(
                [data['tvshowtitle']] +
                source_utils.aliases_to_array(eval(data['aliases'])),
                data['year'], season)
            if not url: return

            r = client.request(urlparse.urljoin(self.base_link, url))

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'ep_link'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content) for i in r if i]
            r = [(i[0], re.findall("^(?:episode)\s*(\d+)$", i[1], re.I))
                 for i in r]
            r = [(i[0], i[1][0] if i[1] else '0') for i in r]
            r = [i[0] for i in r if int(i[1]) == int(episode)][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #2
    def sources(self, url, hostDict, hostprDict):
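        # Each table row names a hoster in its img alt; keep only hosts that validate against hostDict.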
        sources = []
        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = r.replace('\\"', '"')

            links = dom_parser.parse_dom(r, 'tr', attrs={'id': 'tablemoviesindex2'})

            for i in links:
                try:
                    host = dom_parser.parse_dom(i, 'img', req='alt')[0].attrs['alt']
                    host = host.split()[0].rsplit('.', 1)[0].strip().lower()
                    host = host.encode('utf-8')

                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid: continue

                    url = dom_parser.parse_dom(i, 'a', req='href')[0].attrs['href']
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')

                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False,
                                    'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #3
    def __search(self, titles, year):
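        # Search by the primary title, then compare cleaned title and year; a season link wins immediately.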
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.getsearch(titles[0])))

            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(titles[0])
            data = client.request(query)
            # data = client.request(query, referer=self.base_link)
            data = client.parseDOM(data, 'div', attrs={'class': 'result-item'})
            r = dom_parser.parse_dom(data, 'div', attrs={'class': 'title'})
            r = zip(
                dom_parser.parse_dom(r, 'a'),
                dom_parser.parse_dom(data, 'span', attrs={'class': 'year'}))

            url = []
            for i in range(len(r)):
                title = cleantitle.get(r[i][0][1])
                title = re.sub('(\d+p|4k|3d|hd|season\d+)', '', title)
                y = r[i][1][1]
                link = r[i][0][0]['href']
                if 'season' in title: continue
                if t == title and y == year:
                    if 'season' in link:
                        url.append(source_utils.strip_domain(link))
                        return url[0]
                    else:
                        url.append(source_utils.strip_domain(link))

            return url
        except:
            return
Example #4
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        #{'source': host, 'quality': i[1], 'provider': 'Sezonlukdizi', 'url': i[0]})
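        # Player tabs expose a data-id; POSTing it to video_link yields an iframe, whose page may embed caption/file pairs.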
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url)
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = dom_parser.parse_dom(result, 'div', attrs={'class': 'item'}, req='data-id')
            pages = [i.attrs['data-id'] for i in pages]

            for page in pages:
                try:
                    url = urlparse.urljoin(self.base_link, self.video_link)

                    result = client.request(url, post={'id': page})
                    if not result: continue

                    url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                    if url.startswith('//'): url = 'http:' + url
                    if url.startswith('/'): url = urlparse.urljoin(self.base_link, url)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid: sources.append({'source': host, 'quality': 'HD', 'url': url, 'provider': 'Sezonlukdizi'})

                    if '.asp' not in url: continue

                    result = client.request(url)

                    captions = re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', result)
                    if not captions: continue

                    matches = [(match[0], match[1]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''', result, re.DOTALL | re.I)]
                    matches += [(match[1], match[0]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', result, re.DOTALL | re.I)]

                    result = [(source_utils.label_to_quality(x[0]), x[1].replace('\/', '/')) for x in matches]
                    result = [(i[0], i[1]) for i in result if not i[1].endswith('.vtt')]

                    for quality, url in result: sources.append({'source': 'gvideo', 'quality': quality, 'url': url, 'provider': 'Sezonlukdizi'})
                except:
                    pass

            return sources
        except Exception as e:
            control.log('ERROR sezonlukidz %s' % e)
            return sources
Example #5
    def get_episode(self, url, imdb, tvdb, title, premiered, season, episode):
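        # The season list page links all episodes; match "-season-<s>-episode-<e>" in the href.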
        try:
            if not url:
                return

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'article', {'class': 'SeasonList'})
            r = dom_parser.parse_dom(r, 'ul')
            r = dom_parser.parse_dom(r, 'li')
            r = dom_parser.parse_dom(r, 'a', attrs={'href': re.compile('[^"]+-season-%s-episode-%s(?!\d)[^"]*' % (season, episode))}, req='href')[0].attrs['href']

            return source_utils.strip_domain(r)
        except:
            return
Example #6
 def movie(self, imdb, title, localtitle, aliases, year):
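     # Search on the cleaned title and accept the first masonry result whose link text equals the title exactly.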
     try:
         url = urlparse.urljoin(
             self.base_link,
             self.search_link % cleantitle.geturl(title).replace('-', '+'))
         r = client.request(url, cookie='check=2')
         m = dom_parser.parse_dom(r, 'div', attrs={'class': 'masonry'})
         m = dom_parser.parse_dom(m, 'a', req='href')
         m = [(i.attrs['href']) for i in m if i.content == title]
         if m:
             url = urlparse.urljoin(self.base_link, m[0])
         return url
     except Exception:
         return
Example #7
    def __search(self, imdb, titles, year):
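        # Keep only US-flagged rows, rank them by their "smileys" rating image, then try an exact title/year
        # match; failing that, open the top candidates and look for the IMDb id in the page.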
        try:
            q = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
            q = urlparse.urljoin(self.base_link, q)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(q)

            r = dom_parser.parse_dom(r, 'tr', attrs={'id': re.compile('coverPreview.+?')})
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  dom_parser.parse_dom(i, 'div', attrs={'style': re.compile('.+?')}),
                  dom_parser.parse_dom(i, 'img', req='src')) for i in r]
            r = [(i[0][0].attrs['href'].strip(), i[0][0].content.strip(), i[1], i[2]) for i in r if i[0] and i[2]]
            r = [(i[0], i[1], [x.content for x in i[2] if x.content.isdigit() and len(x.content) == 4], i[3]) for i in
                 r]
            r = [(i[0], i[1], i[2][0] if i[2] else '0', i[3]) for i in r]
            r = [i for i in r if any('us_flag' in x.attrs['src'] for x in i[3])]
            r = [(i[0], i[1], i[2], [re.findall('(\d+)', x.attrs['src']) for x in i[3] if 'smileys' in x.attrs['src']])
                 for i in r]
            r = [(i[0], i[1], i[2], [x[0] for x in i[3] if x]) for i in r]
            r = [(i[0], i[1], i[2], int(i[3][0]) if i[3] else 0) for i in r]
            r = sorted(r, key=lambda x: x[3])[::-1]
            r = [(i[0], i[1], i[2], re.findall('\((.+?)\)$', i[1])) for i in r]
            r = [(i[0], i[1], i[2]) for i in r if not i[3]]
            r = [i for i in r if i[2] in y]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year

            r = [(client.replaceHTMLCodes(i[0]), i[1], i[2]) for i in r]

            match = [i[0] for i in r if cleantitle.get(i[1]) in t and year == i[2]]

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if match: url = match[0]; break
                    r = client.request(urlparse.urljoin(self.base_link, i))
                    r = re.findall('(tt\d+)', r)
                    if imdb in r: url = i; break
                except:
                    pass

            return source_utils.strip_domain(url)
        except:
            return
Example #8
    def sources(self, url, hostDict, hostprDict):
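        # Host/link pairs live in the show_links table; everything is reported as an SD hoster link.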
        try:
            sources = []

            if url is None:
                return
            r = client.request(url, cookie='check=2')

            m = dom_parser.parse_dom(r, 'table', attrs={'class':
                                                        'show_links'})[0]
            links = re.findall('k">(.*?)<.*?f="(.*?)"', m.content)
            for link in links:
                try:
                    sources.append({
                        'source': link[0],
                        'quality': 'SD',
                        'language': 'en',
                        'url': link[1],
                        'direct': False,
                        'debridonly': False
                    })
                except Exception:
                    pass

            return sources
        except Exception:
            return sources
Example #9
    def sezonlukdizi_tvcache(self):
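        # Read the series index, falling back to the dizi JS file, and collect (path, title) pairs from u:/d: entries.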
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)

            result = client.request(url, redirect=False)

            if not result:
                r = client.request(self.base_link)
                r = dom_parser.parse_dom(r, 'script',
                                         attrs={'type': 'text/javascript', 'src': re.compile('.*/js/dizi.*')},
                                         req='src')[0]
                url = urlparse.urljoin(self.base_link, r.attrs['src'])
                result = client.request(url)

            result = re.compile('{(.+?)}').findall(result)
            result = [(re.findall('u\s*:\s*(?:\'|\")(.+?)(?:\'|\")', i),
                       re.findall('d\s*:\s*(?:\'|\")(.+?)(?:\',|\")', i))
                      for i in result]
            result = [(i[0][0], i[1][0]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(re.compile('/diziler(/.+?)(?://|\.|$)').findall(i[0]),
                       re.sub('&#\d*;', '', i[1])) for i in result]
            result = [(i[0][0] + '/', cleantitle.query(self.lat2asc(i[1])))
                      for i in result if len(i[0]) > 0]

            return result
        except:
            return []
Example #10
def parseDOM(html, name='', attrs=None, ret=False):
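    # Compatibility wrapper around dom_parser.parse_dom: attr values are compiled as end-anchored regexes,
    # and ret, when set, extracts that attribute instead of the tag content.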
    if attrs: attrs = dict((key, re.compile(value + ('$' if value else ''))) for key, value in attrs.iteritems())
    results = dom_parser.parse_dom(html, name, attrs, ret)
    if ret:
        results = [result.attrs[ret.lower()] for result in results]
    else:
        results = [result.content for result in results]
    return results
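
A minimal usage sketch for the wrapper above; the markup and attribute value are illustrative assumptions, not taken from any provider here:

    # 'html' is any fetched page source (hypothetical)
    hrefs = parseDOM(html, 'a', attrs={'class': 'watch'}, ret='href')  # attribute values
    texts = parseDOM(html, 'div', attrs={'class': 'title'})  # tag contents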
Example #11
    def __search(self, search_url, title, year):
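        # Lightweight check: fetch just the first chunk of the page and confirm the year appears in its <title>.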
        try:
            url = search_url % cleantitle.geturl(title)

            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1', timeout='10')
            r = dom_parser.parse_dom(r, 'title')[0].content
            return url if year in r else None
        except:
            pass
Example #12
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
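     # Locate the show page by exact title, then build the /season-N/episode-M/ URL directly.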
     try:
         if url is None:
             return
         data = urlparse.parse_qs(url)
         data = dict((i, data[i][0]) for i in data)
         url = urlparse.urljoin(
             self.base_link, self.search_link %
             cleantitle.geturl(data['tvshowtitle']).replace('-', '+'))
         r = client.request(url, cookie='check=2')
         m = dom_parser.parse_dom(r, 'div', attrs={'class': 'masonry'})
         m = dom_parser.parse_dom(m, 'a', req='href')
         m = [(i.attrs['href']) for i in m
              if i.content == data['tvshowtitle']]
         query = '%s/season-%s/episode-%s/' % (m[0], season, episode)
         url = urlparse.urljoin(self.base_link, query)
         return url
     except Exception:
         return
Example #13
 def __get_base_url(self, fallback):
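     # Probe each candidate domain and return the first whose author meta tag mentions movie4k.to.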
     try:
         for domain in self.domains:
             try:
                 url = 'http://%s' % domain
                 r = client.request(url, timeout='10')
                 r = dom_parser.parse_dom(r, 'meta', attrs={'name': 'author'}, req='content')
                 if r and 'movie4k.to' in r[0].attrs.get('content').lower():
                     return url
             except:
                 pass
     except:
         pass
Example #14
    def __search(self, titles, year, season='0'):
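        # Extract "(year)" and "season N" from each result title, then pick the row matching title, year window, and season.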
        try:
            query = self.search_link % (urllib.quote_plus(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'list_movies'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item_movie'})
            r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'tit'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2],
                  re.findall('(.+?)\s+(?:\s*-?\s*(?:season|s))\s*(\d+)', i[1]))
                 for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2],
                  i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2],
                  '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [
                i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y
                and int(i[3]) == int(season)
            ][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #15
    def searchMovie(self, title, year, aliases, headers):
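        # Movie search: only anchors whose text is exactly "<title> (<year>)" qualify.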
        try:
            clean_title = cleantitle.geturl(title).replace('-', '+')

            url = urlparse.urljoin(self.base_link,
                                   self.search_link % ('%s' % clean_title))
            r = client.request(url)

            r = client.parseDOM(r, 'div', attrs={'class': 'list_movies'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href']) for i in r
                 if i.content == '%s (%s)' % (title, year)]

            return r[0]
        except:
            return
Example #16
    def searchShow(self, title, season, year, aliases, headers):
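        # Show search: the query and the expected anchor text both carry "Season <n>".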
        try:
            clean_title = cleantitle.geturl(title).replace('-', '+')

            url = urlparse.urljoin(
                self.base_link, self.search_link %
                ('%s+Season+%01d' % (clean_title, int(season))))
            r = client.request(url)

            r = client.parseDOM(r, 'div', attrs={'class': 'list_movies'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href']) for i in r
                 if '%s - Season %01d' % (title, int(season)) in i.content]

            return r[0]
        except:
            return
Example #17
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
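        # Pick the anchor whose markup carries the matching season and episodeNumber itemprop spans.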
        try:
            if url is None: return

            url = urlparse.urljoin(self.base_link, url)
            result = client.request(url)

            title = cleantitle.get(title)
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
            premiered = '%s/%s/%s' % (premiered[2], premiered[1], premiered[0])
            items = dom_parser.parse_dom(result, 'a', attrs={'itemprop': 'url'})

            url = [i.attrs['href'] for i in items if bool(
                re.compile('<span\s*>%s<.*?itemprop="episodeNumber">%s<\/span>' % (season, episode)).search(
                    i.content))][0]

            url = url.encode('utf-8')
            return url
        except:
            return
Example #18
    def sources(self, url, hostDict, hostprDict):
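        # Retry the listing a few times, then follow the first five watch links to their action-btn targets.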
        try:
            sources = []

            if url is None: return sources
            url = urlparse.urljoin(self.base_link, url)
            for i in range(3):
                result = client.request(url, timeout=10)
                if result is not None: break

            dom = dom_parser.parse_dom(result, 'div', attrs={'class': 'links', 'id': 'noSubs'})
            result = dom[0].content

            links = re.compile(
                '<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',
                re.DOTALL).findall(result)
            for link in links[:5]:
                try:
                    url2 = urlparse.urljoin(self.base_link, link[1])
                    for i in range(2):
                        result2 = client.request(url2, timeout=3)
                        if result2 is not None: break
                    r = re.compile('href="([^"]+)"\s+class="action-btn').findall(result2)[0]
                    valid, hoster = source_utils.is_host_valid(r, hostDict)
                    if not valid: continue
                    # log_utils.log('JairoxDebug1: %s - %s' % (url2,r), log_utils.LOGDEBUG)
                    urls, host, direct = source_utils.check_directstreams(r, hoster)
                    for x in urls: sources.append(
                        {'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct,
                         'debridonly': False})

                except:
                    # traceback.print_exc()
                    pass

            # log_utils.log('JairoxDebug2: %s' % (str(sources)), log_utils.LOGDEBUG)
            return sources
        except:
            return sources
Example #19
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
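        # Try to match the episode by its datePublished date first, then fall back to season/episode spans.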
        try:
            if url is None: return

            url = urlparse.urljoin(self.base_link, url)

            for i in range(4):
                result = client.request(url, timeout=3)
                if result is not None: break

            items = dom_parser.parse_dom(result,
                                         'a',
                                         attrs={'itemprop': 'url'})

            try:
                url = [
                    i.attrs['href'] for i in items if bool(
                        re.compile('"datePublished">%s' %
                                   premiered).search(i.content))
                ][0]
            except:
                url = None

            if url is None:
                url = [
                    i.attrs['href'] for i in items if bool(
                        re.compile(
                            '<span\s*>%s<.*?itemprop="episodeNumber">%s<\/span>'
                            % (season, episode)).search(i.content))
                ][0]

            url = url.encode('utf-8')

            return url
        except:
            return
Example #20
    def sources(self, url, hostDict, hostprDict):
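        # Links are shuffled, debrid-resolvable hosts are front-loaded, and scraping stops after enough sources.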

        try:
            sources = []
            if url is None: return sources

            req = urlparse.urljoin(self.base_link, url)

            for i in range(4):
                result = client.request(req, timeout=3)
                if result is not None: break

            dom = dom_parser.parse_dom(result,
                                       'div',
                                       attrs={
                                           'class': 'links',
                                           'id': 'noSubs'
                                       })
            result = dom[0].content
            links = re.compile(
                '<i class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',
                re.DOTALL).findall(result)
            random.shuffle(links)

            if debrid.status():
                debrid_links = []
                for pair in links:
                    for r in debrid.debrid_resolvers:
                        if r.valid_url('', pair[0].strip()):
                            debrid_links.append(pair)
                links = debrid_links + links

            hostDict = hostDict + hostprDict

            conns = 0
            for pair in links:

                if conns > self.max_conns and len(sources) > self.min_srcs:
                    break

                host = pair[0].strip()
                link = pair[1]

                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid: continue

                link = urlparse.urljoin(self.base_link, link)
                for i in range(2):
                    result = client.request(link, timeout=3)
                    conns += 1
                    if result is not None: break

                try:
                    link = re.compile('href="([^"]+)"\s+class="action-btn'
                                      ).findall(result)[0]
                except:
                    continue

                try:
                    u_q, host, direct = source_utils.check_directstreams(
                        link, host)
                except:
                    continue

                link, quality = u_q[0]['url'], u_q[0]['quality']

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'direct': direct,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #21
 def resolve(self, url):
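     # The final hoster URL is the anchor inside the link_under_video div.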
     r = client.request(url)
     r = dom_parser.parse_dom(r, 'div', {'class': 'link_under_video'})
     r = dom_parser.parse_dom(r, 'a', req='href')
     return r[0].attrs['href']
Example #22
    def sources(self, url, hostDict, hostprDict):
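        # Gather data-video embeds; vidnode and ocloud embeds need an extra request to expose the real stream URL.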
        try:
            sources = []
            if url is None: return sources

            hostDict += [
                'akamaized.net', 'google.com', 'picasa.com', 'blogspot.com'
            ]
            result = client.request(url, timeout=10)

            dom = dom_parser.parse_dom(result, 'a', req='data-video')
            urls = [
                i.attrs['data-video']
                if i.attrs['data-video'].startswith('https') else 'https:' +
                i.attrs['data-video'] for i in dom
            ]

            for url in urls:
                dom = []
                if 'vidnode.net' in url:
                    result = client.request(url, timeout=10)
                    dom = dom_parser.parse_dom(result,
                                               'source',
                                               req=['src', 'label'])
                    dom = [
                        (i.attrs['src'] if i.attrs['src'].startswith('https')
                         else 'https:' + i.attrs['src'], i.attrs['label'])
                        for i in dom if i
                    ]
                elif 'ocloud.stream' in url:
                    result = client.request(url, timeout=10)
                    base = re.findall('<base href="([^"]+)">', result)[0]
                    hostDict += [base]
                    dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                    dom = [(i.attrs['href'].replace('./embed', base + 'embed'),
                            i.attrs['id']) for i in dom if i]
                    dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)",
                                       client.request(i[0]))[0], i[1])
                           for i in dom if i]
                if dom:
                    try:
                        for r in dom:
                            valid, hoster = source_utils.is_host_valid(
                                r[0], hostDict)

                            if not valid: continue
                            quality = source_utils.label_to_quality(r[1])
                            urls, host, direct = source_utils.check_directstreams(
                                r[0], hoster)
                            for x in urls:
                                # size is only known for direct streams; leave it unset otherwise
                                size = source_utils.get_size(x['url']) if direct else None
                                if size:
                                    sources.append({
                                        'source': host,
                                        'quality': quality,
                                        'language': 'en',
                                        'url': x['url'],
                                        'direct': direct,
                                        'debridonly': False,
                                        'info': size
                                    })
                                else:
                                    sources.append({
                                        'source': host,
                                        'quality': quality,
                                        'language': 'en',
                                        'url': x['url'],
                                        'direct': direct,
                                        'debridonly': False
                                    })
                    except:
                        pass
                else:
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    try:
                        url.decode('utf-8')
                        sources.append({
                            'source': hoster,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass
            return sources
        except:
            return sources
Example #23
    def sources(self, url, hostDict, hostprDict):
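        # Resolve the title to a page id, enumerate its server list, and turn each entry into a source;
        # server '6' hides its playlist behind x/y tokens pulled from an obfuscated script.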
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            mid = re.findall('-(\d+)', url)[-1]

            try:
                headers = {'Referer': url}
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = client.request(u, headers=headers, XHR=True)
                r = json.loads(r)['html']
                r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                ids = client.parseDOM(r, 'li', ret='data-id')
                servers = client.parseDOM(r, 'li', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)
                u = urlparse.urljoin(self.base_link, self.info_link % mid)
                quality = client.request(u, headers=headers)
                quality = dom_parser.parse_dom(quality,
                                               'div',
                                               attrs={'class': 'jtip-quality'
                                                      })[0].content
                if quality == "HD":
                    quality = "720p"
                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?',
                                            eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            if eid[1] != '6':
                                url = urlparse.urljoin(
                                    self.base_link, self.embed_link % eid[0])
                                link = client.request(url)
                                link = json.loads(link)['src']
                                valid, host = source_utils.is_host_valid(
                                    link, hostDict)
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'info': [],
                                    'direct': False,
                                    'debridonly': False
                                })
                            else:
                                url = urlparse.urljoin(
                                    self.base_link,
                                    self.token_link % (eid[0], mid))
                                script = client.request(url)
                                if '$_$' in script:
                                    params = self.uncensored1(script)
                                elif script.startswith(
                                        '[]') and script.endswith('()'):
                                    params = self.uncensored2(script)
                                elif '_x=' in script:
                                    x = re.search('''_x=['"]([^"']+)''',
                                                  script).group(1)
                                    y = re.search('''_y=['"]([^"']+)''',
                                                  script).group(1)
                                    params = {'x': x, 'y': y}
                                else:
                                    raise Exception()

                                u = urlparse.urljoin(
                                    self.base_link, self.source_link %
                                    (eid[0], params['x'], params['y']))
                                r = client.request(u, XHR=True)
                                url = json.loads(r)['playlist'][0]['sources']
                                url = [i['file'] for i in url if 'file' in i]
                                url = [directstream.googletag(i) for i in url]
                                url = [i[0] for i in url if i]

                                for s in url:
                                    if 'lh3.googleusercontent.com' in s['url']:
                                        s['url'] = directstream.googleredirect(
                                            s['url'])

                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': s['quality'],
                                        'language': 'en',
                                        'url': s['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Example #24
    def sources(self, url, hostDict, hostprDict):
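        # Same data-id/iframe flow as Example #4, but the session cookie is reused and href.li wrappers are stripped.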
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)
            c = client.request(url, output='cookie')
            result = client.request(url)

            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = dom_parser.parse_dom(result,
                                         'div',
                                         attrs={'class': 'item'},
                                         req='data-id')
            pages = [i.attrs['data-id'] for i in pages]

            for page in pages:
                try:
                    url = urlparse.urljoin(self.base_link, self.video_link)

                    result = client.request(url, post={'id': page}, cookie=c)
                    if not result: continue

                    url = dom_parser.parse_dom(result, 'iframe',
                                               req='src')[0].attrs['src']
                    if url.startswith('//'): url = 'http:' + url
                    if url.startswith('/'):
                        url = urlparse.urljoin(self.base_link, url)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        sources.append({
                            'source': host,
                            'quality': 'HD',
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })

                    if '.asp' not in url: continue

                    result = client.request(url, cookie=c)

                    try:
                        url = dom_parser.parse_dom(result, 'iframe',
                                                   req='src')[0].attrs['src']
                        url = url.replace('https://href.li/?', '')
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            if host == 'gvideo':
                                ginfo = directstream.google(url)
                                for g in ginfo:
                                    sources.append({
                                        'source': host,
                                        'quality': g['quality'],
                                        'language': 'en',
                                        'url': g['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                            else:
                                sources.append({
                                    'source': host,
                                    'quality': 'HD',
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except:
                        pass

                    captions = re.search(
                        '''["']?kind["']?\s*:\s*(?:\'|\")captions(?:\'|\")''',
                        result)
                    if not captions: continue

                    matches = [(match[0], match[1]) for match in re.findall(
                        '''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''',
                        result, re.DOTALL | re.I)]
                    matches += [(match[1], match[0]) for match in re.findall(
                        '''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''',
                        result, re.DOTALL | re.I)]

                    result = [(source_utils.label_to_quality(x[0]),
                               x[1].replace('\/', '/')) for x in matches]
                    result = [(i[0], i[1]) for i in result
                              if not i[1].endswith('.vtt')]

                    for quality, url in result:
                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example #25
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        control.log("><><><><> PELISPEDIA SOURCE %s" % url)
        #sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Pelispedia', 'url': i['url']})
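        # Non-pelispedia buttons become hoster links; pelispedia players are fetched and probed with
        # several embedded-source, gkplugins, and jsunpack extraction patterns.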
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = dom_parser.parse_dom(r, 'div', {'class': 'repro'})

            r = dom_parser.parse_dom(r[0].content, 'iframe', req='src')
            f = r[0].attrs['src']

            r = client.request(f)
            r = dom_parser.parse_dom(r, 'div', {'id': 'botones'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], urlparse.urlparse(i.attrs['href']).netloc) for i in r]

            links = []

            for u, h in r:
                if 'pelispedia' not in h:
                    valid, host = source_utils.is_host_valid(u, hostDict)
                    if not valid: continue

                    links.append({'source': host, 'quality': 'SD', 'url': u})
                    continue

                result = client.request(u, headers={'Referer': f}, timeout='10')

                try:
                    if 'pelispedia' in h: raise Exception()

                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,\s*label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)
                    url = [i[0] for i in url if '720' in i[1]][0]

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url})
                except:
                    pass

                try:
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)

                    for i in url:
                        try:
                            links.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i})
                        except:
                            pass
                except:
                    pass

                try:
                    post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                    post = urllib.urlencode({'link': post})

                    url = urlparse.urljoin(self.base_link, '/gkphp_flv/plugins/gkpluginsphp.php')
                    url = client.request(url, post=post, XHR=True, referer=u, timeout='10')
                    url = json.loads(url)['link']

                    links.append({'source': 'gvideo', 'quality': 'HD', 'url': url})
                except:
                    pass

                try:
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]

                    post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                    post = urllib.urlencode({'sou': 'pic', 'fv': '25', 'url': post})

                    url = client.request(self.protect_link, post=post, XHR=True, timeout='10')
                    url = json.loads(url)[0]['url']

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url})
                except:
                    pass

                try:
                    if not jsunpack.detect(result): raise Exception()

                    result = jsunpack.unpack(result)
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*.*?\'(.+?)\'', url)
                    for i in url:
                        try:
                            i = client.request(i, headers={'Referer': f}, output='geturl', timeout='10')
                            links.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i})
                        except:
                            pass
                except:
                    pass

                try:
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]

                    post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                    token = 'eyJjdCI6InZGS3QySm9KRWRwU0k4SzZoZHZKL2c9PSIsIml2IjoiNDRkNmMwMWE0ZjVkODk4YThlYmE2MzU0NDliYzQ5YWEiLCJzIjoiNWU4MGUwN2UwMjMxNDYxOCJ9'
                    post = urllib.urlencode({'sou': 'pic', 'fv': '0', 'url': post, 'token': token})

                    url = client.request(self.protect_link, post=post, XHR=True, timeout='10')
                    js = json.loads(url)
                    url = [i['url'] for i in js]
                    for i in url:
                        try:
                            i = client.request(i, headers={'Referer': f}, output='geturl', timeout='10')
                            links.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i})
                        except:
                            pass
                except:
                    pass

            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'url': i['url'], 'provider': 'Pelispedia'})

            return sources

        except Exception as e:
            control.log('ERROR PELISP %s' % e)
            return sources