Exemplo n.º 1
0
 def sources(self, url, hostDict, hostprDict):
     """Scrape a movie page for embedded hoster links.

     Reads the page-wide quality label from the '<span class="calidad2">'
     tag and collects every '<iframe src="...">' embed, returning one
     source dict per recognized hoster.

     :param url: page URL to scrape; None yields an empty list
     :param hostDict: known hosters used to validate each link
     :param hostprDict: premium hosters (unused in this scraper)
     :return: list of source dicts; empty on any failure
     """
     sources = []
     try:
         if url is None:
             return sources
         headers = {'User-Agent': client.randomagent()}
         html = self.scraper.get(url, headers=headers).content
         # An IndexError here (label missing) is caught below and simply
         # yields no sources.
         qual = re.compile('<span class="calidad2">(.+?)</span>',
                           flags=re.DOTALL | re.IGNORECASE).findall(html)[0]
         links = re.compile('<iframe src="(.+?)"',
                            flags=re.DOTALL | re.UNICODE | re.MULTILINE
                            | re.IGNORECASE).findall(html)
         for link in links:
             valid, host = source_utils.is_host_valid(link, hostDict)
             # Skip unrecognized hosters, matching the guard used by the
             # other sources() implementations in this module.
             if not valid:
                 continue
             quality, info = source_utils.get_release_quality(qual, link)
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': 'en',
                 'info': info,
                 'url': link,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except Exception:
         # Best-effort scraper: swallow network/parse errors and return
         # whatever was collected so far.
         return sources
Exemplo n.º 2
0
 def sources(self, url, hostDict, hostprDict):
     """Search the site for url['title'] and scrape the first result.

     Follows the first '<div class="item">' search hit, reads its quality
     label and the player iframe URL, and returns at most one source.

     :param url: dict holding at least 'title'; None yields an empty list
     :param hostDict: known free hosters
     :param hostprDict: premium hosters, merged ahead of hostDict
     :return: list with at most one source dict; empty on failure
     """
     sources = []
     try:
         hostDict = hostprDict + hostDict
         if url is None:
             return sources
         headers = {'User-Agent': client.randomagent()}
         title = cleantitle.geturl(url['title']).replace('-', '+')
         search_url = urlparse.urljoin(self.base_link, self.search_link % title)
         r = self.scraper.get(search_url, headers=headers)
         # First search hit only; AttributeError on a miss is caught below.
         item = BeautifulSoup(r.text,
                              'html.parser').find('div', {'class': 'item'})
         page = self.scraper.get(item.find('a')['href'], headers=headers)
         page = BeautifulSoup(page.content, 'html.parser')
         quality = page.find('span', {'class': 'calidad2'}).text
         link = page.find('div', {'class': 'movieplay'}).find('iframe')['src']
         # Anything other than the two known HD labels is treated as SD.
         if quality not in ['1080p', '720p']:
             quality = 'SD'
         valid, host = source_utils.is_host_valid(link, hostDict)
         if valid:
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': 'en',
                 'url': link,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except Exception:
         # Best-effort scraper: return what we have on any failure.
         return sources
Exemplo n.º 3
0
 def resolve(self, url):
     """Resolve a streamty.com embed page to its direct file URL.

     Fetches the page, unpacks the p.a.c.k.e.d JavaScript player blob and
     extracts the 'file:"..."' link. Non-streamty URLs pass through
     unchanged.
     """
     if 'streamty.com' in url:
         h = {'User-Agent': client.randomagent()}
         html = self.scraper.get(url, headers=h).content
         # BUG FIX: the packed JS was searched in an undefined name
         # 'data' (NameError at runtime); the page body is 'html'.
         packed = find_match(html, r"text/javascript'>(eval.*?)\s*</script>")
         unpacked = jsunpack.unpack(packed)
         link = find_match(unpacked, r'file:"([^"]+)"')[0]
         return link
     return url
Exemplo n.º 4
0
 def _createSession(self, customHeaders=None):
     """Create a 'requests.Session' spoofing web-browser headers.

     :param customHeaders: optional dict; key 'UA' overrides the
         User-Agent and 'referer' overrides the Referer. BUG FIX: the
         default was the mutable literal ``{}`` shared across calls; use
         None as the sentinel instead (interface-compatible for callers).
     :return: the configured requests.Session
     """
     if customHeaders is None:
         customHeaders = {}
     session = requests.Session()
     session.headers.update(
         {
             'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
             'User-Agent': customHeaders.get('UA', randomagent()),
             'Accept-Language': 'en-US,en;q=0.5',
             'Referer': customHeaders.get('referer', self.BASE_URL + '/'),
             'DNT': '1'
         }
     )
     return session
Exemplo n.º 5
0
 def _createSession(self, userAgent=None, cookies=None, referer=None):
     """Build a requests.Session whose headers mimic a web browser.

     :param userAgent: optional User-Agent override (random one otherwise)
     :param cookies: optional mapping used to seed the session cookie jar
     :param referer: optional Referer override (defaults to BASE_URL + '/')
     :return: the configured requests.Session
     """
     spoofedHeaders = {
         'Accept': self.DEFAULT_ACCEPT,
         'User-Agent': userAgent or randomagent(),
         'Accept-Language': 'en-US,en;q=0.5',
         'Referer': referer or self.BASE_URL + '/',
         'DNT': '1'
     }
     newSession = requests.Session()
     newSession.headers.update(spoofedHeaders)
     if cookies:
         newSession.cookies.update(cookies)
         newSession.cookies[''] = '__test' # See _getSearch() for more info on this.
     return newSession
Exemplo n.º 6
0
 def _createSession(self, userAgent=None, cookies=None, referer=None):
     """Return a requests.Session preloaded with browser-like headers.

     :param userAgent: optional User-Agent override (random one otherwise)
     :param cookies: optional mapping merged into the session cookie jar
     :param referer: optional Referer override (defaults to BASE_URL + '/')
     :return: the configured requests.Session
     """
     browserProfile = {
         'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
         'User-Agent': userAgent or randomagent(),
         'Accept-Language': 'en-US,en;q=0.5',
         'Referer': referer or self.BASE_URL + '/',
         'Upgrade-Insecure-Requests': '1',
         'DNT': '1'
     }
     newSession = requests.Session()
     newSession.headers.update(browserProfile)
     if cookies:
         newSession.cookies.update(cookies)
     return newSession
Exemplo n.º 7
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Search the site's feed for *title* and return the match's URL.

     Queries the search endpoint, walks every '<item>' entry, and returns
     the first linked result whose cleaned title contains the cleaned
     search title.

     :return: the matched URL string, or None when nothing matches or any
         network/parse step fails.
     """
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(':', ' ').replace(' ', '+'))
         headers = {'User-Agent': client.randomagent()}
         html = self.scraper.get(url, headers=headers).content
         flags = re.DOTALL | re.UNICODE | re.MULTILINE | re.IGNORECASE
         items = re.compile('<item>(.+?)</item>', flags=flags).findall(html)
         # Hoisted out of the loop: the link pattern never changes, and the
         # cleaned wanted title is loop-invariant too.
         link_re = re.compile('<a rel="nofollow" href="(.+?)">(.+?)</a>',
                              flags=flags)
         wanted = cleantitle.get(title)
         for item in items:
             for foundURL, foundTITLE in link_re.findall(item):
                 if wanted in cleantitle.get(foundTITLE):
                     return foundURL
         return None
     except Exception:
         # Treat any failure as "not found".
         return None
Exemplo n.º 8
0
    def episode(self, data, imdb, tvdb, title, premiered, season, episode):
        """Search for '<show> <season>' and return the episode's page metadata.

        :param data: show title (presumably already lower-cased — used
            directly for substring matching; TODO confirm against caller)
        :return: dict with page URL, session fingerprint and episode number,
            or None when no result matches or any step fails.
        """
        try:
            session = self._createSession(randomagent())

            showTitle = data
            stringConstant, searchHTML = self._getSearch(showTitle + ' ' + season, session)

            results = BeautifulSoup(searchHTML, 'html.parser')
            for resultDiv in results.findAll('div', recursive=False):
                resultName = resultDiv.a.text.lower()
                # Require both the show title and the season string to
                # appear in the result name.
                if showTitle not in resultName or season not in resultName:
                    continue
                return {
                    'type': 'episode',
                    'episode': episode,
                    'pageURL': self.BASE_URL + resultDiv.a['href'],
                    'sConstant': stringConstant,
                    'UA': session.headers['User-Agent'],
                    'cookies': session.cookies.get_dict()
                }
            return None # No results found.
        except:
            self._logException()
            return None
Exemplo n.º 9
0
    def movie(self, imdb, title, localtitle, aliases, year):
        """Locate the movie's page via on-site search and return its metadata.

        Matches a search-result <div> whose year appears in its <span> and
        whose link text equals the title or one of the aliases (lower-cased).

        :return: dict with page URL and session fingerprint, or None when
            nothing matches or any step fails.
        """
        try:
            session = self._createSession(randomagent())

            wantedTitle = title.lower()
            stringConstant, searchHTML = self._getSearch(wantedTitle, session)

            # Accept the main title or any alias, compared lower-case.
            acceptedTitles = {wantedTitle}
            if aliases:
                acceptedTitles.update(alias['title'].lower() for alias in aliases)

            resultDivs = BeautifulSoup(searchHTML, 'html.parser', parse_only=SoupStrainer('div', recursive=False))
            for resultDiv in resultDivs:
                yearMatches = resultDiv.span and year in resultDiv.span.text
                if yearMatches and resultDiv.a.text.lower() in acceptedTitles:
                    return {
                        'type': 'movie',
                        'pageURL': self.BASE_URL + resultDiv.a['href'],
                        'sConstant': stringConstant,
                        'UA': session.headers['User-Agent'],
                        'cookies': session.cookies.get_dict()
                    }
            return None # No results found.
        except:
            self._logException()
            return None
    def sources(self, url, hostDict, hostprDict):
        """Resolve playable stream URLs for a previously scraped page.

        POSTs to the site's AJAX endpoint (the id is the trailing '-<id>'
        of the page URL), base64-decodes the reply and reads 'playinfo':
        either an HLS master-playlist URL (string) or a list of entries
        carrying 'link_mp4' google-video links.

        :return: list of source dicts; empty on failure. BUG FIX: the
            outer except previously returned None, unlike every other
            sources() in this module, breaking callers that iterate.
        """
        sources = []
        try:
            if not url:
                return sources

            ref = urlparse.urljoin(self.base_link, url)
            url = urlparse.urljoin(
                self.base_link, self.ajax_link % re.findall(r'-(\w+)$', ref)[0])

            headers = {'Referer': ref, 'User-Agent': client.randomagent()}

            result = client.request(url, headers=headers, post='')
            # NOTE: base64.decodestring is Python-2 only (removed in 3.9);
            # this module already depends on urlparse/basestring, i.e. Py2.
            result = base64.decodestring(result)
            result = json.loads(result).get('playinfo', [])

            if isinstance(result, basestring):
                # Single HLS master playlist: fetch it and expose one CDN
                # source per RESOLUTION variant listed inside.
                result = result.replace('embed.html', 'index.m3u8')
                base_url = re.sub(r'index\.m3u8\?token=[\w\-]+[^/$]*', '',
                                  result)

                playlist = client.request(result, headers=headers)
                variants = re.findall(
                    r'#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)',
                    playlist, re.DOTALL)
                for height, path in variants:
                    sources.append({
                        'source': 'CDN',
                        'quality': source_utils.label_to_quality(height),
                        'language': 'de',
                        'url': base_url + path
                            + source_utils.append_headers(headers),
                        'direct': True,
                        'debridonly': False
                    })
            elif result:
                # List of google-video entries; skip entries without a
                # link or whose quality lookup raises.
                links = [i.get('link_mp4') for i in result]
                for link in [i for i in links if i]:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality':
                                directstream.googletag(link)[0]['quality'],
                            'language': 'de',
                            'url': link,
                            'direct': True,
                            'debridonly': False
                        })
                    except Exception:
                        pass

            return sources
        except Exception:
            # Return the (possibly empty) list instead of None.
            return sources