# Example 1 (score: 0)
 def sources(self, url, hostDict, hostprDict):
     """Scrape hoster links from an ODB page.

     Fetches *url*, pulls the ``odbIframe`` iframe target(s) out of the
     HTML, and returns a list of source dicts (host, quality, language,
     url, direct, debridonly).  On any failure the exception is logged and
     whatever was collected so far is returned.
     """
     sources = []
     try:
         page = self.scraper.get(url).content
         try:
             for iframe_src in re.compile('iframe id="odbIframe" src="(.+?)"').findall(page):
                 # The reported host is the bare domain of the iframe target.
                 domain = iframe_src.split('//')[1].replace('www.', '')
                 domain = domain.split('/')[0].lower()
                 sources.append({
                     'source': domain,
                     'quality': 'HD',
                     'language': 'en',
                     'url': iframe_src,
                     'direct': False,
                     'debridonly': False
                 })
         except Exception:
             failure = traceback.format_exc()
             log_utils.log('ODB - Exception: \n' + str(failure))
             return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ODB - Exception: \n' + str(failure))
         return sources
     return sources
# Example 2 (score: 0)
    def sources(self, url, hostDict, hostprDict):
        """Collect hoster links for a movie or TV episode from ExtraMovie.

        *url* is the urlencoded payload built by movie()/tvshow()/episode().
        Returns a list of source dicts; on failure the traceback is logged
        and the (possibly empty) list gathered so far is returned.
        """
        try:
            sources = []

            # Decode the querystring payload into a plain dict of strings.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query(title)))

            if 'tvshowtitle' in data:
                # TV path: locate the show's post, then scan it for SxxExx links.
                html = self.scraper.get(url).content

                match = re.compile('class="post-item.+?href="(.+?)" title="(.+?)"', re.DOTALL).findall(html)
                for url, item_name in match:
                    if cleantitle.getsearch(title).lower() in cleantitle.getsearch(item_name).lower():
                        season_url = '%02d' % int(data['season'])
                        episode_url = '%02d' % int(data['episode'])
                        sea_epi = 'S%sE%s' % (season_url, episode_url)

                        result = self.scraper.get(url).content
                        regex = re.compile('href="(.+?)"', re.DOTALL).findall(result)
                        for ep_url in regex:
                            if sea_epi in ep_url:
                                # NOTE(review): quality is derived from the show-page
                                # url rather than ep_url — confirm this is intended.
                                quality, info = source_utils.get_release_quality(url)
                                sources.append({'source': 'CDN', 'quality': quality, 'language': 'en',
                                                'url': ep_url, 'direct': False, 'debridonly': False})
            else:
                # Movie path: follow each matching thumbnail to its download links.
                html = self.scraper.get(url).content
                match = re.compile('<div class="thumbnail".+?href="(.+?)" title="(.+?)"', re.DOTALL).findall(html)

                for url, item_name in match:
                    if cleantitle.getsearch(title).lower() in cleantitle.getsearch(item_name).lower():
                        quality, info = source_utils.get_release_quality(url)
                        result = self.scraper.get(url).content
                        regex = re.compile('href="/download.php.+?link=(.+?)"', re.DOTALL).findall(result)

                        for link in regex:
                            if 'server=' not in link:
                                # Links are usually base64-wrapped; fall back to the
                                # raw value when decoding fails.
                                try:
                                    link = base64.b64decode(link)
                                except Exception:
                                    pass
                                try:
                                    host = link.split('//')[1].replace('www.', '')
                                    host = host.split('/')[0].lower()
                                except Exception:
                                    pass
                                # Skip hosts the resolver set does not support.
                                if not self.filter_host(host):
                                    continue
                                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                                'url': link, 'direct': False, 'debridonly': False})

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('ExtraMovie - Exception: \n' + str(failure))
            return sources
# Example 3 (score: 0)
 def movie(self, imdb, title, localtitle, aliases, year):
     """Build the ODB movie page URL from the IMDb id; None on failure."""
     try:
         return self.base_link + self.movie_link % imdb
     except Exception:
         log_utils.log('ODB - Exception: \n' + str(traceback.format_exc()))
         return
# Example 4 (score: 0)
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Identify the show by its IMDb id; episode() resolves it later."""
     try:
         return imdb
     except Exception:
         log_utils.log('ODB - Exception: \n' + str(traceback.format_exc()))
         return
# Example 5 (score: 0)
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Encode the show's identifiers into a querystring payload.

     Returns a urlencoded string of imdb/tvdb/tvshowtitle/year, or None
     when encoding fails (the traceback is logged).
     """
     try:
         url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
         url = urllib.urlencode(url)
         return url
     except Exception:  # narrowed from a bare except (kept SystemExit/KeyboardInterrupt alive)
         log_utils.log('filmsofts.com - Exception: \n' + str(traceback.format_exc()))
         return
# Example 6 (score: 0)
 def movie(self, imdb, title, localtitle, aliases, year):
     """Encode the movie's identifiers into a querystring payload.

     Returns a urlencoded string of imdb/title/year, or None on failure.
     """
     try:
         url = {'imdb': imdb, 'title': title, 'year': year}
         url = urllib.urlencode(url)
         return url
     except Exception:
         # Narrowed from a bare except and now logs the traceback — the old
         # message carried no detail about what actually failed.
         log_utils.log(
             'Ran into problems making the "url" (dict of things)\n' +
             str(traceback.format_exc()))
         return
# Example 7 (score: 0)
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     """Merge episode details into the show payload built by tvshow().

     Returns the updated urlencoded payload, or None on failure (logged).
     """
     try:
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
         url = urllib.urlencode(url)
         return url
     except Exception:  # narrowed from a bare except
         log_utils.log('filmsofts.com - Exception: \n' + str(traceback.format_exc()))
         return
# Example 8 (score: 0)
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Encode show identifiers (plus a US-title alias) into a querystring.

     FIX: the caller's *aliases* list is no longer mutated in place — a
     copy with the extra alias appended is encoded instead, so repeated
     calls no longer accumulate duplicate aliases in the caller's list.
     """
     try:
         aliases = list(aliases) + [{'country': 'us', 'title': tvshowtitle}]
         url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
         url = urllib.urlencode(url)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ExtraMovie - Exception: \n' + str(failure))
         return
# Example 9 (score: 0)
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     """Build the episode page URL from the show id stored in *url*.

     tvshow() stored the IMDb id as the payload; returns None when no
     payload was passed or on failure (logged).
     """
     try:
         if not url:
             return
         return self.base_link + self.tv_link % (url, season, episode)
     except Exception:
         log_utils.log('ODB - Exception: \n' + str(traceback.format_exc()))
         return
# Example 10 (score: 0)
    def movie(self, imdb, title, localtitle, aliases, year):
        """Encode movie identifiers; bail out when no debrid service is active."""
        if debrid.status() is False:
            return

        try:
            return urllib.urlencode({'imdb': imdb, 'title': title, 'year': year})
        except Exception:
            log_utils.log('TPB - Exception: \n' + str(traceback.format_exc()))
            return
# Example 11 (score: 0)
 def movie(self, imdb, title, localtitle, aliases, year):
     """Encode movie identifiers (including the localised title).

     Returns a urlencoded string, or None on failure (logged).
     """
     try:
         return urllib.urlencode({
             'imdb': imdb,
             'title': title,
             'localtitle': localtitle,
             'year': year
         })
     except Exception:  # narrowed from a bare except
         failure = traceback.format_exc()
         log_utils.log('Library - Exception: \n' + str(failure))
         return
# Example 12 (score: 0)
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     """Merge episode details into the show's querystring payload.

     Returns the updated urlencoded payload, or None when *url* is missing
     or on failure (logged).
     """
     try:
         if url is None:
             return
         params = urlparse.parse_qs(url)
         params = dict((k, v[0] if v else '') for k, v in params.items())
         params.update({'title': title, 'premiered': premiered,
                        'season': season, 'episode': episode})
         return urllib.urlencode(params)
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ExtraMovie - Exception: \n' + str(failure))
         return
# Example 13 (score: 0)
    def sources(self, url, hostDict, hostprDict):
        """Collect supported hoster links from an XWatchSeries episode page.

        Follows each outbound link on the page, unwraps the proxied,
        base64-encoded 'r' parameter, and keeps only hosts present in
        hostDict.  Both previously-bare excepts are narrowed to Exception
        so SystemExit/KeyboardInterrupt are no longer swallowed.
        """
        try:
            sources = []

            if url is None:
                return sources

            r = self.scraper.get(url, headers={
                'referer': self.base_link
            }).content
            links = client.parseDOM(r,
                                    'a',
                                    ret='href',
                                    attrs={'target': '.+?'})
            # Drop duplicate links while preserving order.
            links = [x for y, x in enumerate(links) if x not in links[:y]]

            for i in links:
                try:
                    url = i
                    url = proxy.parse(url)
                    # The real target is base64-encoded in the 'r' parameter.
                    url = urlparse.parse_qs(
                        urlparse.urlparse(url).query)['r'][0]
                    url = url.decode('base64')
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        continue
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except Exception:
                    # Best-effort per link: a malformed link is simply skipped.
                    pass

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return sources
# Example 14 (score: 0)
def resolver(url, debrid):
    """Resolve *url* through the named debrid service.

    Selects the entry in the global ``debrid_resolvers`` whose ``name``
    matches *debrid*, logs in, and returns the direct media URL.  Any
    failure (including no matching resolver) is logged as a warning and
    None is returned.
    """
    try:
        matches = [r for r in debrid_resolvers if r.name == debrid]
        chosen = matches[0]

        chosen.login()
        host, media_id = chosen.get_host_and_id(url)
        return chosen.get_media_url(host, media_id)
    except Exception as e:
        log_utils.log('%s Resolve Failure: %s' % (debrid, e),
                      log_utils.LOGWARNING)
        return None
# Example 15 (score: 0)
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Merge episode details into the show payload built by tvshow().

        Returns the updated urlencoded payload, or None when *url* is
        missing or on failure (logged).
        """
        try:
            if url is None:
                return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url.update({
                'premiered': premiered,
                'season': season,
                'episode': episode
            })
            return urllib.urlencode(url)
        except Exception:  # narrowed from a bare except
            failure = traceback.format_exc()
            log_utils.log('Library - Exception: \n' + str(failure))
            return
# Example 16 (score: 0)
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Find the page URL of a specific episode on XWatchSeries.

        *url* is the show page.  The episode is matched first by cleaned
        title + air date, then by title alone, then by air date alone.
        Returns the episode href, or None (logged) when no unambiguous
        match exists.  The bare except is narrowed to Exception; the list
        transforms are kept byte-identical as they are order-dependent.
        """
        try:
            if url is None:
                return

            r = self.scraper.get(url, headers={
                'referer': self.base_link
            }).content

            r = client.parseDOM(r, 'li', attrs={'itemprop': 'episode'})

            t = cleantitle.get(title)

            # Build (href, episode-name, air-date) triples; the paired list
            # additions keep entries whose name or date is missing.
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'span', attrs={'itemprop': 'name'}),
                  re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2])
                 for i in r if i[1]] + [(i[0], None, i[2])
                                        for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0])
                 for i in r if i[2]] + [(i[0], i[1], None)
                                        for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

            # Prefer an exact title+date match, then fall back progressively.
            url = [
                i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]
            ][:1]
            if not url:
                url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url:
                url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url:
                raise Exception()

            return url[0][0]
        except Exception:  # narrowed from a bare except
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return
# Example 17 (score: 0)
def request(url, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, XHR=False, limit=None, referer=None, cookie=None, compression=True, output='', timeout='30'):
    """urllib2-based HTTP fetch with Cloudflare/Sucuri/Blazingfast handling.

    Parameters:
        url          -- target URL; '//host/...' defaults to http
        close        -- close the response object before returning
        redirect     -- when False, installs a handler that returns 3xx
                        responses instead of following them
        error        -- when True, HTTP error responses are still processed
                        instead of returning None
        proxy        -- HTTP proxy address string
        post         -- dict (urlencoded automatically) or pre-encoded body;
                        presence makes the call a POST
        headers      -- dict of extra request headers (caller values win)
        mobile       -- send an iPhone User-Agent instead of a random one
        XHR          -- add X-Requested-With: XMLHttpRequest
        limit        -- max KB of body to read ('0' reads 224KB); when set,
                        gzip is not requested
        referer      -- Referer header value
        cookie       -- Cookie header value
        compression  -- request gzip encoding when no limit is given
        output       -- '', 'cookie', 'geturl', 'headers', 'chunk',
                        'file_size' or 'extended'
        timeout      -- socket timeout in seconds (string or int)

    Returns the body by default; other *output* modes return the cookie
    string, final URL, response headers, a 16KB chunk, the Content-Length,
    or an (body, code, response-headers, request-headers, cookie) tuple.
    Returns None on failure (logged).

    NOTE(review): Python 2 only (urllib2 / cookielib / StringIO).
    """
    try:
        if not url:
            return

        handlers = []

        # Optional HTTP proxy support.
        if not proxy == None:
            handlers += [urllib2.ProxyHandler({'http':'%s' % (proxy)}), urllib2.HTTPHandler]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)


        # A cookie jar is only needed when cookies must be returned or the
        # connection stays open.
        if output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        # Python 2.7.9-2.7.11 verify certificates by default; disable that
        # there so sites with bad certificates keep working.
        if (2, 7, 8) < sys.version_info < (2, 7, 12):
            try:
                import ssl; ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                opener = urllib2.install_opener(opener)
            except:
                pass

        # Protocol-relative URLs default to http.
        if url.startswith('//'): url = 'http:' + url

        # Assemble request headers; caller-supplied values take precedence.
        _headers ={}
        try: _headers.update(headers)
        except: pass
        if 'User-Agent' in _headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            _headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            _headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in _headers:
            pass
        elif referer is not None:
            _headers['Referer'] = referer
        if not 'Accept-Language' in _headers:
            _headers['Accept-Language'] = 'en-US'
        if 'X-Requested-With' in _headers:
            pass
        elif XHR == True:
            _headers['X-Requested-With'] = 'XMLHttpRequest'
        if 'Cookie' in _headers:
            pass
        elif not cookie == None:
            _headers['Cookie'] = cookie
        if 'Accept-Encoding' in _headers:
            pass
        elif compression and limit is None:
            _headers['Accept-Encoding'] = 'gzip'


        if redirect == False:

            #old implementation
            #class NoRedirection(urllib2.HTTPErrorProcessor):
            #    def http_response(self, request, response): return response

            #opener = urllib2.build_opener(NoRedirection)
            #opener = urllib2.install_opener(opener)

            # Handler that hands back the 3xx response instead of following it.
            class NoRedirectHandler(urllib2.HTTPRedirectHandler):
                def http_error_302(self, req, fp, code, msg, headers):
                    infourl = urllib.addinfourl(fp, headers, req.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl
                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)

            try: del _headers['Referer']
            except: pass

        if isinstance(post, dict):
            post = utils.byteify(post)
            post = urllib.urlencode(post)

        url = utils.byteify(url)

        request = urllib2.Request(url, data=post)
        _add_request_header(request, _headers)


        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:

            # Cloudflare serves its browser-verification page as a 503; solve
            # it via cfcookie and retry with the clearance cookie.
            if response.code == 503:
                cf_result = response.read(5242880)
                try: encoding = response.info().getheader('Content-Encoding')
                except: encoding = None
                if encoding == 'gzip':
                    cf_result = gzip.GzipFile(fileobj=StringIO.StringIO(cf_result)).read()

                if 'cf-browser-verification' in cf_result:

                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)

                    if not netloc.endswith('/'): netloc += '/'

                    ua = _headers['User-Agent']

                    cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)

                    _headers['Cookie'] = cf

                    request = urllib2.Request(url, data=post)
                    _add_request_header(request, _headers)

                    response = urllib2.urlopen(request, timeout=int(timeout))
                else:
                    log_utils.log('Request-Error (%s): %s' % (str(response.code), url), log_utils.LOGDEBUG)
                    if error == False: return
            else:
                log_utils.log('Request-Error (%s): %s' % (str(response.code), url), log_utils.LOGDEBUG)
                if error == False: return


        # Short-circuit output modes that do not need the full body.
        if output == 'cookie':
            try: result = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: result = cf
            except: pass
            if close == True: response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close == True: response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close == True: response.close()
            return result

        elif output == 'chunk':
            try: content = int(response.headers['Content-Length'])
            except: content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close == True: response.close()
            return result

        elif output == 'file_size':
            try: content = int(response.headers['Content-Length'])
            except: content = '0'
            response.close()
            return content

        # Read the body, honouring *limit* (in KB) when given.
        if limit == '0':
            result = response.read(224 * 1024)
        elif not limit == None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)

        try: encoding = response.info().getheader('Content-Encoding')
        except: encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()


        # Sucuri cloudproxy JS challenge: compute the cookie and refetch.
        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)

            _headers['Cookie'] = su

            request = urllib2.Request(url, data=post)
            _add_request_header(request, _headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

            try: encoding = response.info().getheader('Content-Encoding')
            except: encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()

        # Blazingfast JS challenge: fetch the cookie, then redo a basic request.
        if 'Blazingfast.io' in result and 'xhr.open' in result:
            netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
            ua = _headers['User-Agent']
            _headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua, timeout)

            result = _basic_request(url, headers=_headers, post=post, timeout=timeout, limit=limit)

        if output == 'extended':
            try: response_headers = dict([(item[0].title(), item[1]) for item in response.info().items()])
            except: response_headers = response.headers
            response_code = str(response.code)
            try: cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: cookie = cf
            except: pass
            if close == True: response.close()
            return (result, response_code, response_headers, _headers, cookie)
        else:
            if close == True: response.close()
            return result
    except Exception as e:
        log_utils.log('Request-Error: (%s) => %s' % (str(e), url), log_utils.LOGDEBUG)
        return
# Example 18 (score: 0)
def __getTrakt(url, post=None):
    """Perform an authenticated Trakt v2 API call with one-shot token refresh.

    *url* is a path joined onto BASE_URL; *post* (dict) is JSON-encoded and
    makes the call a POST.  Returns (body, response_headers) on success,
    None on server errors (5xx family) and 404, and retries once with a
    refreshed OAuth token when the first attempt returns 401/405.  Errors
    are logged, never raised.
    """
    try:
        url = urlparse.urljoin(BASE_URL, url)
        post = json.dumps(post) if post else None
        headers = {
            'Content-Type': 'application/json',
            'trakt-api-key': V2_API_KEY,
            'trakt-api-version': 2
        }

        # Attach the bearer token when the user has linked an account.
        if getTraktCredentialsInfo():
            headers.update({
                'Authorization':
                'Bearer %s' % control.setting('trakt.token')
            })

        result = client.request(url,
                                post=post,
                                headers=headers,
                                output='extended',
                                error=True)

        resp_code = result[1]
        resp_header = result[2]
        result = result[0]

        # Transient server-side failures: give up quietly.
        if resp_code in [
                '500', '502', '503', '504', '520', '521', '522', '524'
        ]:
            log_utils.log('Temporary Trakt Error: %s' % resp_code,
                          log_utils.LOGWARNING)
            return
        elif resp_code in ['404']:
            log_utils.log('Object Not Found : %s' % resp_code,
                          log_utils.LOGWARNING)
            return


#        elif resp_code in ['429']:
#            log_utils.log('Trakt Rate Limit Reached: %s' % resp_code, log_utils.LOGWARNING)
#            return

        # Anything other than an auth failure is final.
        if resp_code not in ['401', '405']:
            return result, resp_header

        # Auth failed: exchange the refresh token for a new access token.
        oauth = urlparse.urljoin(BASE_URL, '/oauth/token')
        opost = {
            'client_id': V2_API_KEY,
            'client_secret': CLIENT_SECRET,
            'redirect_uri': REDIRECT_URI,
            'grant_type': 'refresh_token',
            'refresh_token': control.setting('trakt.refresh')
        }

        result = client.request(oauth, post=json.dumps(opost), headers=headers)
        result = utils.json_loads_as_str(result)

        token, refresh = result['access_token'], result['refresh_token']

        control.setSetting(id='trakt.token', value=token)
        control.setSetting(id='trakt.refresh', value=refresh)

        headers['Authorization'] = 'Bearer %s' % token

        # Retry the original call once with the fresh token.
        result = client.request(url,
                                post=post,
                                headers=headers,
                                output='extended',
                                error=True)
        return result[0], result[2]
    except Exception as e:
        log_utils.log('Unknown Trakt Error: %s' % e, log_utils.LOGWARNING)
        pass
# Example 19 (score: 0)
    def sources(self, url, hostDict, hostprDict):
        """Search filmrls.com and collect debrid-friendly hoster links.

        Builds search queries from the payload in *url* and scans the
        result pages' "quote" blocks for hoster links matching the title
        and the SxxExx tag (TV) or year (movies).

        FIX: for movies, ``options`` used to stay empty, so the search loop
        below never executed and movies never produced any sources; an
        empty option is now appended for the movie case.  The bare except
        is also narrowed to Exception.
        """
        sources = []
        try:
            if url is None:
                return sources
            if not debrid.status():
                return sources

            hostDict = hostDict + hostprDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = (data['tvshowtitle']
                     if 'tvshowtitle' in data else data['title'])
            # Filter tag: SxxExx for episodes, release year for movies.
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query_bases = []
            options = []
            if 'tvshowtitle' in data:
                query_bases.append('%s ' %
                                   (data['tvshowtitle'].replace("-", "")))
                if 'year' in data:
                    query_bases.append('%s %s ' %
                                       (data['tvshowtitle'], data['year']))
                options.append('S%02dE%02d' %
                               (int(data['season']), int(data['episode'])))
                options.append('S%02d' % (int(data['season'])))
            else:
                query_bases.append('%s %s ' % (data['title'], data['year']))
                query_bases.append('%s ' % (data['title']))
                # Movies need no SxxExx suffix — but without at least one
                # option the loop below would never run at all.
                options.append('')

            for option in options:
                for query_base in query_bases:
                    # Strip characters the search endpoint rejects.
                    q = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '',
                               query_base + option)
                    q = q.replace("  ", " ").replace(" ", "+")
                    url = self.base_link + self.search_link % q + self.search_options
                    html = self.scraper.get(url)
                    if html.status_code != 200:
                        continue
                    posts = client.parseDOM(html.content,
                                            "div",
                                            attrs={"class": "dtitle"})
                    for post in posts:
                        links = client.parseDOM(post, "a", ret='href')
                        if not links:
                            continue
                        html = self.scraper.get(links[0])
                        if html.status_code != 200:
                            continue
                        quotes = client.parseDOM(html.content,
                                                 "div",
                                                 attrs={"class": "quote"})
                        for quote in quotes:
                            for href in client.parseDOM(quote, "a", ret='href'):
                                quality = source_utils.check_sd_url(href)
                                href = href.encode('utf-8')
                                valid, host = source_utils.is_host_valid(
                                    href, hostDict)
                                # Skip archives and unsupported hosts.
                                if any(x in href for x in
                                       ['.rar', '.zip', '.iso']):
                                    continue
                                if not valid:
                                    continue
                                if hdlr in href.upper() and cleantitle.get(
                                        title) in cleantitle.get(href):
                                    sources.append({'source': host,
                                                    'quality': quality,
                                                    'language': 'en',
                                                    'url': href,
                                                    'direct': False,
                                                    'debridonly': False})
                # Stop at the first option that produced results.
                if len(sources) > 0:
                    return sources
            return sources
        except Exception:
            log_utils.log('filmrls.com - Exception: \n' +
                          str(traceback.format_exc()))
            return sources
# Example 20 (score: 0)
    def sources(self, url, hostDict, hostprDict):
        """Scrape direct video links for a movie from the site's jwplayer API.

        Searches by title+year, pulls the (jwplayer-id, base64 payload)
        pair from each result page, posts it to the apiplayer endpoint and
        collects the returned file/label pairs as direct sources.

        FIXES over the previous version:
        - ``self.search_link`` is no longer reassigned, so the search path
          no longer grows with every call.
        - ``ids_b64s`` / ``links_qual`` are initialised before the loops,
          so ALL search results are processed instead of only the last one
          (previously links_qual could even be undefined with no results).
        - The apiplayer POST now sends each entry's own payload (the_b64)
          instead of the stale loop-leaked ``b64_string``.
        """
        sources = []
        try:
            # No link returned by movie()/tvshow(): nothing to do.
            if not url:
                return sources

            # Grab title and year (cant use imdb code here).
            params = urlparse.parse_qs(url)
            title = params['title'][0]
            year = params['year'][0]

            searchlink = self.search_link + title + ' ' + year
            url = urlparse.urljoin(self.base_link, searchlink)

            html = self.scraper.get(url).content
            soup = BeautifulSoup(html, "html.parser")
            # Collect the target page of every search result.
            result_links = [
                result.find("a", href=True)['href']
                for result in soup.findAll("div", {"class": "result-item"})
            ]

            # Extract the (jwplayer-id, base64 payload) pair from each page.
            ids_b64s = []
            for result_link in result_links:
                html = self.scraper.get(result_link).content
                soup = BeautifulSoup(html, "html.parser")
                for javascript in soup.findAll("script",
                                               {"type": "text/javascript"}):
                    javascript = str(javascript)
                    if "var Player" in javascript and "LoadPlayer" in javascript:
                        # This is the right script: grab the jwplayer id and
                        # the base64 payload passed to LoadPlayer.
                        jw_id = 'jwplayer-' + re.search(
                            'jwplayer-(\d+)', javascript).groups(0)[0]
                        b64_string = re.search('(?<=jwplayer)(.*)(?="\);)',
                                               javascript).groups(0)[0]
                        b64_string = b64_string.split('","')[1]
                        ids_b64s.append([jw_id, b64_string])
                        break

            # Ask the apiplayer endpoint for the actual video files.
            links_qual = []
            for the_id, the_b64 in ids_b64s:
                post = {'id': the_id, 'data': the_b64}
                html = self.scraper.post(urlparse.urljoin(
                    self.base_link, '/wp-content/plugins/apiplayer/load.php'),
                                         data=post).content
                soup = BeautifulSoup(html, 'html.parser')
                for javascript in soup.findAll("script",
                                               {"type": "text/javascript"}):
                    javascript = str(javascript)
                    if ").setup({" in javascript:
                        # The setup() call embeds a JSON-ish sources array.
                        files = re.search('(?<=sources: \[)(.*)(?=])',
                                          javascript).groups()[0]
                        files = json.loads("[" + files + "]")
                        for f in files:
                            links_qual.append([f['file'], f['label']])

            for link, quality in links_qual:
                host = link.split('//')[1].replace('www.', '').split('/')[0]
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'info': '',
                    'direct': True,
                    'debridonly': False
                })
            return sources
        except Exception as e:
            log_utils.log('EXCEPTION MSG: ' + str(e))
            return sources
Exemplo n.º 21
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve a movie/episode to a file already in the local Kodi library.

        url         -- query string built by the provider's movie()/episode()
                       (title/year/imdb/... fields), parsed with parse_qs
        hostDict    -- unused; library files are local and direct
        hostprDict  -- unused

        Returns a list holding at most one source dict that points at the
        local file, or an empty list on any failure.
        """
        sources = []

        try:
            if url is None:
                return sources

            # Flatten parse_qs' {key: [value, ...]} into {key: value}.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            content_type = 'episode' if 'tvshowtitle' in data else 'movie'

            # Library years can disagree with the metadata provider by one,
            # so accept the requested year +/- 1.
            years = (data['year'], str(int(data['year']) + 1),
                     str(int(data['year']) - 1))

            if content_type == 'movie':
                title = cleantitle.get(data['title'])
                ids = [data['imdb']]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title", "originaltitle", "file"]}, "id": 1}'
                    % years)
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['movies']

                # Match by IMDB id first, then fall back to the cleaned
                # (original)title. Use .get so a missing 'originaltitle'
                # can never raise.
                r = [
                    i for i in r if str(i['imdbnumber']) in ids or title in [
                        cleantitle.get(i['title'].encode('utf-8')),
                        cleantitle.get(
                            i.get('originaltitle', i['title']).encode('utf-8'))
                    ]
                ]
                # Skip .strm stubs: they point back at addons, not at files.
                r = [
                    i for i in r
                    if not i['file'].encode('utf-8').endswith('.strm')
                ][0]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"properties": ["streamdetails", "file"], "movieid": %s }, "id": 1}'
                    % str(r['movieid']))
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['moviedetails']
            elif content_type == 'episode':
                title = cleantitle.get(data['tvshowtitle'])
                season, episode = data['season'], data['episode']
                ids = [data['imdb'], data['tvdb']]

                # 'originaltitle' is requested explicitly: the title match
                # below reads it, and Kodi only returns requested properties.
                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title", "originaltitle"]}, "id": 1}'
                    % years)
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['tvshows']

                r = [
                    i for i in r if str(i['imdbnumber']) in ids or title in [
                        cleantitle.get(i['title'].encode('utf-8')),
                        cleantitle.get(
                            i.get('originaltitle', i['title']).encode('utf-8'))
                    ]
                ][0]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["file"], "tvshowid": %s }, "id": 1}'
                    % (str(season), str(episode), str(r['tvshowid'])))
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['episodes']

                # Skip .strm stubs here too.
                r = [
                    i for i in r
                    if not i['file'].encode('utf-8').endswith('.strm')
                ][0]

                r = control.jsonrpc(
                    '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"properties": ["streamdetails", "file"], "episodeid": %s }, "id": 1}'
                    % str(r['episodeid']))
                r = unicode(r, 'utf-8', errors='ignore')
                r = json.loads(r)['result']['episodedetails']

            url = r['file'].encode('utf-8')

            # Derive a quality label from the stored video width; -1 maps
            # to the "unknown" label in source_utils.
            try:
                quality = int(r['streamdetails']['video'][0]['width'])
            except Exception:
                quality = -1

            quality = source_utils.label_to_quality(quality)

            # Best-effort extra info: file size and container extension.
            info = []
            try:
                f = control.openFile(url)
                s = f.size()
                f.close()
                s = '%.2f GB' % (float(s) / 1024 / 1024 / 1024)
                info.append(s)
            except Exception:
                pass
            try:
                e = urlparse.urlparse(url).path.split('.')[-1].upper()
                info.append(e)
            except Exception:
                pass
            info = ' | '.join(info)
            info = info.encode('utf-8')

            sources.append({
                'source': '0',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'local': True,
                'direct': True,
                'debridonly': False
            })

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('Library - Exception: \n' + str(failure))
            return sources