Example #1
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            t = cleantitle.get(data['tvshowtitle'])
            title = data['tvshowtitle']
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            year = re.findall('(\d{4})', premiered)[0]
            # years is computed but unused below; this variant matches the exact year only
            years = [str(year), str(int(year) + 1), str(int(year) - 1)]

            # Cached season-page lookup; keep at most two candidates whose
            # cleaned title and season number both match
            r = cache.get(self.ymovies_info_season, 720, title, season)
            r = [(i[0],
                  re.findall('(.+?)\s+(?:-|)\s+season\s+(\d+)$', i[1].lower()))
                 for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
            r = [
                i[0] for i in r
                if t == cleantitle.get(i[1]) and season == '%01d' % int(i[2])
            ][:2]
            r = [(i, re.findall('(\d+)', i)[-1]) for i in r]

            for i in r:
                try:
                    y, q = cache.get(self.ymovies_info, 9000, i[1])
                    if not y == year: raise Exception()
                    return urlparse.urlparse(
                        i[0]).path + '?episode=%01d' % int(episode)
                except:
                    pass
        except:
            return
Example #2
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            t = cleantitle.get(data['tvshowtitle'])
            year = re.findall('(\d{4})', premiered)[0]
            years = [str(year), str(int(year) + 1), str(int(year) - 1)]
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            q = self.search_link_2 % (urllib.quote_plus(
                '%s - Season %s' % (data['tvshowtitle'], season)))
            q = urlparse.urljoin(self.base_link, q)

            h = {'X-Requested-With': 'XMLHttpRequest'}
            u = urlparse.urljoin(self.base_link, self.search_link)
            p = urllib.urlencode(
                {'keyword': '%s - Season %s' % (data['tvshowtitle'], season)})

            r = self.request(u, headers=h, post=p)[0]

            try:
                r = json.loads(r)
            except:
                r = None

            if r == None:
                r = self.request(q, headers=None, post=None)[0]
                r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
            else:
                r = r['content']
                r = zip(
                    client.parseDOM(r,
                                    'a',
                                    ret='href',
                                    attrs={'class': 'ss-title'}),
                    client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))

            r = [(i[0], re.findall('(.+?) - season (\d+)$', i[1].lower()))
                 for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i for i in r if t == cleantitle.get(i[1])]
            r = [i[0] for i in r if season == '%01d' % int(i[2])][:2]
            r = [(i, re.findall('(\d+)', i)[-1]) for i in r]

            for i in r:
                try:
                    y, q = cache.get(self.onemovies_info, 9000, i[1])
                    if not y in years: raise Exception()
                    return urlparse.urlparse(
                        i[0]).path + '?episode=%01d' % int(episode)
                except:
                    pass
        except:
            return
Example #3
    def movie(self, imdb, title, year):
        try:
            t = cleantitle.get(title)

            q = '/search/%s.html' % (urllib.quote_plus(
                cleantitle.query(title)))
            q = urlparse.urljoin(self.base_link, q)

            for i in range(3):
                r = client.request(q)
                if not r == None: break

            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
            r = [i[0] for i in r if t == cleantitle.get(i[1])][:2]
            r = [(i, re.findall('(\d+)', i)[-1]) for i in r]

            for i in r:
                try:
                    y, q = cache.get(self.ymovies_info, 9000, i[1])
                    if not y == year: raise Exception()
                    return urlparse.urlparse(i[0]).path
                except:
                    pass
        except:
            return
Example #4
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            if (self.user == '' or self.password == ''): raise Exception()

            url = cache.get(self.ororo_tvcache, 120, self.user)
            url = [i[0] for i in url if imdb == i[1]][0]
            url = self.show_link % url

            return url
        except:
            return
Example #5
    def movie(self, imdb, title, year):
        try:
            if (self.user == '' or self.password == ''): raise Exception()

            url = cache.get(self.ororo_moviecache, 60, self.user)
            url = [i[0] for i in url if imdb == i[1]][0]
            url = self.movie_link % url

            return url
        except:
            return
Example #6
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            result = cache.get(self.dizigold_tvcache, 120)

            tvshowtitle = cleantitle.get(tvshowtitle)

            result = [i[0] for i in result if tvshowtitle == i[1]][0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #7
    def movie(self, imdb, title, year):
        try:
            t = cleantitle.get(title)

            q = self.search_link_2 % (urllib.quote_plus(
                cleantitle.query(title)))
            q = urlparse.urljoin(self.base_link, q)

            h = {'X-Requested-With': 'XMLHttpRequest'}
            u = urlparse.urljoin(self.base_link, self.search_link)
            p = urllib.urlencode({'keyword': title})

            r = self.request(u, headers=h, post=p)[0]

            try:
                r = json.loads(r)
            except:
                r = None

            if r == None:
                r = self.request(q, headers=None, post=None)[0]
                r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
            else:
                r = r['content']
                r = zip(
                    client.parseDOM(r,
                                    'a',
                                    ret='href',
                                    attrs={'class': 'ss-title'}),
                    client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))

            r = [i[0] for i in r if t == cleantitle.get(i[1])][:2]
            r = [(i, re.findall('(\d+)', i)[-1]) for i in r]

            for i in r:
                try:
                    y, q = cache.get(self.onemovies_info, 9000, i[1])
                    if not y == year: raise Exception()
                    return urlparse.urlparse(i[0]).path
                except:
                    pass
        except:
            return
Example #8
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            year = re.findall('(\d{4})', premiered)[0]
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            tvshowtitle = '%s %s: Season %s' % (data['tvshowtitle'], year,
                                                season)

            url = cache.get(self.pidtv_tvcache, 120, tvshowtitle)

            if url == None: raise Exception()

            url += '?episode=%01d' % int(episode)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #9
    def request(self, endpoint, query = None):
        try:
            # Encode the query parameters, if any
            if (query != None):
                query = '?' + urllib.urlencode(query)
            else:
                query = ''

            # Build the full request URL
            request = self.api_url % (endpoint, query)

            # Send the request, serving the response from cache when available
            # (timeout of 24, presumably hours)
            response = cache.get(client.request, 24, request)

            # Return the result as a dictionary
            return json.loads(response)
        except:
            pass

        return {}
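
The wrapper above always returns a dictionary ({} on any failure), so callers never guard against network or JSON errors themselves. A minimal usage sketch, assuming a hypothetical api instance and endpoint name (neither appears in the source above):

# 'search' and 'keyword' are placeholders; substitute whatever self.api_url expects
result = api.request('search', query={'keyword': 'inception', 'page': 1})
for item in result.get('items', []):
    print item.get('title')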
Example #10
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if url.startswith('http'): self.base_link = url

            url = urlparse.urljoin(self.base_link, url)
            url = referer = url.replace('/watching.html', '')

            try:
                url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except:
                episode = None

            vid_id = re.findall('-(\d+)', url)[-1]

            quality = cache.get(self.onemovies_info, 9000, vid_id)[1].lower()
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd': quality = 'HD'
            else: quality = 'SD'

            try:
                headers = {
                    'X-Requested-With': 'XMLHttpRequest',
                    'Referer': url
                }

                u = urlparse.urljoin(self.base_link, self.server_link % vid_id)

                r = self.request(u, headers=headers, post=None)[0]

                r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
                r = zip(client.parseDOM(r, 'a', ret='onclick'),
                        client.parseDOM(r, 'a'))
                r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]

                if not episode == None:
                    r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
                else:
                    r = [i[0] for i in r]

                r = [re.findall('(\d+),(\d+)', i) for i in r]
                r = [i[0][:2] for i in r if len(i) > 0]

                # Map the site's numeric server ids to known hosters
                links = []

                links += [{
                    'source': 'gvideo',
                    'url': self.direct_link + i[1],
                    'direct': True
                } for i in r if 2 <= int(i[0]) <= 11]

                links += [{
                    'source': 'openload.co',
                    'url': self.embed_link + i[1],
                    'direct': False
                } for i in r if i[0] == '14']

                links += [{
                    'source': 'videowood.tv',
                    'url': self.embed_link + i[1],
                    'direct': False
                } for i in r if i[0] == '12']

                # Kodi convention: a '|Header=Value' suffix on the URL carries
                # request headers through to the player
                head = '|' + urllib.urlencode(headers)

                for i in links:
                    sources.append({
                        'source':
                        i['source'],
                        'quality':
                        quality,
                        'language':
                        'en',
                        'url':
                        urlparse.urljoin(self.base_link, i['url']) + head,
                        'direct':
                        i['direct'],
                        'debridonly':
                        False
                    })
            except:
                pass

            return sources
        except:
            return sources
Example #11
def cachesyncTVShows(timeout=0):
    indicators = cache.get(syncTVShows,
                           timeout,
                           control.setting('trakt.user').strip(),
                           table='trakt')
    return indicators
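
Every snippet on this page funnels expensive calls through cache.get(function, timeout, *args). The real cache module is not shown here; a minimal in-memory sketch of the contract these call sites assume (timeout presumably in hours, keyword arguments such as table='trakt' accepted but ignored) might look like:

import time

_store = {}  # (function name, args) -> (timestamp, value)

def get(function, timeout, *args, **kwargs):
    # kwargs like table='trakt' select a backing store in the real module;
    # this sketch keeps everything in a single dict and ignores them
    key = (function.__name__,) + args
    now = time.time()
    if key in _store:
        stamp, value = _store[key]
        # assumption: timeout is in hours, and timeout <= 0 forces a refresh
        if timeout > 0 and now - stamp < timeout * 3600:
            return value
    value = function(*args)
    _store[key] = (now, value)
    return value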
Example #12
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            try:
                if not 'tvshowtitle' in data: raise Exception()

                links = []

                f = ['S%02dE%02d' % (int(data['season']), int(data['episode']))]
                t = data['tvshowtitle']

                q = base64.b64decode(self.search_link) + urllib.quote_plus('%s %s' % (t, f[0]))
                q = urlparse.urljoin(self.base_link, q)

                result = client.request(q)
                result = json.loads(result)
            except:
                links = result = []

            for i in result:
                try:
                    if not cleantitle.get(t) == cleantitle.get(i['showName']): raise Exception()

                    y = i['release']
                    y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]').findall(y)[-1]
                    y = y.upper()
                    if not any(x == y for x in f): raise Exception()

                    quality = i['quality']

                    size = i['size']
                    size = float(size)/1024
                    size = '%.2f GB' % size

                    if 'X265' in quality: info = '%s | HEVC' % size
                    else: info = size

                    if '1080P' in quality: quality = '1080p'
                    elif quality in ['720P', 'WEBDL']: quality = 'HD'
                    else: quality = 'SD'

                    url = i['links']
                    for x in url.keys(): links.append({'url': url[x], 'quality': quality, 'info': info})
                except:
                    pass

            for i in links:
                try:
                    url = i['url']
                    if len(url) > 1: raise Exception()
                    url = url[0].encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostprDict: raise Exception()
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': i['quality'], 'language': 'en', 'url': url, 'info': i['info'], 'direct': False, 'debridonly': True})
                except:
                    pass

            try:
                hostDict2 = [(i.rsplit('.', 1)[0], i) for i in hostDict]

                # Fall back to the site's a-z index; the %s is the first letter
                # of the title after stripping any leading THE/A
                q = ('/tv/a-z/%s', data['tvshowtitle']) if 'tvshowtitle' in data else ('/movies/a-z/%s', data['title'])
                q = q[0] % re.sub('^THE\s+|^A\s+', '', q[1].strip().upper())[0]

                url = cache.get(self.directdl_cache, 120, q)
                url = [i[0] for i in url if data['imdb'] == i[1]][0]
                url = urlparse.urljoin(base64.b64decode(self.b_link), url)

                try: v = urlparse.parse_qs(urlparse.urlparse(url).query)['v'][0]
                except: v = None

                if v == None:
                    result = self.request(url)
                    url = re.compile('(/ip[.]php.+?>)%01dx%02d' % (int(data['season']), int(data['episode']))).findall(result)[0]
                    url = re.compile('(/ip[.]php.+?)>').findall(url)[-1]
                    url = urlparse.urljoin(base64.b64decode(self.b_link), url)

                url = urlparse.parse_qs(urlparse.urlparse(url).query)['v'][0]

                u = base64.b64decode(self.u_link) % url
                r = base64.b64decode(self.r_link) % url
                j = base64.b64decode(self.j_link)
                p = base64.b64decode(self.p_link)

                result = self.request(u, referer=r)

                secret = re.compile('lastChild\.value="([^"]+)"(?:\s*\+\s*"([^"]+))?').findall(result)[0]
                secret = ''.join(secret)

                t = re.compile('"&t=([^"]+)').findall(result)[0]

                s_start = re.compile('(?:\s+|,)s\s*=(\d+)').findall(result)[0]
                m_start = re.compile('(?:\s+|,)m\s*=(\d+)').findall(result)[0]

                img = re.compile('<iframe[^>]*src="([^"]+)').findall(result)
                img = img[0] if len(img) > 0 else '0'
                img = urllib.unquote(img)

                result = client.parseDOM(result, 'div', attrs = {'class': 'ripdiv'})
                result = [(re.compile('<b>(.*?)</b>').findall(i), i) for i in result]
                result = [(i[0][0], i[1].split('<p>')) for i in result if len(i[0]) > 0]
                result = [[(i[0], x) for x in i[1]] for i in result]
                result = sum(result, [])
            except:
                result = []

            for i in result:
                try:
                    quality = i[0]
                    if any(x in quality for x in ['1080p', '720p', 'HD']): quality = 'HD'
                    else: quality = 'SD'

                    host = client.parseDOM(i[1], 'a')[-1]
                    host = re.sub('\s|<.+?>|</.+?>|.+?#\d*:', '', host)
                    host = host.strip().rsplit('.', 1)[0].lower()
                    host = [x[1] for x in hostDict2 if host == x[0]][0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    s = int(s_start) + random.randint(3, 1000)
                    m = int(m_start) + random.randint(21, 1000)
                    id = client.parseDOM(i[1], 'a', ret='onclick')[-1]
                    id = re.compile('[(](.+?)[)]').findall(id)[0]
                    url = j % (id, t) + '|' + p % (id, s, m, secret, t)
                    url += '|%s' % urllib.urlencode({'Referer': u, 'Img': img})
                    url = url.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': True})
                except:
                    pass

            return sources
        except:
            return sources
Example #13
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle']
            season = '%01d' % int(data['season'])
            episode = '%02d' % int(data['episode'])

            r = cache.get(self.ddlseries_tvcache, 120)

            r = [(i[0], i[3]) for i in r
                 if cleantitle.get(title) == cleantitle.get(i[1])
                 and season == i[2]]

            links = []

            for url, quality in r:
                try:
                    link = client.request(url)
                    vidlinks = client.parseDOM(link,
                                               'span',
                                               attrs={'class': 'overtr'})[0]
                    match = re.compile('href="([^"]+)[^>]*>\s*Episode\s+(\d+)<'
                                       ).findall(vidlinks)
                    match = [(i[0], quality) for i in match if episode == i[1]]
                    links += match
                except:
                    pass

            for url, quality in links:
                try:
                    if "protect-links" in url:
                        redirect = client.request(url)
                        url = re.findall('<a href="(.*?)" target="_blank">',
                                         redirect)
                        url = url[0]

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostprDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #14
def request(url,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            limit=None,
            referer=None,
            cookie=None,
            output='',
            timeout='30'):
    try:
        handlers = []

        if not proxy == None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % (proxy)}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        try:
            if sys.version_info < (2, 7, 9): raise Exception()
            import ssl
            ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            handlers += [urllib2.HTTPSHandler(context=ssl_context)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        except:
            pass

        # headers.update(headers) is a no-op whose only purpose is to raise
        # when headers is not a dict (e.g. None), falling back to {}
        try:
            headers.update(headers)
        except:
            headers = {}
        if 'User-Agent' in headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in headers:
            pass
        elif referer == None:
            headers['Referer'] = '%s://%s/' % (urlparse.urlparse(url).scheme,
                                               urlparse.urlparse(url).netloc)
        else:
            headers['Referer'] = referer
        if not 'Accept-Language' in headers:
            headers['Accept-Language'] = 'en-US'
        if 'Cookie' in headers:
            pass
        elif not cookie == None:
            headers['Cookie'] = cookie

        if redirect == False:

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response):
                    return response

            opener = urllib2.build_opener(NoRedirection)
            opener = urllib2.install_opener(opener)

            try:
                del headers['Referer']
            except:
                pass

        request = urllib2.Request(url, data=post, headers=headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:

            if response.code == 503:
                if 'cf-browser-verification' in response.read(5242880):

                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme,
                                          urlparse.urlparse(url).netloc)

                    ua = headers['User-Agent']

                    cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)

                    headers['Cookie'] = cf

                    request = urllib2.Request(url, data=post, headers=headers)

                    response = urllib2.urlopen(request, timeout=int(timeout))

                elif error == False:
                    return

            elif error == False:
                return

        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass
            if close == True: response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close == True: response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close == True: response.close()
            return result

        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close == True: response.close()
            return result

        if limit == '0':
            result = response.read(224 * 1024)
        elif not limit == None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)

        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)

            headers['Cookie'] = su

            request = urllib2.Request(url, data=post, headers=headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

        if output == 'extended':
            response_headers = response.headers
            response_code = str(response.code)
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                cookie = cf
            except:
                pass
            if close == True: response.close()
            return (result, response_code, response_headers, headers, cookie)
        else:
            if close == True: response.close()
            return result
    except:
        return
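
The output parameter selects what request() returns: the page body by default, or the cookie string, the post-redirect URL, the response headers, a small probe chunk, or an extended (body, code, response headers, request headers, cookie) tuple. A few usage sketches (URLs are placeholders, and request() returns None on failure, so guard the result):

html = request('http://example.com/page', limit='64')         # first 64 KB of the body
final_url = request('http://example.com/r', output='geturl')  # URL after redirects
cookie = request('http://example.com/login',
                 post=urllib.urlencode({'user': 'x', 'pass': 'y'}),
                 output='cookie')                              # 'name=value; ...' string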