Example #1
    def get_movie(self, imdb, title, year):
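        # Search the provider by title, match results on the cleaned title plus a
        # '(year)' tag (requested year +/- 1), and return the first hit's relative URL.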
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, "div", attrs = { "class": "home_post_cont.+?" })

            title = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "img", ret="title")[0]) for i in result]
            result = [(i[0], client.replaceHTMLCodes(i[1])) for i in result]
            result = [(i[0], client.parseDOM(i[1], "a")) for i in result]
            result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #2
    def get_sources(self, url, hosthdDict, hostDict, locDict):
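        # Try each mirror domain until the expected markup ('link_name') appears,
        # read a page-wide quality from the '>Links - Quality' label, then emit one
        # source per hoster list whose host name is present in hostDict.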
        try:
            sources = []

            if url == None:
                return sources

            result = ""
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, url), headers=self.headers)
                if "link_name" in str(result):
                    break

            result = result.decode("iso-8859-1").encode("utf-8")
            result = result.replace("\n", "")

            quality = re.compile(">Links - Quality(.+?)<").findall(result)[0]
            quality = quality.strip()
            if quality == "CAM" or quality == "TS":
                quality = "CAM"
            elif quality == "SCREENER":
                quality = "SCR"
            else:
                quality = "SD"

            links = client.parseDOM(result, "div", attrs={"id": "links"})[0]
            links = client.parseDOM(links, "ul")

            for i in links:
                try:
                    host = client.parseDOM(i, "li", attrs={"id": "link_name"})[-1]
                    try:
                        host = client.parseDOM(host, "span", attrs={"class": "google-src-text"})[0]
                    except:
                        pass
                    host = host.strip().lower()
                    if not host in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode("utf-8")

                    url = client.parseDOM(i, "a", ret="href")[0]
                    url = client.replaceHTMLCodes(url)
                    try:
                        url = urlparse.parse_qs(urlparse.urlparse(url).query)["u"][0]
                    except:
                        pass
                    if not url.startswith("http"):
                        url = urlparse.urljoin(self.base_link, url)
                    url = url.encode("utf-8")

                    sources.append({"source": host, "quality": quality, "provider": "Movie25", "url": url})
                except:
                    pass

            return sources
        except:
            return sources
Example #3
    def get_sources(self, url, hosthdDict, hostDict, locDict):
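        # Hoster links live inside the 'lang' block as 'movie_link' divs; the page
        # quality is derived from release keywords in the <h4> tags (SCR/CAM/SD)
        # and applied to every recognised host.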
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = result.decode('iso-8859-1').encode('utf-8')
            links = client.parseDOM(result, "div", attrs={"class": "lang"})[0]
            links = client.parseDOM(links,
                                    "div",
                                    attrs={"class": "movie_link.+?"})

            fmt = [client.parseDOM(i, "h4")[0] for i in links]
            fmt = [re.findall('\w+', i.lower()) for i in fmt]
            fmt = sum(fmt, [])

            if any(x in ['dvdscr', 'r5', 'r6'] for x in fmt): quality = 'SCR'
            elif any(x in [
                    'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts',
                    'cam', 'ts'
            ] for x in fmt):
                quality = 'CAM'
            else:
                quality = 'SD'

            for i in links:
                try:
                    host = client.parseDOM(i, "a", attrs={"target": ".+?"})[0]
                    host = host.split('.', 1)[0]
                    host = host.strip().lower()
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    url = client.parseDOM(i, "a", ret="href")[0]
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'Vidics',
                        'url': url
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #4
    def get_sources(self, url, hosthdDict, hostDict, locDict):
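        # URLs carrying a /YYYY/MM/ date path are resolved against tvbase_link,
        # everything else against base_link; hoster rows are the 'tdhost' table
        # cells and the '<label>Quality</label>' field decides CAM/TS vs SD.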
        try:
            sources = []

            if url == None: return sources

            content = re.compile('/\d{4}/\d{2}/').findall(url)

            if len(content) > 0: url = urlparse.urljoin(self.tvbase_link, url)
            else: url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            links = client.parseDOM(result,
                                    "td",
                                    attrs={"class": "even tdhost"})
            links += client.parseDOM(result,
                                     "td",
                                     attrs={"class": "odd tdhost"})

            q = re.compile('<label>Quality</label>(.+?)<').findall(result)
            if len(q) > 0: q = q[0]
            else: q = ''

            if q.endswith(('CAM', 'TS')): quality = 'CAM'
            else: quality = 'SD'

            for i in links:
                try:
                    host = client.parseDOM(i, "a")[0]
                    host = host.split('<', 1)[0]
                    host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                    host = host.strip().lower()
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    url = client.parseDOM(i, "a", ret="href")[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'WSO',
                        'url': url
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #5
    def get_sources(self, url, hosthdDict, hostDict, locDict):
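        # Each table row pairs a quality cell with an outbound link; CAM/TS rows
        # are skipped unless nothing better exists, and the host name is carved
        # out of the '/exit/' redirect URL itself.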
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = client.parseDOM(result, "div", attrs={"class":
                                                           "links"})[0]
            result = client.parseDOM(result, "tr")
            result = [(client.parseDOM(i, "td", attrs={"class":
                                                       "quality_td"})[0],
                       client.parseDOM(i, "a", ret="href")[-1])
                      for i in result]

            ts_quality = ['CAM', 'TS']
            links = [
                i for i in result if not any(x in i[0] for x in ts_quality)
            ]
            if len(links) == 0: links = result

            for i in links:
                try:
                    url = i[1]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.sub(
                        '.+?/exit/\d*-|[.].+?[.]html|http://(|www[.])|/.+|[.].+$',
                        '', i[1])
                    host = host.strip().lower()
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    if any(x in i[0] for x in ts_quality): quality = 'CAM'
                    else: quality = 'SD'

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'Moviestorm',
                        'url': url
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #6
    def get_sources(self, url, hosthdDict, hostDict, locDict):
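        # Each row carries a hoster logo and a language flag image; non-English
        # rows are skipped, the Cam/TS/HD markers set the quality (HD only for
        # hosts in hosthdDict), and '>3D<' rows are flagged via the 'info' field.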
        try:
            sources = []

            if url == None: return sources

            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, url), headers=self.headers)
                if 'original-title' in str(result): break

            links = client.parseDOM(result, "tr", attrs = { "id": "pt.+?" })

            for i in links:
                try:
                    lang = re.compile('<img src=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[1]
                    if not 'English' in lang: raise Exception()

                    host = re.compile('<img src=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[0]
                    host = host.rsplit('.', 1)[0].rsplit('.', 1)[0].rsplit('/', 1)[-1]
                    host = host.strip().lower()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    if '>Cam<' in i or '>TS<' in i: quality = 'CAM'
                    elif '>HD<' in i and host in hosthdDict: quality = 'HD'
                    else: quality = 'SD'

                    if quality == 'HD' and not host in hosthdDict: raise Exception()
                    if quality == 'SD' and not host in hostDict: raise Exception()

                    if '>3D<' in i: info = '3D'
                    else: info = ''

                    url = re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[0]
                    url = client.replaceHTMLCodes(url)
                    try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                    except: pass
                    if url.startswith('http'): url = urlparse.urlparse(url).path
                    if not url.startswith('http'): url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'provider': 'Iwatchonline', 'url': url, 'info': info})
                except:
                    pass

            return sources
        except:
            return sources
Example #7
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)

            result = result.decode('iso-8859-1').encode('utf-8')
            result = result.replace('\n', '')

            quality = re.compile('>Links - Quality(.+?)<').findall(result)[0]
            quality = quality.strip()
            if quality == 'CAM' or quality == 'TS': quality = 'CAM'
            elif quality == 'SCREENER': quality = 'SCR'
            else: quality = 'SD'

            links = client.parseDOM(result, "div", attrs={"id": "links"})[0]
            links = client.parseDOM(links, "ul")

            for i in links:
                try:
                    host = client.parseDOM(i, "li", attrs={"id":
                                                           "link_name"})[-1]
                    host = host.strip().lower()
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    url = client.parseDOM(i, "a", ret="href")[0]
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'Movie25',
                        'url': url
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #8
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
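        # Search for '<show> "Season N" "Episode M"', parse the result titles with
        # a '<title>: Season N, Episode M' pattern, and return the link whose
        # cleaned show title, season and episode all match.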
        try:
            if url == None: return

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            query = '%s "Season %s" "Episode %s"' % (url, season, episode)
            query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))

            result = cloudflare.source(query)

            r = client.parseDOM(result, "li", attrs = { "class": "first element.+?" })
            r += client.parseDOM(result, "li", attrs = { "class": "element.+?" })

            show = cleantitle.tv(url)
            result = [(client.parseDOM(i, "a", ret="href"), re.compile('>(.+?): Season (\d*), Episode (\d*)<').findall(i)) for i in r]
            result = [(i[0][0], i[1][0][0], i[1][0][1], i[1][0][2]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
            result = [i[0] for i in result if show == cleantitle.tv(i[1])][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #9
    def get_sources(self, url, hosthdDict, hostDict, locDict):
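        # The page links to a '.php?...i=...' URL whose 'i' parameter is decrypted
        # with gkplugins (ECB mode) and handed to resolvers.request, which is
        # expected to return a list of GVideo quality/url entries.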
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)
            result = client.parseDOM(result, "a", ret="href")

            u = [i for i in result if '.php' in i and  'i=' in i][0]
            u = client.replaceHTMLCodes(u)
            u = urlparse.parse_qs(urlparse.urlparse(u).query)['i'][0]
 
            url = gkplugins.decrypter(198,128).decrypt(u,base64.urlsafe_b64decode('b3F5czkzZEswc2FEY3pRNW9NSTE='),'ECB').split('\0')[0]
            url = resolvers.request(url)

            if not type(url) == list: raise Exception()

            for i in url: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'YIFYstream', 'url': i['url']})

            return sources
        except:
            return sources
Example #10
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
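        # If the page lacks the 'menu season-tabs' block, sign in first to obtain
        # a cookie; the episode link is read from the data-href attribute of the
        # anchor whose href is '#<season>-<episode>'.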
        try:
            if url == None:
                return

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            if not "menu season-tabs" in result:
                cookie = client.source(self.sign_link, post=self.key_link, output="cookie")
                result = client.source(url, cookie=cookie)

            result = client.parseDOM(
                result, "a", ret="data-href", attrs={"href": "#%01d-%01d" % (int(season), int(episode))}
            )[0]

            try:
                url = re.compile("//.+?(/.+)").findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode("utf-8")
            return url
        except:
            return
Example #11
    def resolve(self, url):
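        # Try each mirror until a page containing the stream frame responds, pull
        # the frame's src attribute, unwrap any u=/url= redirect parameters and
        # pass the result to resolvers.request.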
        try:
            url = urlparse.urlparse(url).path

            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.request(urlparse.urljoin(base_link, url),
                                        headers=self.headers)
                if 'frame' in str(result): break

            url = re.compile(
                'class=[\'|\"]*frame.+?src=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]'
            ).findall(result)[0]
            url = client.replaceHTMLCodes(url)
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except:
                pass
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
            except:
                pass

            url = resolvers.request(url)
            return url
        except:
            return
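
Taken together, the examples above share a small informal interface: get_movie / get_show / get_episode map a title to a provider-relative URL, get_sources turns that URL into a list of {'source', 'quality', 'provider', 'url'} dicts (with hostDict / hosthdDict acting as host whitelists), and resolve, where present, converts a chosen link into a playable URL. The sketch below only illustrates that call flow; DummyScraper and the driver loop are hypothetical and merely mirror the method signatures seen in the examples.

class DummyScraper(object):
    def get_movie(self, imdb, title, year):
        # A real scraper would search the provider site; return a fake path here.
        return '/watch/%s-%s' % (title.lower().replace(' ', '-'), year)

    def get_sources(self, url, hosthdDict, hostDict, locDict):
        sources = []
        if url is None:
            return sources
        # A real scraper would parse the page; emit one placeholder entry.
        sources.append({'source': 'examplehost', 'quality': 'SD',
                        'provider': 'Dummy', 'url': url})
        return sources

    def resolve(self, url):
        # A real scraper would unwrap redirects / u= parameters here.
        return url


if __name__ == '__main__':
    scraper = DummyScraper()
    movie_url = scraper.get_movie('0000000', 'Example Movie', '2014')
    for source in scraper.get_sources(movie_url, [], ['examplehost'], []):
        print('%s %s %s' % (source['source'], source['quality'],
                            scraper.resolve(source['url'])))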
Example #12
    def get_show(self, imdb, tvdb, show, show_alt, year):


        # import logging
        # LOG_FILENAME = '/home/keeganmccallum/test.log'
        # logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)

        # logging.debug(','.join([imdb, tvdb, show, show_alt, year]))

        try:
            result = client.source(self.base_link)
            if not "'index show'" in result:
                cookie = client.source(self.sign_link, post=self.key_link, output='cookie')
                result = client.source(self.base_link, cookie=cookie)

            result = client.parseDOM(result, "div", attrs = { "class": "index show" })
            result = [(client.parseDOM(i, "a", attrs = { "class": "name" })[0], client.parseDOM(i, "span", attrs = { "class": "value" })[0], client.parseDOM(i, "a", ret="href")[0]) for i in result]

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = [str(year), str(int(year)+1), str(int(year)-1)]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [i[2] for i in result if any(x == cleantitle.tv(i[0]) for x in shows)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #13
    def resolve(self, url):
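        # The player iframe sits inside div#showvideo; normalise the IFRAME/SRC
        # casing, extract the iframe src, unwrap u=/url= redirect parameters and
        # pass the result to resolvers.request.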
        try:
            url = urlparse.urlparse(url).path

            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.request(urlparse.urljoin(base_link, url),
                                        headers=self.headers)
                if 'showvideo' in str(result): break

            result = result.decode('iso-8859-1').encode('utf-8')

            url = client.parseDOM(result, "div", attrs={"id": "showvideo"})[0]
            url = url.replace('<IFRAME', '<iframe').replace(' SRC=', ' src=')
            url = client.parseDOM(url, "iframe", ret="src")[0]
            url = client.replaceHTMLCodes(url)
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except:
                pass
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
            except:
                pass

            url = resolvers.request(url)
            return url
        except:
            return
Example #14
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
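        # The search endpoint is POSTed a JSON-style query ('a=retrieve'); rows
        # are matched on cleaned show title and season, and the return value packs
        # the video id and zero-padded episode as '<id>|<episode>'.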
        try:
            if url == None: return

            query = self.tvbase_link + self.index_link
            post = urllib.urlencode({'a': 'retrieve', 'c': 'result', 'p': '{"KeyWord":"%s","Page":"1","NextToken":""}' % url})

            result = client.source(query, post=post)
            result = result.decode('iso-8859-1').encode('utf-8')
            result = client.parseDOM(result, "tr")

            show = cleantitle.tv(url)
            season = '%01d' % int(season)
            episode = '%02d' % int(episode)
            result = [client.parseDOM(i, "h1")[0] for i in result]
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
            result = [(i[0], re.sub('\sSeason(|\s)\d*.+', '', i[1]), re.compile('\sSeason *(\d*) *').findall(i[1])[0]) for i in result]
            result = [i for i in result if show == cleantitle.tv(i[1])]
            result = [i[0] for i in result if season == i[2]][0]

            url = result.split('v=', 1)[-1]
            url = '%s|%s' % (url, episode)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #15
    def get_movie(self, imdb, title, year):
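        # Fall back to a request via agent_link when the direct search returns
        # nothing; candidates are matched on the cleaned title and the
        # 'Releasedatum' year extracted from each result block.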
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            if result == None: result = client.source(self.agent_link + urllib.quote_plus(query))

            result = result.replace('\r','').replace('\n','').replace('\t','')

            result = re.compile('(<div id="*\d*.+?</div>)').findall(result)

            title = cleantitle.movie(title)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            result = [(re.compile('id="*(\d*)"*').findall(i), re.compile('<h4>(.+?)</h4>').findall(i), re.compile('Releasedatum *: *(\d{4})').findall(i)) for i in result]
            result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            result = [(i[0], i[1].rsplit('</span>')[0].split('>')[-1].strip(), i[2]) for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #16
    def get_show(self, imdb, tvdb, show, show_alt, year):
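        # Shows are browsed by first letter (non-alphabetic titles fall under '1',
        # and leading THE/A is ignored); the entry is located by its IMDb id and
        # returned as path plus query string.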
        try:
            query = re.sub("^THE\s+|^A\s+", "", show.strip().upper())[0]
            if not query.isalpha():
                query = "1"
            query = self.tvsearch_link % query

            result = ""
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
                if "Donate" in str(result):
                    break

            result = result.decode("iso-8859-1").encode("utf-8")
            result = re.compile("id=%s>.+?href=(.+?)>" % imdb).findall(result)[0]

            url = client.replaceHTMLCodes(result)
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)["u"][0]
            except:
                pass
            url = "%s?%s" % (urlparse.urlparse(url).path, urlparse.urlparse(url).query)
            url = url.encode("utf-8")
            return url
        except:
            return
Example #17
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            if url == None: return

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            query = '%s "Season %s" "Episode %s"' % (url, season, episode)
            query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))

            result = cloudflare.source(query)

            result = client.parseDOM(result, "header", attrs = { "class": "entry-header" })

            show = cleantitle.tv(url)

            result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], re.compile('(.+?): Season (\d*).+?Episode (\d*)').findall(i[1])) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1], i[1][0][2]) for i in result if len(i[1]) > 0]
            result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
            result = [i[0] for i in result if show == cleantitle.tv(i[1])][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
Example #18
    def get_sources(self, url, hosthdDict, hostDict, locDict):
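        # Variant of the YIFYstream scraper where the '.php?...i=' parameter is
        # plain base64 (base64.decodestring) rather than encrypted; the decoded
        # URL goes to resolvers.request, expecting a list of GVideo entries.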
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)
            
            result = client.parseDOM(result, "a", ret="href")

            url = [i for i in result if '.php' in i and 'i=' in i][0]
            url = client.replaceHTMLCodes(url)
            url = urlparse.parse_qs(urlparse.urlparse(url).query)['i'][0]
            url = base64.decodestring(url)

            url = resolvers.request(url)

            if not type(url) == list: raise Exception()

            for i in url: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'YIFYstream', 'url': i['url']})

            return sources
        except:
            return sources
Example #19
    def get_movie(self, imdb, title, year):
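        # Query the OMDb API first: only titles produced in India are handled, and
        # the first language among hindi/tamil/telugu/malayalam becomes part of
        # the site search query; the year check is a soft filter here.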
        try:
            search = 'http://www.omdbapi.com/?i=tt%s' % imdb
            search = client.source(search)
            search = json.loads(search)
            country = [i.strip() for i in search['Country'].split(',')]
            if not 'India' in country: return

            languages = ['hindi', 'tamil', 'telugu', 'malayalam']
            language = [i.strip().lower() for i in search['Language'].split(',')]
            language = [i for i in language if any(x == i for x in languages)][0]

            query = self.search_link % (urllib.quote_plus(title), language)
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, "div", attrs = { "class": "search-category" })
            result = [i for i in result if 'Movies' in client.parseDOM(i, "p")[0]][0]
            result = client.parseDOM(result, "li")

            title = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
            r = [i for i in result if any(x in i[1] for x in years)]
            if not len(r) == 0: result = r
            result = [i[0] for i in result if title == cleantitle.movie(i[1])][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = url.replace('../', '/')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #20
    def get_show(self, imdb, tvdb, show, show_alt, year):
        try:
            query = re.sub('^THE\s+|^A\s+', '', show.strip().upper())[0]
            if not query.isalpha(): query = '1'
            query = self.tvsearch_link % query

            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, query),
                                       headers=self.headers)
                if 'Donate' in str(result): break

            result = result.decode('iso-8859-1').encode('utf-8')
            result = re.compile('id=%s>.+?href=(.+?)>' %
                                imdb).findall(result)[0]

            url = client.replaceHTMLCodes(result)
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except:
                pass
            url = '%s?%s' % (urlparse.urlparse(url).path,
                             urlparse.urlparse(url).query)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #21
    def resolve(self, url):
        try:
            url = urlparse.urlparse(url).path

            result = ""
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.request(urlparse.urljoin(base_link, url), headers=self.headers)
                if "showvideo" in str(result):
                    break

            result = result.decode("iso-8859-1").encode("utf-8")

            url = client.parseDOM(result, "div", attrs={"id": "showvideo"})[0]
            url = url.replace("<IFRAME", "<iframe").replace(" SRC=", " src=")
            url = client.parseDOM(url, "iframe", ret="src")[0]
            url = client.replaceHTMLCodes(url)
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)["u"][0]
            except:
                pass
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)["url"][0]
            except:
                pass

            url = resolvers.request(url)
            return url
        except:
            return
Example #22
    def get_movie(self, imdb, title, year):
        try:
            query = self.search_link % urllib.quote_plus(title)
            query = urlparse.urljoin(self.base_link, query)

            result = cloudflare.source(query)

            result = result.decode("iso-8859-1").encode("utf-8")
            result = client.parseDOM(result, "div", attrs={"class": "movie_table"})

            title = cleantitle.movie(title)
            years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[1]) for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = client.replaceHTMLCodes(result)
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)["u"][0]
            except:
                pass
            url = urlparse.urlparse(url).path
            url = url.encode("utf-8")
            return url
        except:
            return
Example #23
    def get_movie(self, imdb, title, year):
        try:
            query = self.search_link % urllib.quote_plus(title)
            query = urlparse.urljoin(self.base_link, query)

            result = cloudflare.source(query)

            result = result.decode('iso-8859-1').encode('utf-8')
            result = client.parseDOM(result,
                                     "div",
                                     attrs={"class": "movie_table"})

            title = cleantitle.movie(title)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]
            result = [(client.parseDOM(i, "a", ret="href")[0],
                       client.parseDOM(i, "a", ret="title")[1])
                      for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = client.replaceHTMLCodes(result)
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except:
                pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
Example #24
    def get_movie(self, imdb, title, year):
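        # The search results arrive as a JSON feed wrapped in a showResult(...)
        # callback; only entries categorised as 'movies' are kept, titles are
        # split into name/year/rest, and TS/CAM releases are filtered out.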
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = sucuri.source(query)
            result = re.compile('showResult\((.*)\)').findall(result)[0]
            result = json.loads(result)
            result = result['feed']['entry']

            title = cleantitle.movie(title)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            result = [i for i in result if 'movies' in [x['term'].lower() for x in i['category']]]
            result = [[x for x in i['link'] if x['rel'] == 'alternate' and x['type'] == 'text/html'][0] for i in result]
            result = [(i['href'], i['title']) for i in result]
            result = [(i[0], re.compile('(.+?) (\d{4})(.+)').findall(i[1])) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1], i[1][0][2]) for i in result if len(i[1]) > 0]
            result = [(i[0], i[1], i[2]) for i in result if not 'TS' in i[3] and not 'CAM' in i[3]]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #25
    def get_show(self, imdb, tvdb, show, show_alt, year):
        try:
            result = client.source(self.base_link)
            if not "'index show'" in result:
                cookie = client.source(self.sign_link, post=self.key_link, output="cookie")
                result = client.source(self.base_link, cookie=cookie)

            result = client.parseDOM(result, "div", attrs={"class": "index show"})
            result = [
                (
                    client.parseDOM(i, "a", attrs={"class": "name"})[0],
                    client.parseDOM(i, "span", attrs={"class": "value"})[0],
                    client.parseDOM(i, "a", ret="href")[0],
                )
                for i in result
            ]

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = [str(year), str(int(year) + 1), str(int(year) - 1)]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [i[2] for i in result if any(x == cleantitle.tv(i[0]) for x in shows)][0]

            try:
                url = re.compile("//.+?(/.+)").findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode("utf-8")
            return url
        except:
            return
Example #26
    def get_movie(self, imdb, title, year):
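        # Search results are the 'first element'/'element' list items; the
        # matching entry's link is unwrapped (u= parameter) and reduced to its
        # path before being returned.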
        try:
            query = urlparse.urljoin(self.base_link, self.moviesearch_link + urllib.quote_plus(title))

            result = cloudflare.source(query)
            if result == None: result = client.source(self.agent_link + urllib.quote_plus(query))

            result = result.replace('> ', '>').replace(' <', '<')

            r = client.parseDOM(result, "li", attrs = { "class": "first element.+?" })
            r += client.parseDOM(result, "li", attrs = { "class": "element.+?" })

            title = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, "a", ret="href"), re.compile('>(.+?\(\d{4}\))<').findall(i)) for i in r]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], i[1].split('>')[-1]) for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
Example #27
    def get_movie(self, imdb, title, year):
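        # Results are 'hover-group' cards: the movie id comes from the anchor's
        # data-movieid attribute, the title from the last <h5> and the year from
        # the last <p>.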
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result,
                                     "div",
                                     attrs={"class": "hover-group.+?"})

            title = cleantitle.movie(title)
            years = [
                '>%s<' % str(year),
                '>%s<' % str(int(year) + 1),
                '>%s<' % str(int(year) - 1)
            ]
            result = [(client.parseDOM(i, "a", ret="data-movieid")[0],
                       client.parseDOM(i, "h5")[-1], client.parseDOM(i,
                                                                     "p")[-1])
                      for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #28
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
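        # Episodes are searched as '<show> "SxxExx"'; each post title is split
        # into show name and episode tag, both must match, and the link is
        # returned relative to tvbase_link.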
        try:
            if url == None: return

            title = url
            hdlr = 'S%02dE%02d' % (int(season), int(episode))

            query = self.search_link % (urllib.quote_plus('%s "%s"' %
                                                          (title, hdlr)))
            query = urlparse.urljoin(self.tvbase_link, query)

            result = client.source(query)
            result = client.parseDOM(result,
                                     "header",
                                     attrs={"class": "post-title"})

            title = cleantitle.tv(title)
            result = [(client.parseDOM(i, "a",
                                       ret="href"), client.parseDOM(i, "a"))
                      for i in result]
            result = [(i[0][0], i[1][0]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], re.compile('(.+?) (S\d*E\d*)').findall(i[1]))
                      for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result
                      if len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.tv(i[1])]
            result = [i[0] for i in result if hdlr == i[2]][0]

            url = result.replace(self.tvbase_link, '')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #29
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)

            result = client.parseDOM(result, "a", ret="href")

            url = [i for i in result if '.php' in i and 'i=' in i][0]
            url = client.replaceHTMLCodes(url)
            url = urlparse.parse_qs(urlparse.urlparse(url).query)['i'][0]
            url = base64.decodestring(url)

            url = resolvers.request(url)

            if not type(url) == list: raise Exception()

            for i in url:
                sources.append({
                    'source': 'GVideo',
                    'quality': i['quality'],
                    'provider': 'YIFYstream',
                    'url': i['url']
                })

            return sources
        except:
            return sources
Example #30
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            if url == None: return

            title = url
            hdlr = 'S%02dE%02d' % (int(season), int(episode))

            query = self.search_link % (urllib.quote_plus('%s "%s"' % (title, hdlr)))
            query = urlparse.urljoin(self.tvbase_link, query)

            result = client.source(query)
            result = client.parseDOM(result, "header", attrs = { "class": "post-title" })

            title = cleantitle.tv(title)
            result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], re.compile('(.+?) (S\d*E\d*)').findall(i[1])) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.tv(i[1])]
            result = [i[0] for i in result if hdlr == i[2]][0]

            url = result.replace(self.tvbase_link, '')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #31
    def get_show(self, imdb, tvdb, show, show_alt, year):
        try:
            result = client.source(self.base_link)
            if not "'index show'" in result:
                cookie = client.source(self.sign_link,
                                       post=self.key_link,
                                       output='cookie')
                result = client.source(self.base_link, cookie=cookie)

            result = client.parseDOM(result,
                                     "div",
                                     attrs={"class": "index show"})
            result = [(client.parseDOM(i, "a", attrs={"class": "name"})[0],
                       client.parseDOM(i, "span", attrs={"class": "value"})[0],
                       client.parseDOM(i, "a", ret="href")[0]) for i in result]

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = [str(year), str(int(year) + 1), str(int(year) - 1)]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [
                i[2] for i in result
                if any(x == cleantitle.tv(i[0]) for x in shows)
            ][0]

            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #32
    def resolve(self, url):
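        # Fetch the page with redirects disabled (NoRedirection) and an iPhone
        # user agent, follow the 'myButton' link manually, unwrap u=/url= redirect
        # parameters and hand the result to resolvers.request.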
        try:
            url = url.replace('/json/', '/')
            url = urlparse.urlparse(url).path

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response):
                    return response

            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                try:
                    opener = urllib2.build_opener(NoRedirection)
                    opener.addheaders = [('User-Agent', 'Apple-iPhone')]
                    opener.addheaders = [('Referer', base_link + url)]
                    response = opener.open(base_link + url)
                    result = response.read()
                    response.close()
                except:
                    result = ''
                if 'myButton' in result: break

            url = re.compile('class=[\'|\"]*myButton.+?href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(result)[0]
            url = client.replaceHTMLCodes(url)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
            except: pass

            url = resolvers.request(url)
            return url
        except:
            return
Example #33
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            if not "menu season-tabs" in result:
                cookie = client.source(self.sign_link,
                                       post=self.key_link,
                                       output='cookie')
                result = client.source(url, cookie=cookie)

            result = client.parseDOM(
                result,
                "a",
                ret="data-href",
                attrs={"href": "#%01d-%01d" % (int(season), int(episode))})[0]

            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #34
    def get_movie(self, imdb, title, year):
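        # The site's 'ajaxy_sf' live-search endpoint returns JSON; the candidate
        # page is accepted only if it actually references 'tt<imdb>' before its
        # relative URL is returned.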
        try:
            query = urlparse.urljoin(self.base_link, self.search_link)
            post = urllib.urlencode({'action': 'ajaxy_sf', 'sf_value': title})

            result = client.source(query, post=post)
            result = result.replace('&#8211;', '-').replace('&#8217;', '\'')
            result = json.loads(result)
            result = result['post']['all']

            title = cleantitle.movie(title)
            result = [
                i['post_link'] for i in result
                if title == cleantitle.movie(i['post_title'])
            ][0]

            check = client.source(result)
            if not str('tt' + imdb) in check: raise Exception()

            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #35
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
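        # No lookup required: the episode URL is built directly from the show URL
        # and the season/episode numbers via episode_link.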
        if url == None: return

        url = self.episode_link % (url, int(season), int(episode))
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
Example #36
    def get_movie(self, imdb, title, year):
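        # Results are 'post-*' divs; entries that look like TV seasons ('Season N'
        # suffix) or carry the 'mark-8' class are discarded, and the year is taken
        # from the 'status status-year' div.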
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, "div", attrs = { "id": "post-.+?" })

            title = cleantitle.movie(title)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0], client.parseDOM(i, "div", attrs = { "class": "status status-year" }), client.parseDOM(i, "div", attrs = { "class": "mark-8" })) for i in result]
            result = [(i[0], i[1], i[2][0], i[3]) for i in result if len(i[2]) > 0]
            result = [(i[0], i[1], i[2], i[3], re.compile('Season (\d*)$').findall(i[1])) for i in result]
            result = [(i[0], i[1], i[2], i[3]) for i in result if len(i[4]) == 0]
            result = [(i[0], i[1], i[2]) for i in result if len(i[3]) == 0]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #37
    def get_movie(self, imdb, title, year):
        try:
            query = urlparse.urljoin(self.base_link, self.moviesearch_link + urllib.quote_plus(title))

            result = cloudflare.source(query)

            r = client.parseDOM(result, "li", attrs = { "class": "first element.+?" })
            r += client.parseDOM(result, "li", attrs = { "class": "element.+?" })
            r += client.parseDOM(result, "header", attrs = { "class": "entry-header" })

            title = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in r]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], re.sub('<.+?>', '', i[1])) for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #38
    def get_show(self, imdb, tvdb, show, show_alt, year):
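        # POST the show name to the site search form and try the mirror domains
        # until the 'widget search-page' block appears; entries are matched on the
        # cleaned show title (or alternate title) and a '(year)' tag.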
        try:
            query = self.search_link
            post = urllib.urlencode({'searchquery': show, 'searchin': '2'})

            result = ''
            links = [self.link_1, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, query), post=post, headers=self.headers)
                if 'widget search-page' in str(result): break

            result = client.parseDOM(result, "div", attrs = { "class": "widget search-page" })[0]
            result = client.parseDOM(result, "td")

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, "a", ret="href")[-1], client.parseDOM(i, "a")[-1]) for i in result]
            result = [i for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
Example #39
    def get_movie(self, imdb, title, year):
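        # The title is reduced to word characters before searching; results come
        # from the 'listing-videos' list and each entry is split into a name part
        # and a year before matching.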
        try:
            query = self.search_link % (urllib.quote_plus(re.sub(r'[\W_]+', ' ', title)))
            query = urlparse.urljoin(self.base_link, query)

            result = cloudflare.source(query)
            result = result.replace('&#8211;','-').replace('&#8217;','\'')
            result = client.parseDOM(result, "ul", attrs = { "class": "listing-videos.+?" })[0]
            result = client.parseDOM(result, "li", attrs = { "class": ".+?" })

            title = cleantitle.movie(title)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0]) for i in result]
            result = [(i[0], re.sub('\s(\(|)(\d{4})(.+)', '', i[1]), re.compile('(\d{4})').findall(i[1])) for i in result]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #40
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        if url == None: return

        url = self.episode_link % (url, int(season), int(episode))
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
Example #41
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            if url == None: return

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            query = '%s "Season %s" "Episode %s"' % (url, season, episode)
            query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))

            result = cloudflare.source(query)

            r = client.parseDOM(result, "li", attrs = { "class": "first element.+?" })
            r += client.parseDOM(result, "li", attrs = { "class": "element.+?" })

            show = cleantitle.tv(url)
            result = [(client.parseDOM(i, "a", ret="href"), re.compile('>(.+?): Season (\d*), Episode (\d*)<').findall(i)) for i in r]
            result = [(i[0][0], i[1][0][0], i[1][0][1], i[1][0][2]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
            result = [i[0] for i in result if show == cleantitle.tv(i[1])][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #42
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)
            result = client.parseDOM(result, "a", ret="href")

            u = [i for i in result if '.php' in i and  'i=' in i][0]
            u = client.replaceHTMLCodes(u)
            u = urlparse.parse_qs(urlparse.urlparse(u).query)['i'][0]
 
            url = gkplugins.decrypter(198,128).decrypt(u,base64.urlsafe_b64decode('b3F5czkzZEswc2FEY3pRNW9NSTE='),'ECB').split('\0')[0]
            url = resolvers.request(url)

            if not type(url) == list: raise Exception()

            for i in url: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'YIFYstream', 'url': i['url']})

            return sources
        except:
            return sources
Example #43
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)

            result = result.decode("iso-8859-1").encode("utf-8")
            result = result.replace("\n", "")

            quality = re.compile(">Links - Quality(.+?)<").findall(result)[0]
            quality = quality.strip()
            if quality == "CAM" or quality == "TS":
                quality = "CAM"
            elif quality == "SCREENER":
                quality = "SCR"
            else:
                quality = "SD"

            links = client.parseDOM(result, "div", attrs={"id": "links"})[0]
            links = client.parseDOM(links, "ul")

            for i in links:
                try:
                    host = client.parseDOM(i, "li", attrs={"id": "link_name"})[-1]
                    host = host.strip().lower()
                    if not host in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode("utf-8")

                    url = client.parseDOM(i, "a", ret="href")[0]
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode("utf-8")

                    sources.append({"source": host, "quality": quality, "provider": "Movie25", "url": url})
                except:
                    pass

            return sources
        except:
            return sources
Example #44
    def get_sources(self, url, hosthdDict, hostDict, locDict):
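        # Each hoster row ('choose_tabs' page) hides the real host and target URL
        # in base64-encoded 'domain' and 'url' query parameters; the quality span
        # class (quality_cam/quality_ts/quality_dvd) maps to CAM or SD.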
        try:
            sources = []

            if url == None: return sources

            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, url), headers=self.headers)
                if 'choose_tabs' in str(result): break

            result = result.decode('iso-8859-1').encode('utf-8')
            links = client.parseDOM(result, "tbody")

            for i in links:
                try:
                    u = client.parseDOM(i, "a", ret="href")[0]
                    u = client.replaceHTMLCodes(u)
                    try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
                    except: pass

                    host = urlparse.parse_qs(urlparse.urlparse(u).query)['domain'][0]
                    host = base64.urlsafe_b64decode(host.encode('utf-8'))
                    host = host.rsplit('.', 1)[0]
                    host = host.strip().lower()
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    url = urlparse.parse_qs(urlparse.urlparse(u).query)['url'][0]
                    url = base64.urlsafe_b64decode(url.encode('utf-8'))
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    quality = client.parseDOM(i, "span", ret="class")[0]
                    if quality == 'quality_cam' or quality == 'quality_ts': quality = 'CAM'
                    elif quality == 'quality_dvd': quality = 'SD'
                    else:  raise Exception()

                    sources.append({'source': host, 'quality': quality, 'provider': 'Primewire', 'url': url})
                except:
                    pass

            return sources
        except:
            return sources
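
Primewire hides both the hoster name and the target link as urlsafe base64 inside the query string, which is why the loop above decodes `domain` to pick the host and `url` to get the final link. A self-contained sketch with a fabricated link:

    import base64, urlparse  # Python 2

    # Fabricated example of the kind of link found in each tbody row.
    link = ('http://example.com/external.php?title=x'
            '&url=aHR0cDovL2hvc3Rlci50by9hYmMxMjM='
            '&domain=aG9zdGVyLnRv')

    q = urlparse.parse_qs(urlparse.urlparse(link).query)
    host = base64.urlsafe_b64decode(q['domain'][0]).rsplit('.', 1)[0]
    url = base64.urlsafe_b64decode(q['url'][0])
    print(host)  # hoster
    print(url)   # http://hoster.to/abc123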
Ejemplo n.º 45
0
    def get_movie(self, imdb, title, year):
        try:
            query = self.search_link % urllib.quote_plus(title)

            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, query),
                                       headers=self.headers)
                if 'movie_table' in str(result): break

            result = result.decode('iso-8859-1').encode('utf-8')
            result = client.parseDOM(result,
                                     "div",
                                     attrs={"class": "movie_table"})

            title = cleantitle.movie(title)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]
            result = [(client.parseDOM(i, "a", ret="href")[0],
                       client.parseDOM(i, "a", ret="title")[1])
                      for i in result]
            result = [i for i in result if any(x in i[1] for x in years)]

            result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
            try:
                result = [
                    (urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0],
                     i[1]) for i in result
                ]
            except:
                pass
            result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

            match = [i[0] for i in result if title == cleantitle.movie(i[1])]

            match2 = [i[0] for i in result]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:10]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    result = client.source(base_link + i, headers=self.headers)
                    if str('tt' + imdb) in str(result):
                        url = i
                        break
                except:
                    pass

            url = url.encode('utf-8')
            return url
        except:
            return
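
Two details in the matching above are easy to miss: `match2` is an order-preserving de-duplication of every candidate path, and only the first ten of those are fetched and checked for the expected IMDb id when no exact title match exists. The de-duplication idiom in isolation:

    paths = ['/movie-a', '/movie-b', '/movie-a', '/movie-c', '/movie-b']
    deduped = [x for y, x in enumerate(paths) if x not in paths[:y]]
    print(deduped)  # ['/movie-a', '/movie-b', '/movie-c']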
Ejemplo n.º 46
0
    def get_show(self, imdb, tvdb, show, show_alt, year):
        try:
            url = show
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Ejemplo n.º 47
0
    def get_movie(self, imdb, title, year):
        try:
            url = '%s %s' % (title, year)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Ejemplo n.º 48
0
    def get_movie(self, imdb, title, year):
        try:
            url = '%s %s' % (title, year)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Ejemplo n.º 49
0
    def get_show(self, imdb, tvdb, show, show_alt, year):
        try:
            url = show
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Ejemplo n.º 50
0
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        if url == None: return

        url = url.replace('/watch-','/tv-')
        url += '/season-%01d-episode-%01d' % (int(season), int(episode))
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
Ejemplo n.º 51
0
def resolve(url):
    try:
        data = str(url).replace('\r','').replace('\n','').replace('\t','')
        doregex = re.compile('\$doregex\[(.+?)\]').findall(data)

        for i in range(0, 5):
            for x in doregex:
                try:
                    if not '$doregex[%s]' % x in data: raise Exception()

                    regex = re.compile('<regex>(.+?)</regex>').findall(data)
                    regex = [r for r in regex if '<name>%s</name>' % x in r][0]

                    if '$doregex' in regex: raise Exception()

                    expres = re.compile('<expres>(.+?)</expres>').findall(regex)[0]

                    try: referer = re.compile('<referer>(.+?)</referer>').findall(regex)[0]
                    except: referer = ''
                    referer = urllib.unquote_plus(referer)
                    referer = client.replaceHTMLCodes(referer)
                    referer = referer.encode('utf-8')

                    page = re.compile('<page>(.+?)</page>').findall(regex)[0]
                    page = urllib.unquote_plus(page)
                    page = client.replaceHTMLCodes(page)
                    page = page.encode('utf-8')

                    result = client.request(page, referer=referer)
                    result = str(result).replace('\r','').replace('\n','').replace('\t','')
                    result = str(result).replace('\/','/')

                    r = re.compile(expres).findall(result)[0]
                    data = data.replace('$doregex[%s]' % x, r)
                except:
                    pass

        url = re.compile('(.+?)<regex>').findall(data)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        if not '$doregex' in url: return url
    except:
        return
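
`resolve` works on templates in which `$doregex[name]` placeholders are described by `<regex>` blocks: each block names the placeholder, the page to fetch and the expression to apply, and the outer loop makes up to five passes so that blocks which still contain unresolved placeholders can be deferred and filled in later. A minimal offline walk-through of one substitution, with the page fetch replaced by a hard-coded string (template, URL and token are all fabricated):

    import re

    # Fabricated template in the format resolve() expects.
    data = ('http://example.com/stream/$doregex[token].mp4'
            '<regex><name>token</name>'
            '<expres>token="(.+?)"</expres>'
            '<page>http%3A%2F%2Fexample.com%2Fplayer</page></regex>')

    page_html = 'var token="abc123";'   # stands in for client.request(page)

    name = re.compile('\$doregex\[(.+?)\]').findall(data)[0]
    block = re.compile('<regex>(.+?)</regex>').findall(data)[0]
    expres = re.compile('<expres>(.+?)</expres>').findall(block)[0]

    data = data.replace('$doregex[%s]' % name, re.compile(expres).findall(page_html)[0])
    print(re.compile('(.+?)<regex>').findall(data)[0])
    # http://example.com/stream/abc123.mp4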
Ejemplo n.º 52
0
    def get_show(self, imdb, tvdb, show, show_alt, year):
        try:
            if (self.user == '' or self.password == ''): raise Exception()

            url = show
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Ejemplo n.º 53
0
    def get_movie(self, imdb, title, year):
        try:
            if (self.user == '' or self.password == ''): raise Exception()

            url = '%s %s' % (title, year)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Ejemplo n.º 54
0
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            if url == None: return

            url = '%s S%02dE%02d' % (url, int(season), int(episode))
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Ejemplo n.º 55
0
    def get_movie(self, imdb, title, year):
        try:
            query = self.search_link % (urllib.quote_plus(title))

            result = client.source(query)

            title = cleantitle.movie(title)
            years = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1)
            ]

            result = client.parseDOM(result, "h3", attrs={"class": ".+?"})
            result = [(client.parseDOM(i, "a",
                                       ret="href"), client.parseDOM(i, "a"))
                      for i in result]
            result = [(i[0][0], i[1][-1]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0]
            result = [
                i for i in result
                if any(x in i[0] for x in years) or any(x in i[1]
                                                        for x in years)
            ]
            result = [
                i[0] for i in result if title in cleantitle.movie(i[0])
                or title in cleantitle.movie(i[1])
            ][0]

            result = result.replace('/tag/', '/')
            result = cloudflare.source(result)

            r = client.parseDOM(result, "title")[0]

            t = re.sub('(\.|\_|\(|\[|\s)(\d{4}|3D)(\.|\_|\)|\]|\s)(.+)', '', r)
            if not title == cleantitle.movie(t): raise Exception()

            y = re.compile('[\.|\_|\(|\[|\s](\d{4})[\.|\_|\)|\]|\s]').findall(
                r)[0]
            if not any(x == y for x in years): raise Exception()

            result = client.parseDOM(result,
                                     "link",
                                     ret="href",
                                     attrs={"rel": "canonical"})[0]

            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
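
The two regular expressions near the end validate the candidate page: the first strips a release-style page title down to the bare name (everything from the year or `3D` tag onwards is dropped) and the second pulls the year back out for comparison. A standalone run on a fabricated title:

    import re

    r = 'Some.Movie.2014.720p.BluRay.x264'   # fabricated page title

    t = re.sub('(\.|\_|\(|\[|\s)(\d{4}|3D)(\.|\_|\)|\]|\s)(.+)', '', r)
    y = re.compile('[\.|\_|\(|\[|\s](\d{4})[\.|\_|\)|\]|\s]').findall(r)[0]
    print(t)  # Some.Movie
    print(y)  # 2014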
Ejemplo n.º 56
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = result.decode('iso-8859-1').encode('utf-8')

            links = client.parseDOM(result, "table", attrs = { "class": "link_ite.+?" })

            for i in links:
                try:
                    url = client.parseDOM(i, "a", ret="href")
                    if len(url) > 1: raise Exception()
                    url = url[0].split('gtfo=', 1)[-1].split('&', 1)[0]
                    url = base64.urlsafe_b64decode(url.encode('utf-8'))
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = urlparse.urlparse(url).netloc
                    host = host.replace('www.', '').replace('embed.', '')
                    host = host.rsplit('.', 1)[0]
                    host = host.lower()
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    quality = client.parseDOM(i, "div", attrs = { "class": "quality" })
                    if any(x in ['[CAM]', '[TS]'] for x in quality): quality = 'CAM'
                    else:  quality = 'SD'
                    quality = quality.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'provider': 'Watchfree', 'url': url})
                except:
                    pass

            return sources
        except:
            return sources
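
Watchfree tucks the real hoster link into a urlsafe-base64 `gtfo=` parameter, and the host name is then taken from the decoded URL's netloc, which is what the string juggling above does. A self-contained sketch with a fabricated redirect link:

    import base64, urlparse  # Python 2

    # Fabricated redirect link of the shape found in the link tables.
    link = 'http://example.com/out.php?gtfo=aHR0cDovL3d3dy5ob3N0ZXIudG8vYWJjMTIz&x=1'

    url = link.split('gtfo=', 1)[-1].split('&', 1)[0]
    url = base64.urlsafe_b64decode(url.encode('utf-8'))

    host = urlparse.urlparse(url).netloc
    host = host.replace('www.', '').replace('embed.', '').rsplit('.', 1)[0]
    print(host)  # hoster
    print(url)   # http://www.hoster.to/abc123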
Ejemplo n.º 57
0
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            if url == None: return

            if (self.user == '' or self.password == ''): raise Exception()

            url = '%s S%02dE%02d' % (url, int(season), int(episode))
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return