Example #1
    def get_show(self, imdb, tvdb, show, show_alt, year):
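        # Load the site's show index (fetching a sign-in cookie if the page is gated), match the cleaned title against a +/-1 year window and return the show's relative URL.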
        try:
            result = client.source(self.base_link)
            if not "'index show'" in result:
                cookie = client.source(self.sign_link, post=self.key_link, output="cookie")
                result = client.source(self.base_link, cookie=cookie)

            result = client.parseDOM(result, "div", attrs={"class": "index show"})
            result = [
                (
                    client.parseDOM(i, "a", attrs={"class": "name"})[0],
                    client.parseDOM(i, "span", attrs={"class": "value"})[0],
                    client.parseDOM(i, "a", ret="href")[0],
                )
                for i in result
            ]

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = [str(year), str(int(year) + 1), str(int(year) - 1)]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [i[2] for i in result if any(x == cleantitle.tv(i[0]) for x in shows)][0]

            try:
                url = re.compile("//.+?(/.+)").findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode("utf-8")
            return url
        except:
            return
Example #2
    def get_movie(self, imdb, title, year):
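        # Ask OMDb for the film's country and language, bail out unless it is Indian, then search the site by title and language and return the matching relative URL.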
        try:
            search = 'http://www.omdbapi.com/?i=tt%s' % imdb
            search = client.source(search)
            search = json.loads(search)
            country = [i.strip() for i in search['Country'].split(',')]
            if not 'India' in country: return

            languages = ['hindi', 'tamil', 'telugu', 'malayalam']
            language = [i.strip().lower() for i in search['Language'].split(',')]
            language = [i for i in language if any(x == i for x in languages)][0]

            query = self.search_link % (urllib.quote_plus(title), language)
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, "div", attrs = { "class": "search-category" })
            result = [i for i in result if 'Movies' in client.parseDOM(i, "p")[0]][0]
            result = client.parseDOM(result, "li")

            title = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
            r = [i for i in result if any(x in i[1] for x in years)]
            if not len(r) == 0: result = r
            result = [i[0] for i in result if title == cleantitle.movie(i[1])][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = url.replace('../', '/')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #3
    def get_movie(self, imdb, title, year):
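        # Search via the site's AJAX endpoint, match the cleaned title, and only return the relative URL once the IMDb id shows up on the target page.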
        try:
            query = urlparse.urljoin(self.base_link, self.search_link)
            post = urllib.urlencode({'action': 'ajaxy_sf', 'sf_value': title})

            result = client.source(query, post=post)
            result = result.replace('–', '-').replace('’', '\'')
            result = json.loads(result)
            result = result['post']['all']

            title = cleantitle.movie(title)
            result = [
                i['post_link'] for i in result
                if title == cleantitle.movie(i['post_title'])
            ][0]

            check = client.source(result)
            if not str('tt' + imdb) in check: raise Exception()

            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
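Example #4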
    def get_movie(self, imdb, title, year):
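        # Search the site (falling back to the proxy agent), parse each result block's id, title and release date, and return the id matching title and year.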
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            if result == None: result = client.source(self.agent_link + urllib.quote_plus(query))

            result = result.replace('\r','').replace('\n','').replace('\t','')

            result = re.compile('(<div id="*\d*.+?</div>)').findall(result)

            title = cleantitle.movie(title)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            result = [(re.compile('id="*(\d*)"*').findall(i), re.compile('<h4>(.+?)</h4>').findall(i), re.compile('Releasedatum *: *(\d{4})').findall(i)) for i in result]
            result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            result = [(i[0], i[1].rsplit('</span>')[0].split('>')[-1].strip(), i[2]) for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #5
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
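        # Open the show page (fetching a sign-in cookie if needed) and return the data-href behind the tab for the requested season and episode.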
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            if not "menu season-tabs" in result:
                cookie = client.source(self.sign_link,
                                       post=self.key_link,
                                       output='cookie')
                result = client.source(url, cookie=cookie)

            result = client.parseDOM(
                result,
                "a",
                ret="data-href",
                attrs={"href": "#%01d-%01d" % (int(season), int(episode))})[0]

            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #6
    def get_show(self, imdb, tvdb, show, show_alt, year):
        try:
            result = client.source(self.base_link)
            if not "'index show'" in result:
                cookie = client.source(self.sign_link, post=self.key_link, output='cookie')
                result = client.source(self.base_link, cookie=cookie)

            result = client.parseDOM(result, "div", attrs = { "class": "index show" })
            result = [(client.parseDOM(i, "a", attrs = { "class": "name" })[0], client.parseDOM(i, "span", attrs = { "class": "value" })[0], client.parseDOM(i, "a", ret="href")[0]) for i in result]

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = [str(year), str(int(year)+1), str(int(year)-1)]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [i[2] for i in result if any(x == cleantitle.tv(i[0]) for x in shows)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #7
    def get_sources(self, url, hosthdDict, hostDict, locDict):
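        # Follow the footer link to the play page and extract the direct video source, flagging it as 1080p when advertised.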
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, self.footer_link % url)
            result = client.source(url)

            url = client.parseDOM(result, "a", ret="href")
            url = [i for i in url if 'play/' in i][0]
            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            url = client.parseDOM(result,
                                  "source",
                                  ret="src",
                                  attrs={"type": "video/.+?"})[0]
            if '1080p' in url: quality = '1080p'
            else: quality = 'HD'

            sources.append({
                'source': 'Sweflix',
                'quality': quality,
                'provider': 'Sweflix',
                'url': url
            })
            return sources
        except:
            return sources
Example #8
    def get_movie(self, imdb, title, year):
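        # Query the JSON search API, keep films whose cleaned title matches, then check each film's detail state for the release year and return the matching id.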
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = json.loads(result)
            result = result['data']['films']

            title = cleantitle.movie(title)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]
            result = [(i['id'], i['title'].encode('utf-8')) for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])][:2]
            result = [(i[0], self.base_link + self.detail_link % i[0])
                      for i in result]
            result = [(i[0], client.source(i[1])) for i in result]
            result = [(i[0], json.loads(i[1])['data']['state'])
                      for i in result]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = str(result)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #9
    def get_sources(self, url, hosthdDict, hostDict, locDict):
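        # Walk up to three server links, keeping any direct GVideo source on the page and decrypting the proxied clickplay link into GVideo or VK streams.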
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            u = client.parseDOM(result,
                                "meta",
                                ret="content",
                                attrs={"property": "og:url"})[0]
            links = re.compile(
                '<a href="([?]link_id=.+?)".+?>(.+?)</a>').findall(result)
            links = [u + i[0] for i in links if 'server' in i[1].lower()]

            for u in links[:3]:
                try:
                    result = client.source(u)

                    url = client.parseDOM(result,
                                          "source",
                                          ret="src",
                                          attrs={"type": "video/.+?"})
                    if len(url) > 0:
                        i = googleplus.tag(url[0])[0]
                        sources.append({
                            'source': 'GVideo',
                            'quality': i['quality'],
                            'provider': 'Clickplay',
                            'url': i['url']
                        })

                    url = re.compile(
                        'proxy[.]link=clickplay[*](.+?)"').findall(result)[-1]
                    url = gkplugins.decrypter(198, 128).decrypt(
                        url,
                        base64.urlsafe_b64decode(
                            'bW5pcUpUcUJVOFozS1FVZWpTb00='),
                        'ECB').split('\0')[0]

                    if 'google' in url: source = 'GVideo'
                    elif 'vk.com' in url: source = 'VK'
                    else: raise Exception()

                    url = resolvers.request(url)
                    for i in url:
                        sources.append({
                            'source': source,
                            'quality': i['quality'],
                            'provider': 'Clickplay',
                            'url': i['url']
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example #10
    def get_show(self, imdb, tvdb, show, show_alt, year):
        try:
            query = self.search_link % (urllib.quote_plus(show))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = json.loads(result)
            result = result['data']['films']

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1)
            ]
            result = [(i['id'], i['title'].encode('utf-8')) for i in result]
            result = [
                i for i in result
                if any(x == cleantitle.tv(i[1]) for x in shows)
            ][:2]
            result = [(i[0], self.base_link + self.detail_link % i[0])
                      for i in result]
            result = [(i[0], client.source(i[1])) for i in result]
            result = [(i[0], json.loads(i[1])['data']['state'])
                      for i in result]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = str(result)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #11
    def get_sources(self, url, hosthdDict, hostDict, locDict):
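        # Extract the page's video_id, post it to the info endpoint, and keep the 1080p/HD Google Video streams from the response.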
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            video_id = re.compile('video_id *= *[\'|\"](.+?)[\'|\"]').findall(result)[0]
            post = urllib.urlencode({'video_id': video_id})

            result = client.source(urlparse.urljoin(self.base_link, self.info_link), post=post)

            u = [i for i in result.split('&') if 'google' in i][0]
            u = urllib.unquote_plus(u)
            u = [urllib.unquote_plus(i.split('|')[-1]) for i in u.split(',')]
            u = [googleplus.tag(i)[0] for i in u]
            u = [i for i in u if i['quality'] in ['1080p', 'HD']]

            for i in u: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'Afdah', 'url': i['url']})

            return sources
        except:
            return sources
Example #12
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            if url == None:
                return

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            if not "menu season-tabs" in result:
                cookie = client.source(self.sign_link, post=self.key_link, output="cookie")
                result = client.source(url, cookie=cookie)

            result = client.parseDOM(
                result, "a", ret="data-href", attrs={"href": "#%01d-%01d" % (int(season), int(episode))}
            )[0]

            try:
                url = re.compile("//.+?(/.+)").findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode("utf-8")
            return url
        except:
            return
Example #13
    def get_show(self, imdb, tvdb, show, show_alt, year):
        try:
            result = client.source(self.base_link)
            if not "'index show'" in result:
                cookie = client.source(self.sign_link,
                                       post=self.key_link,
                                       output='cookie')
                result = client.source(self.base_link, cookie=cookie)

            result = client.parseDOM(result,
                                     "div",
                                     attrs={"class": "index show"})
            result = [(client.parseDOM(i, "a", attrs={"class": "name"})[0],
                       client.parseDOM(i, "span", attrs={"class": "value"})[0],
                       client.parseDOM(i, "a", ret="href")[0]) for i in result]

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = [str(year), str(int(year) + 1), str(int(year) - 1)]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [
                i[2] for i in result
                if any(x == cleantitle.tv(i[0]) for x in shows)
            ][0]

            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #14
    def get_movie(self, imdb, title, year):
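        # Try each mirror until the search results load, filter rows by year, then pick the exact title match or fall back to checking the IMDb id on each candidate page.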
        try:
            query = self.search_link % urllib.quote_plus(title)

            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, query),
                                       headers=self.headers)
                if 'movie_table' in str(result): break

            result = result.decode('iso-8859-1').encode('utf-8')
            result = client.parseDOM(result,
                                     "div",
                                     attrs={"class": "movie_table"})

            title = cleantitle.movie(title)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]
            result = [(client.parseDOM(i, "a", ret="href")[0],
                       client.parseDOM(i, "a", ret="title")[1])
                      for i in result]
            result = [i for i in result if any(x in i[1] for x in years)]

            result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
            try:
                result = [
                    (urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0],
                     i[1]) for i in result
                ]
            except:
                pass
            result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

            match = [i[0] for i in result if title == cleantitle.movie(i[1])]

            match2 = [i[0] for i in result]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:10]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    result = client.source(base_link + i, headers=self.headers)
                    if str('tt' + imdb) in str(result):
                        url = i
                        break
                except:
                    pass

            url = url.encode('utf-8')
            return url
        except:
            return
Example #15
    def get_sources(self, url, hosthdDict, hostDict, locDict):
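        # Fetch the JSON film list (selecting the episode entry for TV), split the film_link into per-resolution URLs and add the 1080p and 720p GVideo streams.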
        try:
            sources = []

            if url == None: return sources

            content = re.compile('(.+?)\sS\d*E\d*$').findall(url)

            if len(content) == 0:
                query = urlparse.urljoin(self.base_link,
                                         self.source_link % url)

                result = client.source(query)
                result = json.loads(result)
                result = result['films'][0]['film_link']
            else:
                url, ep = re.compile('(.+?)\s(S\d*E\d*)$').findall(url)[0]
                query = urlparse.urljoin(self.base_link,
                                         self.source_link % url)

                result = client.source(query)
                result = json.loads(result)
                result = result['films']
                result = [
                    i['film_link'] for i in result
                    if ep in i['film_name'].encode('utf-8').upper()
                ][0]

            result = re.compile('(.+?)#(\d*)#').findall(result)

            try:
                url = [i[0] for i in result if str(i[1]) == '1080'][0]
                sources.append({
                    'source': 'GVideo',
                    'quality': '1080p',
                    'provider': 'GVcenter',
                    'url': url
                })
            except:
                pass
            try:
                url = [i[0] for i in result if str(i[1]) == '720'][0]
                sources.append({
                    'source': 'GVideo',
                    'quality': 'HD',
                    'provider': 'GVcenter',
                    'url': url
                })
            except:
                pass

            return sources
        except:
            return sources
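Example #16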
    def get_sources(self, url, hosthdDict, hostDict, locDict):
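        # Fetch and decrypt the content listing, then decrypt up to three film links and keep the Google Video streams.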
        try:
            sources = []

            if url == None:
                return sources

            query = re.compile("(\d*)").findall(url)[0]
            query = urlparse.urljoin(self.base_link, self.content_link % query)
            query += self.__extra()

            result = client.source(query)
            result = json.loads(result)
            result = self.__decrypt(self.data_key, result["data"])
            result = json.loads(result)
            result = result["listvideos"]

            content = re.compile("(.+?)\sS\d*E\d*$").findall(url)

            if len(content) == 0:
                links = [i["film_id"] for i in result]
            else:
                ep = re.compile(".+?\s(S\d*E\d*)$").findall(url)[0]
                links = [i["film_id"] for i in result if ep in i["film_name"].encode("utf-8").upper()]

            for l in links[:3]:
                try:
                    url = urlparse.urljoin(self.base_link, self.source_link % l)
                    url += self.__extra()

                    url = client.source(url)
                    url = json.loads(url)

                    url = self.__decrypt(self.data_key, url["data"])
                    url = json.loads(url)["videos"]
                    url = [self.__decrypt(self.film_key, i["film_link"]) for i in url]

                    url = "#".join(url)
                    url = url.split("#")
                    url = [i for i in url if "http" in i and "google" in i]
                    url = [googleplus.tag(i)[0] for i in url]

                    for i in url:
                        sources.append(
                            {"source": "GVideo", "quality": i["quality"], "provider": "GVcenter", "url": i["url"]}
                        )
                except:
                    pass

            return sources
        except:
            return sources
Example #17
    def get_movie(self, imdb, title, year):
        try:
            query = self.search_link % urllib.quote_plus(title)

            result = ""
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
                if "movie_table" in str(result):
                    break

            result = result.decode("iso-8859-1").encode("utf-8")
            result = client.parseDOM(result, "div", attrs={"class": "movie_table"})

            title = cleantitle.movie(title)
            years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[1]) for i in result]
            result = [i for i in result if any(x in i[1] for x in years)]

            result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
            try:
                result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)["u"][0], i[1]) for i in result]
            except:
                pass
            result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

            match = [i[0] for i in result if title == cleantitle.movie(i[1])]

            match2 = [i[0] for i in result]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []:
                return

            for i in match2[:10]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    result = client.source(base_link + i, headers=self.headers)
                    if str("tt" + imdb) in str(result):
                        url = i
                        break
                except:
                    pass

            url = url.encode("utf-8")
            return url
        except:
            return
Example #18
    def get_sources(self, url, hosthdDict, hostDict, locDict):
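        # Resolve the chapter id (per episode for TV), fetch its stream list and AES-decrypt each ggvideo stream URL.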
        try:
            sources = []

            if url == None: return sources

            content = re.compile('(.+?)\sS\d*E\d*$').findall(url)

            if len(content) == 0:
                query = urlparse.urljoin(self.base_link, self.detail_link % url)

                result = client.source(query)
                result = json.loads(result)
                result = result['data']['chapters'][0]['id']
            else:
                url, s, e = re.compile('(.+?)\sS(\d*)E(\d*)$').findall(url)[0]
                ep = 'S%02dE%03d' % (int(s), int(e))

                query = urlparse.urljoin(self.base_link, self.detail_link % url)

                result = client.source(query)
                result = json.loads(result)
                result = result['data']['chapters']
                result = [i['id'] for i in result if ep in i['title'].encode('utf-8').upper()][0]

            url = urlparse.urljoin(self.base_link, self.stream_link % result)
            result = client.source(url)
            result = json.loads(result)['data']

            for i in result:
                try:
                    if not i['server'] == 'ggvideo': raise Exception()

                    quality = i['quality'].replace('720p', 'HD')
                    if not quality in ['1080p', 'HD']: quality = 'SD'

                    key = base64.b64decode('cXdlcnR5dWlvcGFzZGZnaGprbHp4YzEyMzQ1Njc4OTA=')
                    decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(key, '\0' * 16))
                    url = base64.decodestring(i['stream'])
                    url = decrypter.feed(url) + decrypter.feed()

                    sources.append({'source': 'GVideo', 'quality': quality, 'provider': 'PlayBox', 'url': url})
                except:
                    pass

            return sources
        except:
            return sources
Example #19
    def get_movie(self, imdb, title, year):
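        # Search the site through Cloudflare (falling back to the proxy agent), match the cleaned title and year, and return the result's path.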
        try:
            query = urlparse.urljoin(self.base_link, self.moviesearch_link + urllib.quote_plus(title))

            result = cloudflare.source(query)
            if result == None: result = client.source(self.agent_link + urllib.quote_plus(query))

            result = result.replace('> ', '>').replace(' <', '<')

            r = client.parseDOM(result, "li", attrs = { "class": "first element.+?" })
            r += client.parseDOM(result, "li", attrs = { "class": "element.+?" })

            title = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, "a", ret="href"), re.compile('>(.+?\(\d{4}\))<').findall(i)) for i in r]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], i[1].split('>')[-1]) for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
Example #20
    def get_movie(self, imdb, title, year):
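        # Search the site, match title and year against each hover card, and return its data-movieid.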
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result,
                                     "div",
                                     attrs={"class": "hover-group.+?"})

            title = cleantitle.movie(title)
            years = [
                '>%s<' % str(year),
                '>%s<' % str(int(year) + 1),
                '>%s<' % str(int(year) - 1)
            ]
            result = [(client.parseDOM(i, "a", ret="data-movieid")[0],
                       client.parseDOM(i, "h5")[-1],
                       client.parseDOM(i, "p")[-1])
                      for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #21
    def get_show(self, imdb, tvdb, show, show_alt, year):
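        # Browse the alphabetical TV index (one page per leading letter) across mirrors and pull the link tagged with the show's IMDb id.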
        try:
            query = re.sub("^THE\s+|^A\s+", "", show.strip().upper())[0]
            if not query.isalpha():
                query = "1"
            query = self.tvsearch_link % query

            result = ""
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
                if "Donate" in str(result):
                    break

            result = result.decode("iso-8859-1").encode("utf-8")
            result = re.compile("id=%s>.+?href=(.+?)>" % imdb).findall(result)[0]

            url = client.replaceHTMLCodes(result)
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)["u"][0]
            except:
                pass
            url = "%s?%s" % (urlparse.urlparse(url).path, urlparse.urlparse(url).query)
            url = url.encode("utf-8")
            return url
        except:
            return
Example #22
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
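        # Post a keyword search, match the show name and season in the results, and return the video id joined with the zero-padded episode number.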
        try:
            if url == None: return

            query = self.tvbase_link + self.index_link
            post = urllib.urlencode({'a': 'retrieve', 'c': 'result', 'p': '{"KeyWord":"%s","Page":"1","NextToken":""}' % url})

            result = client.source(query, post=post)
            result = result.decode('iso-8859-1').encode('utf-8')
            result = client.parseDOM(result, "tr")

            show = cleantitle.tv(url)
            season = '%01d' % int(season)
            episode = '%02d' % int(episode)
            result = [client.parseDOM(i, "h1")[0] for i in result]
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
            result = [(i[0], re.sub('\sSeason(|\s)\d*.+', '', i[1]), re.compile('\sSeason *(\d*) *').findall(i[1])[0]) for i in result]
            result = [i for i in result if show == cleantitle.tv(i[1])]
            result = [i[0] for i in result if season == i[2]][0]

            url = result.split('v=', 1)[-1]
            url = '%s|%s' % (url, episode)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #23
    def get_show(self, imdb, tvdb, show, show_alt, year):
        try:
            query = re.sub('^THE\s+|^A\s+', '', show.strip().upper())[0]
            if not query.isalpha(): query = '1'
            query = self.tvsearch_link % query

            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, query),
                                       headers=self.headers)
                if 'Donate' in str(result): break

            result = result.decode('iso-8859-1').encode('utf-8')
            result = re.compile('id=%s>.+?href=(.+?)>' %
                                imdb).findall(result)[0]

            url = client.replaceHTMLCodes(result)
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except:
                pass
            url = '%s?%s' % (urlparse.urlparse(url).path,
                             urlparse.urlparse(url).query)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #24
    def get_show(self, imdb, tvdb, show, show_alt, year):
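        # Query the JSON catalog search and return the catalog_id whose name matches the show and a +/-1 year window.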
        try:
            query = urlparse.urljoin(
                self.base_link, self.search_link % (urllib.quote_plus(show)))

            result = client.source(query)
            result = json.loads(result)
            result = result['categories']

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1)
            ]
            result = [(i['catalog_id'], i['catalog_name'].encode('utf-8'))
                      for i in result]
            result = [(i[0], re.compile('(.+?) [(](.+?)[)]$').findall(i[1])[0])
                      for i in result]
            result = [(i[0], i[1][0],
                       re.compile('(\d{4})').findall(i[1][1])[0])
                      for i in result]
            result = [
                i for i in result
                if any(x == cleantitle.tv(i[1]) for x in shows)
            ]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            url = str(result)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #25
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
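        # Search for '<show> "SxxEyy"', match the cleaned title and episode tag in the post headers, and return the relative URL.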
        try:
            if url == None: return

            title = url
            hdlr = 'S%02dE%02d' % (int(season), int(episode))

            query = self.search_link % (urllib.quote_plus('%s "%s"' % (title, hdlr)))
            query = urlparse.urljoin(self.tvbase_link, query)

            result = client.source(query)
            result = client.parseDOM(result, "header", attrs = { "class": "post-title" })

            title = cleantitle.tv(title)
            result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], re.compile('(.+?) (S\d*E\d*)').findall(i[1])) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.tv(i[1])]
            result = [i[0] for i in result if hdlr == i[2]][0]

            url = result.replace(self.tvbase_link, '')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #26
    def get_movie(self, imdb, title, year):
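        # Query and decrypt the catalog search, keep film entries only, and return the catalog_id matching title and year.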
        try:
            query = urlparse.urljoin(
                self.base_link, self.search_link % (urllib.quote_plus(title)))
            query += self.__extra()

            result = client.source(query)
            result = json.loads(result)
            result = self.__decrypt(self.data_key, result['data'])
            result = json.loads(result)
            result = result['categories']

            title = cleantitle.movie(title)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]
            result = [(i['catalog_id'], i['catalog_name'].encode('utf-8'),
                       str(i['type_film'])) for i in result]
            result = [i for i in result if i[2] == '0']
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = str(result)
            url = url.encode('utf-8')
            return url
        except:
            return
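Example #27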
    def get_sources(self, url, hosthdDict, hostDict, locDict):
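        # Read the listed quality from the page, extract the Videomega hashkey from the embed and resolve it into a playable link.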
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            result = client.source(url)

            fmt = re.compile('<strong>Quality</strong>.+?<strong>(.+?)</strong>').findall(result)
            if len(fmt) > 0: fmt = (' '.join((fmt[0].decode("utf-8").lower().strip()).split())).split(' ')

            if any(x in ['dvdscr', 'r5', 'r6'] for x in fmt): quality = 'SCR'
            elif any(x in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts'] for x in fmt): quality = 'CAM'
            else: quality = 'HD'

            result = client.parseDOM(result, "div", attrs = { "class": "video-embed" })[0]
            url = re.compile('hashkey=(.+?)[\'|\"]').findall(result)
            url += re.compile('[?]ref=(.+?)[\'|\"]').findall(result)
            url = self.videomega_link % url[0]

            url = videomega.resolve(url)
            if url == None: raise Exception()

            sources.append({'source': 'Videomega', 'quality': quality, 'provider': 'Onlinemovies', 'url': url})

            return sources
        except:
            return sources
Example #28
    def get_show(self, imdb, tvdb, show, show_alt, year):
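        # Post the search form across mirrors, match the show name and year in the results table, and return the link's path.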
        try:
            query = self.search_link
            post = urllib.urlencode({'searchquery': show, 'searchin': '2'})

            result = ''
            links = [self.link_1, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, query), post=post, headers=self.headers)
                if 'widget search-page' in str(result): break

            result = client.parseDOM(result, "div", attrs = { "class": "widget search-page" })[0]
            result = client.parseDOM(result, "td")

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, "a", ret="href")[-1], client.parseDOM(i, "a")[-1]) for i in result]
            result = [i for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
Example #29
    def get_sources(self, url, hosthdDict, hostDict, locDict):
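        # Read the listed quality, then resolve the embedded filepup iframe into a playable link.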
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            quality = re.compile('Quality *: *(.+)').findall(result)
            quality = 'SD' if len(quality) == 0 else quality[0]
            quality = re.sub('<.+?>', '', quality).strip().upper()

            if quality not in ['SD', 'HD']: quality = 'CAM'

            url = client.parseDOM(result, "iframe", ret="src")
            url = [i for i in url if 'filepup' in i][0]
            url = filepup.resolve(url)
            if url == None: raise Exception()

            sources.append({'source': 'Filepup', 'quality': quality, 'provider': 'Movienight', 'url': url})

            return sources
        except:
            return sources
Example #30
    def get_movie(self, imdb, title, year):
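        # Search the site, filter out season/series results, and return the link whose title and year match the request.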
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, "div", attrs = { "id": "post-.+?" })

            title = cleantitle.movie(title)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0], client.parseDOM(i, "div", attrs = { "class": "status status-year" }), client.parseDOM(i, "div", attrs = { "class": "mark-8" })) for i in result]
            result = [(i[0], i[1], i[2][0], i[3]) for i in result if len(i[2]) > 0]
            result = [(i[0], i[1], i[2], i[3], re.compile('Season (\d*)$').findall(i[1])) for i in result]
            result = [(i[0], i[1], i[2], i[3]) for i in result if len(i[4]) == 0]
            result = [(i[0], i[1], i[2]) for i in result if len(i[3]) == 0]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #31
    def get_movie(self, imdb, title, year):
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result,
                                     "ul",
                                     attrs={"class": "listing-videos.+?"})[0]
            result = client.parseDOM(result, "li", attrs={"class": ".+?"})

            title = cleantitle.movie(title)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]
            result = [(client.parseDOM(i, "a", ret="href")[0],
                       client.parseDOM(i, "a", ret="title")[0])
                      for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #32
    def get_movie(self, imdb, title, year):
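        # Search the site, match title and year inside each result's caption link, and return the relative URL.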
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, "div", attrs = { "class": "home_post_cont.+?" })

            title = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "img", ret="title")[0]) for i in result]
            result = [(i[0], client.replaceHTMLCodes(i[1])) for i in result]
            result = [(i[0], client.parseDOM(i[1], "a")) for i in result]
            result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #33
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
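        # Search for the show plus "Season x" "Episode y" (falling back to the proxy agent) and return the path of the matching listing.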
        try:
            if url == None: return

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            query = '%s "Season %s" "Episode %s"' % (url, season, episode)
            query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))

            result = cloudflare.source(query)
            if result == None: result = client.source(self.agent_link + urllib.quote_plus(query))

            result = result.replace('> ', '>').replace(' <', '<')

            r = client.parseDOM(result, "li", attrs = { "class": "first element.+?" })
            r += client.parseDOM(result, "li", attrs = { "class": "element.+?" })

            show = cleantitle.tv(url)
            result = [(client.parseDOM(i, "a", ret="href"), re.compile('>(.+?): Season (\d*), Episode (\d*)<').findall(i)) for i in r]
            result = [(i[0][0], i[1][-1][0].split('>')[-1], i[1][-1][1], i[1][-1][2]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
            result = [i[0] for i in result if show == cleantitle.tv(i[1])][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
Example #34
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            if url == None: return

            title = url
            hdlr = 'S%02dE%02d' % (int(season), int(episode))

            query = self.search_link % (urllib.quote_plus('%s "%s"' %
                                                          (title, hdlr)))
            query = urlparse.urljoin(self.tvbase_link, query)

            result = client.source(query)
            result = client.parseDOM(result,
                                     "header",
                                     attrs={"class": "post-title"})

            title = cleantitle.tv(title)
            result = [(client.parseDOM(i, "a",
                                       ret="href"), client.parseDOM(i, "a"))
                      for i in result]
            result = [(i[0][0], i[1][0]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], re.compile('(.+?) (S\d*E\d*)').findall(i[1]))
                      for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result
                      if len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.tv(i[1])]
            result = [i[0] for i in result if hdlr == i[2]][0]

            url = result.replace(self.tvbase_link, '')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #35
    def get_sources(self, url, hosthdDict, hostDict, locDict):
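        # Pull the encrypted 'i=' parameter out of the page links, decrypt it with gkplugins and resolve the resulting GVideo streams.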
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)
            if result == None: result = client.source(self.agent_link + urllib.quote_plus(url))

            result = client.parseDOM(result, "a", ret="href")

            u = [i for i in result if '.php' in i and 'i=' in urllib.unquote_plus(i)][0]
            u = client.replaceHTMLCodes(u)
            try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
            except: pass
            u = urlparse.parse_qs(urlparse.urlparse(u).query)['i'][0]

            url = gkplugins.decrypter(198, 128).decrypt(u, base64.urlsafe_b64decode('b3F5czkzZEswc2FEY3pRNW9NSTE='), 'ECB').split('\0')[0]
            url = resolvers.request(url)

            if not type(url) == list: raise Exception()

            for i in url: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'YIFYstream', 'url': i['url']})

            return sources
        except:
            return sources
Example #36
    def get_sources(self, url, hosthdDict, hostDict, locDict):
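        # Derive a numeric offset from the URL parameters, pick the English entry from the JSON response and resolve the reassembled VK link.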
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            headers = {'User-Agent': 'android-async-http/1.4.1 (http://loopj.com/android-async-http)'}

            par = urlparse.parse_qs(urlparse.urlparse(url).query)
            try: num = int(par['h'][0]) + int(par['u'][0]) + int(par['y'][0])
            except: num = int(par['id'][0]) + 537

            result = client.source(url, headers=headers)
            result = json.loads(result)
            try: result = result['langs']
            except: pass
            i = [i for i in result if i['lang'] in ['en', '']][0]

            url = (str(int(i['apple']) + num), str(int(i['google']) + num), i['microsoft'])
            url = vk.resolve(self.vk_link % url)

            for i in url: sources.append({'source': 'VK', 'quality': i['quality'], 'provider': 'VKBox', 'url': i['url']})

            return sources
        except:
            return sources
Example #37
    def get_show(self, imdb, tvdb, show, show_alt, year):
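        # Fetch the search key across mirrors, run the TV search, filter by year, then confirm the match by title or by the IMDb id on the detail page.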
        try:
            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, self.key_link), headers=self.headers)
                if 'searchform' in str(result): break

            key = client.parseDOM(result, "input", ret="value", attrs = { "name": "key" })[0]
            query = self.tvsearch_link % (urllib.quote_plus(re.sub('\'', '', show)), key)

            result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
            result = result.decode('iso-8859-1').encode('utf-8')
            result = client.parseDOM(result, "div", attrs = { "class": "index_item.+?" })

            shows = ['watch' + cleantitle.tv(show), 'watch' + cleantitle.tv(show_alt)]
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0]) for i in result]
            result = [i for i in result if any(x in i[1] for x in years)]

            result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
            try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
            except: pass
            result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

            match = [i[0] for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]

            match2 = [i[0] for i in result]
            match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    result = client.source(base_link + i, headers=self.headers)
                    if str('tt' + imdb) in str(result):
                        url = i
                        break
                except:
                    pass

            url = url.encode('utf-8')
            return url
        except:
            return
Example #38
    def get_show(self, imdb, tvdb, show, show_alt, year):
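        # Search within a +/-1 year window across mirrors, then confirm the match by title or by the IMDb id on the detail page.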
        try:
            query = self.search_link % (str(int(year)-1), str(int(year)+1), urllib.quote_plus(show))

            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, query), headers=self.headers)
                if 'episode-summary' in str(result): break

            result = result.decode('iso-8859-1').encode('utf-8')
            result = client.parseDOM(result, "div", attrs = { "class": "episode-summary" })[0]
            result = client.parseDOM(result, "tr")

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[0], client.parseDOM(i, "a")[-1]) for i in result]
            result = [(i[0], re.sub('<.+?>|</.+?>','', i[1])) for i in result]
            result = [i for i in result if any(x in i[1] for x in years)]

            result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
            try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
            except: pass
            result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]

            match = [i[0] for i in result if any(x == cleantitle.tv(i[1]) for x in shows)]

            match2 = [i[0] for i in result]
            match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    result = client.source(base_link + i, headers=self.headers)
                    if str('tt' + imdb) in str(result):
                        url = i
                        break
                except:
                    pass

            url = url.encode('utf-8')
            return url
        except:
            return
Example #39
    def get_sources(self, url, hosthdDict, hostDict, locDict):
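        # Try each mirror for the links page, read the listed quality, and collect every hoster link whose host appears in hostDict.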
        try:
            sources = []

            if url == None:
                return sources

            result = ""
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, url), headers=self.headers)
                if "link_name" in str(result):
                    break

            result = result.decode("iso-8859-1").encode("utf-8")
            result = result.replace("\n", "")

            quality = re.compile(">Links - Quality(.+?)<").findall(result)[0]
            quality = quality.strip()
            if quality == "CAM" or quality == "TS":
                quality = "CAM"
            elif quality == "SCREENER":
                quality = "SCR"
            else:
                quality = "SD"

            links = client.parseDOM(result, "div", attrs={"id": "links"})[0]
            links = client.parseDOM(links, "ul")

            for i in links:
                try:
                    host = client.parseDOM(i, "li", attrs={"id": "link_name"})[-1]
                    try:
                        host = client.parseDOM(host, "span", attrs={"class": "google-src-text"})[0]
                    except:
                        pass
                    host = host.strip().lower()
                    if not host in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode("utf-8")

                    url = client.parseDOM(i, "a", ret="href")[0]
                    url = client.replaceHTMLCodes(url)
                    try:
                        url = urlparse.parse_qs(urlparse.urlparse(url).query)["u"][0]
                    except:
                        pass
                    if not url.startswith("http"):
                        url = urlparse.urljoin(self.base_link, url)
                    url = url.encode("utf-8")

                    sources.append({"source": host, "quality": quality, "provider": "Movie25", "url": url})
                except:
                    pass

            return sources
        except:
            return sources
Example #40
    def get_sources(self, url, hosthdDict, hostDict, locDict):
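        # Open the full-window player, post the movie_player_file value to the host taken from the player script, and collect the returned GVideo streams.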
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link , url)
            result = client.source(url)

            url = client.parseDOM(result, "div", attrs = { "class": "fullwindowlink" })[0]
            url = client.parseDOM(url, "a", ret="href")[0]
            url = urlparse.urljoin(self.base_link , url)

            result = client.source(url)
            result = client.parseDOM(result, "body")[0]

            post = re.compile('movie_player_file *= *"(.+?)"').findall(result)[0]
            post = urllib.urlencode({'url': post})

            url = client.parseDOM(result, "script", ret="src", attrs = { "type": ".+?" })[0]
            url = client.source(url)
            url = url.replace('\n','')
            url = re.compile('getServerHost.+?return\s+"(.+?)"').findall(url)[0]

            headers = { 'Host': 'hdmoviezone.net',
            'Connection': 'keep-alive',
            'Accept': 'text/html, */*; q=0.01',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Origin': self.base_link }

            result = client.source(url, post=post, headers=headers)
            result = json.loads(result)
            result = result['content']

            links = [i['url'] for i in result]

            for url in links:
                try:
                    i = googleplus.tag(url)[0]
                    sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'Moviezone', 'url': i['url']})
                except:
                    pass

            return sources
        except:
            return sources
Example no. 41
    def get_movie(self, imdb, title, year):
        try:
            query = self.search_link % (urllib.quote_plus(title))

            result = client.source(query)

            title = cleantitle.movie(title)
            years = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1)
            ]

            result = client.parseDOM(result, "h3", attrs={"class": ".+?"})
            result = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in result]
            result = [(i[0][0], i[1][-1]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[0] for x in years) or any(x in i[1] for x in years)]
            result = [i[0] for i in result if title in cleantitle.movie(i[0]) or title in cleantitle.movie(i[1])][0]

            result = result.replace('/tag/', '/')
            result = cloudflare.source(result)

            r = client.parseDOM(result, "title")[0]

            t = re.sub('(\.|\_|\(|\[|\s)(\d{4}|3D)(\.|\_|\)|\]|\s)(.+)', '', r)
            if not title == cleantitle.movie(t): raise Exception()

            y = re.compile('[\.|\_|\(|\[|\s](\d{4})[\.|\_|\)|\]|\s]').findall(r)[0]
            if not any(x == y for x in years): raise Exception()

            result = client.parseDOM(result, "link", ret="href", attrs={"rel": "canonical"})[0]

            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example no. 42
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = result.decode('iso-8859-1').encode('utf-8')
            links = client.parseDOM(result, "div", attrs={"class": "lang"})[0]
            links = client.parseDOM(links,
                                    "div",
                                    attrs={"class": "movie_link.+?"})

            fmt = [client.parseDOM(i, "h4")[0] for i in links]
            fmt = [re.findall('\w+', i.lower()) for i in fmt]
            fmt = sum(fmt, [])

            if any(x in ['dvdscr', 'r5', 'r6'] for x in fmt): quality = 'SCR'
            elif any(x in [
                    'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts',
                    'cam', 'ts'
            ] for x in fmt):
                quality = 'CAM'
            else:
                quality = 'SD'

            for i in links:
                try:
                    host = client.parseDOM(i, "a", attrs={"target": ".+?"})[0]
                    host = host.split('.', 1)[0]
                    host = host.strip().lower()
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    url = client.parseDOM(i, "a", ret="href")[0]
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'Vidics',
                        'url': url
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example no. 43
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            u = client.parseDOM(result, "meta", ret="content", attrs = { "property": "og:url" })[0]
            links = re.compile('<a href="([?]link_id=.+?)".+?>(.+?)</a>').findall(result)
            links = [u + i[0]  for i in links if 'server' in i[1].lower()]

            for u in links[:3]:
                try:
                    result = client.source(u)

                    url = client.parseDOM(result, "source", ret="src", attrs = { "type": "video/.+?" })
                    if len(url) > 0:
                        i = googleplus.tag(url[0])[0]
                        sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'Clickplay', 'url': i['url']})

                    url = re.compile('proxy[.]link=clickplay[*](.+?)"').findall(result)[-1]

                    key = base64.b64decode('bW5pcUpUcUJVOFozS1FVZWpTb00=')
                    decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationECB(key + (24 - len(key)) * '\0'))
                    url = url.decode('hex')
                    url = decrypter.feed(url) + decrypter.feed()

                    if 'google' in url: source = 'GVideo'
                    elif 'vk.com' in url: source = 'VK'
                    else: raise Exception()

                    url = resolvers.request(url)
                    for i in url: sources.append({'source': source, 'quality': i['quality'], 'provider': 'Clickplay', 'url': i['url']})
                except:
                    pass

            return sources
        except:
            return sources
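The decode step above hex-decodes the proxied payload and then decrypts it with AES in ECB mode, after padding the base64-decoded key to 24 bytes. A minimal standalone sketch of that step, assuming Python 2 and the pyaes package (the function name and arguments are hypothetical):

    import base64
    import pyaes

    def decode_link(hex_payload, b64_key):
        key = base64.b64decode(b64_key)
        key = key + (24 - len(key)) * '\0'      # pad to a 24-byte (AES-192) key, as above
        decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationECB(key))
        data = hex_payload.decode('hex')        # Python 2: hex string -> raw bytes
        return decrypter.feed(data) + decrypter.feed()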
Example no. 44
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            show, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]

            query = self.search_link % (urllib.quote_plus(show))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, "div", attrs = { "id": "post-.+?" })

            show = cleantitle.tv(show)
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[0], client.parseDOM(i, "div", attrs = { "class": "status status-year" })) for i in result]
            result = [x for y,x in enumerate(result) if x not in result[:y]]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0]
            result = [(i[0], re.compile('(.+?) Season (\d*)$').findall(i[1]), i[2]) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
            result = [i for i in result if show == cleantitle.tv(i[1])]
            result = [i for i in result if season == i[2]]
            result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            url = urlparse.urljoin(self.base_link, result)

            result = client.source(url)
            result = client.parseDOM(result, "div", attrs = { "id": "episode_show" })[0]
            result = re.compile('(<a.+?</a>)').findall(result)
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
            result = [i[0] for i in result if episode == i[1]][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example no. 45
    def get_show(self, imdb, tvdb, show, show_alt, year):

        try:
            result = client.source(self.base_link)
            if not "'index show'" in result:
                cookie = client.source(self.sign_link,
                                       post=self.key_link,
                                       output='cookie')
                result = client.source(self.base_link, cookie=cookie)

            result = client.parseDOM(result,
                                     "div",
                                     attrs={"class": "index show"})
            result = [(client.parseDOM(i, "a", attrs={"class": "name"})[0],
                       client.parseDOM(i, "span", attrs={"class": "value"})[0],
                       client.parseDOM(i, "a", ret="href")[0]) for i in result]

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = [str(year), str(int(year) + 1), str(int(year) - 1)]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [
                i[2] for i in result
                if any(x == cleantitle.tv(i[0]) for x in shows)
            ][0]

            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example no. 46
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = client.parseDOM(result, "div", attrs={"class":
                                                           "links"})[0]
            result = client.parseDOM(result, "tr")
            result = [(client.parseDOM(i, "td", attrs={"class":
                                                       "quality_td"})[0],
                       client.parseDOM(i, "a", ret="href")[-1])
                      for i in result]

            ts_quality = ['CAM', 'TS']
            links = [
                i for i in result if not any(x in i[0] for x in ts_quality)
            ]
            if len(links) == 0: links = result

            for i in links:
                try:
                    url = i[1]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.sub(
                        '.+?/exit/\d*-|[.].+?[.]html|http://(|www[.])|/.+|[.].+$',
                        '', i[1])
                    host = host.strip().lower()
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    if any(x in i[0] for x in ts_quality): quality = 'CAM'
                    else: quality = 'SD'

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'Moviestorm',
                        'url': url
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example no. 47
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            t = url.split('v=', 1)[-1].rsplit('&', 1)[0] 
            url = self.video_link % t

            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, url), headers=self.headers)
                if 'ripdiv' in str(result): break

            result = result.decode('iso-8859-1').encode('utf-8')
            sec = re.compile('lastChild[.]value="(.+?)"').findall(result)[0]
            links = client.parseDOM(result, "div", attrs = { "class": "ripdiv" })

            hd = [i for i in links if '>HD 720p<' in i]
            sd = [i for i in links if '>DVDRip / Standard Def<' in i]
            if len(sd) == 0: sd = [i for i in links if '>DVD Screener<' in i]
            if len(sd) == 0: sd = [i for i in links if '>R5/R6 DVDRip<' in i]

            if len(hd) > 0: hd = hd[0].split('<p>')
            if len(sd) > 0: sd = sd[0].split('<p>')
            links = [(i, 'HD') for i in hd] + [(i, 'SD') for i in sd]

            for i in links:
                try:
                    quality = i[1]

                    host = client.parseDOM(i[0], "a")[-1]
                    host = re.sub('\s|<.+?>|</.+?>|.+?#\d*:', '', host)
                    host = host.strip().lower()
                    if quality == 'HD' and not host in hosthdDict: raise Exception()
                    if quality == 'SD' and not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    url = client.parseDOM(i[0], "a", ret="onclick")[-1]
                    url = re.compile('[(](.+?)[)]').findall(url)[0]
                    url = 'id=%s&t=%s&sec=%s&s=%s&m=%s&cap=&iqs=&url=' % (url, t, sec, random.randrange(5, 50), random.randrange(100, 300) * -1)
                    url = url.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'provider': 'Icefilms', 'url': url})
                except:
                    pass

            return sources
        except:
            return sources
Example no. 48
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            content = re.compile('/\d{4}/\d{2}/').findall(url)

            if len(content) > 0: url = urlparse.urljoin(self.tvbase_link, url)
            else: url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            links = client.parseDOM(result,
                                    "td",
                                    attrs={"class": "even tdhost"})
            links += client.parseDOM(result,
                                     "td",
                                     attrs={"class": "odd tdhost"})

            q = re.compile('<label>Quality</label>(.+?)<').findall(result)
            if len(q) > 0: q = q[0]
            else: q = ''

            if q.endswith(('CAM', 'TS')): quality = 'CAM'
            else: quality = 'SD'

            for i in links:
                try:
                    host = client.parseDOM(i, "a")[0]
                    host = host.split('<', 1)[0]
                    host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                    host = host.strip().lower()
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    url = client.parseDOM(i, "a", ret="href")[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'WSO',
                        'url': url
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example no. 49
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            content = re.compile('(.+?)\sS\d*E\d*$').findall(url)

            if len(content) == 0:
                query = urlparse.urljoin(self.base_link, self.source_link % url)

                result = client.source(query)
                result = json.loads(result)
                result = result['films'][0]['film_link']
            else:
                url, ep = re.compile('(.+?)\s(S\d*E\d*)$').findall(url)[0]
                query = urlparse.urljoin(self.base_link, self.source_link % url)

                result = client.source(query)
                result = json.loads(result)
                result = result['films']
                result = [i['film_link'] for i in result if ep in i['film_name'].encode('utf-8').upper()][0]

            result = re.compile('(.+?)#(\d*)#').findall(result)

            try:
                url = [i[0] for i in result if str(i[1]) == '1080'][0]
                sources.append({'source': 'GVideo', 'quality': '1080p', 'provider': 'GVcenter', 'url': url})
            except:
                pass
            try:
                url = [i[0] for i in result if str(i[1]) == '720'][0]
                sources.append({'source': 'GVideo', 'quality': 'HD', 'provider': 'GVcenter', 'url': url})
            except:
                pass

            return sources
        except:
            return sources
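The film_link value parsed above appears to carry repeated "link#height#" pairs; a hedged sketch of turning those pairs into quality-tagged sources (the helper name is hypothetical and the input format is inferred from the regular expression above):

    import re

    def parse_film_link(film_link):
        # film_link looks like 'http://...#1080#http://...#720#'
        sources = []
        for link, height in re.compile('(.+?)#(\d*)#').findall(film_link):
            if height == '1080': quality = '1080p'
            elif height == '720': quality = 'HD'
            else: continue
            sources.append({'source': 'GVideo', 'quality': quality, 'url': link})
        return sources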
Example no. 50
    def get_movie(self, imdb, title, year):
        try:
            search = "http://www.omdbapi.com/?i=tt%s" % imdb
            search = client.source(search)
            search = json.loads(search)
            country = [i.strip() for i in search["Country"].split(",")]
            if not "India" in country:
                return

            languages = ["hindi", "tamil", "telugu", "malayalam"]
            language = [i.strip().lower() for i in search["Language"].split(",")]
            language = [i for i in language if any(x == i for x in languages)][0]

            query = self.search_link % (urllib.quote_plus(title), language)
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, "div", attrs={"class": "search-category"})
            result = [i for i in result if "Movies" in client.parseDOM(i, "p")[0]][0]
            result = client.parseDOM(result, "li")

            title = cleantitle.movie(title)
            years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a")[0]) for i in result]
            r = [i for i in result if any(x in i[1] for x in years)]
            if not len(r) == 0:
                result = r
            result = [i[0] for i in result if title == cleantitle.movie(i[1])][0]

            try:
                url = re.compile("//.+?(/.+)").findall(result)[0]
            except:
                url = result
            url = url.replace("../", "/")
            url = client.replaceHTMLCodes(url)
            url = url.encode("utf-8")
            return url
        except:
            return
Example no. 51
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            if url == None: return

            tvdb_link = self.tvdb_link % (self.tvdb_key, tvdb, int(season), int(episode))
            result = client.source(tvdb_link)

            num = client.parseDOM(result, "absolute_number")[0]
            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = result.decode('iso-8859-1').encode('utf-8')
            result = client.parseDOM(result, "tr", attrs = { "class": "" })
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "td", attrs = { "class": "epnum" })[0]) for i in result]
            result = [i[0] for i in result if num == i[1]][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example no. 52
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            if url == None: return

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            query = '%s "Season %s" "Episode %s"' % (url, season, episode)
            query = urlparse.urljoin(
                self.base_link, self.tvsearch_link + urllib.quote_plus(query))

            result = cloudflare.source(query)
            if result == None:
                result = client.source(self.agent_link +
                                       urllib.quote_plus(query))

            result = result.replace('> ', '>').replace(' <', '<')

            r = client.parseDOM(result,
                                "li",
                                attrs={"class": "first element.+?"})
            r += client.parseDOM(result, "li", attrs={"class": "element.+?"})

            show = cleantitle.tv(url)
            result = [(client.parseDOM(i, "a", ret="href"), re.compile('>(.+?): Season (\d*), Episode (\d*)<').findall(i)) for i in r]
            result = [(i[0][0], i[1][-1][0].split('>')[-1], i[1][-1][1], i[1][-1][2]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
            result = [i[0] for i in result if show == cleantitle.tv(i[1])][0]

            url = client.replaceHTMLCodes(result)
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except:
                pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
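Several of the snippets unwrap a proxied link by pulling the real target out of a 'u' query parameter before keeping only its path, as done above. A small sketch of that unwrapping on its own (the function name is hypothetical):

    import urlparse

    def unwrap_proxy_link(url):
        # if the link is wrapped as ...?u=<real url>, unwrap it first
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except:
            pass
        return urlparse.urlparse(url).path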
Example no. 53
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            result = client.source(url)

            fmt = re.compile('<strong>Quality</strong>.+?<strong>(.+?)</strong>').findall(result)
            if len(fmt) > 0:
                fmt = ' '.join(fmt[0].decode("utf-8").lower().strip().split()).split(' ')

            if any(x in ['dvdscr', 'r5', 'r6'] for x in fmt): quality = 'SCR'
            elif any(x in [
                    'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts',
                    'cam', 'ts'
            ] for x in fmt):
                quality = 'CAM'
            else:
                quality = 'HD'

            result = client.parseDOM(result,
                                     "div",
                                     attrs={"class": "video-embed"})[0]
            url = re.compile('hashkey=(.+?)[\'|\"]').findall(result)
            url += re.compile('[?]ref=(.+?)[\'|\"]').findall(result)
            url = self.videomega_link % url[0]

            url = videomega.resolve(url)
            if url == None: raise Exception()

            sources.append({
                'source': 'Videomega',
                'quality': quality,
                'provider': 'Onlinemovies',
                'url': url
            })

            return sources
        except:
            return sources
Example no. 54
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)
            if result == None:
                result = client.source(self.agent_link +
                                       urllib.quote_plus(url))

            result = client.parseDOM(result, "a", ret="href")

            u = [
                i for i in result
                if '.php' in i and 'i=' in urllib.unquote_plus(i)
            ][0]
            u = client.replaceHTMLCodes(u)
            try:
                u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
            except:
                pass
            u = urlparse.parse_qs(urlparse.urlparse(u).query)['i'][0]

            url = gkplugins.decrypter(198, 128).decrypt(
                u, base64.urlsafe_b64decode('b3F5czkzZEswc2FEY3pRNW9NSTE='),
                'ECB').split('\0')[0]
            url = resolvers.request(url)

            if not type(url) == list: raise Exception()

            for i in url:
                sources.append({
                    'source': 'GVideo',
                    'quality': i['quality'],
                    'provider': 'YIFYstream',
                    'url': i['url']
                })

            return sources
        except:
            return sources
Example no. 55
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            result = client.source(urlparse.urljoin(self.base_link, url))

            quality = client.parseDOM(result, "title")[0]
            if '[CAM]' in quality or '[TS]' in quality: quality = 'CAM'
            elif '[SCREENER]' in quality: quality = 'SCR'
            else: quality = 'HD'

            result = client.parseDOM(result,
                                     "div",
                                     attrs={"class": "video-embed"})[0]

            url = None

            enigma = client.parseDOM(result, "span", ret="data-enigmav")
            if len(enigma) > 0:
                url = enigma[0].decode("unicode-escape")
                url = re.compile('file *: *"(.+?)"').findall(url)[-1]
                url += '|Referer=%s' % urllib.quote_plus(self.base_link)

            mega = re.compile('hashkey=([\w]+)').findall(result)
            mega += re.compile('ref=[\'|\"](.+?)[\'|\"]').findall(result)
            if len(mega) > 0:
                url = self.videomega_link % mega[0]
                url = videomega.resolve(url)

            if url == None: raise Exception()

            sources.append({
                'source': 'Videomega',
                'quality': quality,
                'provider': 'MoviesHD',
                'url': url
            })

            return sources
        except:
            return sources
Example no. 56
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = result.decode('iso-8859-1').encode('utf-8')

            links = client.parseDOM(result, "table", attrs = { "class": "link_ite.+?" })

            for i in links:
                try:
                    url = client.parseDOM(i, "a", ret="href")
                    if len(url) > 1: raise Exception()
                    url = url[0].split('gtfo=', 1)[-1].split('&', 1)[0]
                    url = base64.urlsafe_b64decode(url.encode('utf-8'))
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = urlparse.urlparse(url).netloc
                    host = host.replace('www.', '').replace('embed.', '')
                    host = host.rsplit('.', 1)[0]
                    host = host.lower()
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    quality = client.parseDOM(i, "div", attrs = { "class": "quality" })
                    if any(x in ['[CAM]', '[TS]'] for x in quality): quality = 'CAM'
                    else:  quality = 'SD'
                    quality = quality.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'provider': 'Watchfree', 'url': url})
                except:
                    pass

            return sources
        except:
            return sources
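The hoster links above hide the real target in a urlsafe-base64 'gtfo' parameter; a hedged sketch of extracting the host name from such a link (the helper name is hypothetical):

    import base64
    import urlparse

    def host_from_redirect(href):
        # the real URL is urlsafe-base64 encoded in the 'gtfo' parameter
        encoded = href.split('gtfo=', 1)[-1].split('&', 1)[0]
        url = base64.urlsafe_b64decode(encoded.encode('utf-8'))
        host = urlparse.urlparse(url).netloc
        host = host.replace('www.', '').replace('embed.', '')
        return host.rsplit('.', 1)[0].lower()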
Example no. 57
    def get_show(self, imdb, tvdb, show, show_alt, year):
        try:
            query = self.search_link
            post = urllib.urlencode({'searchquery': show, 'searchin': '2'})

            result = ''
            links = [self.link_1, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, query),
                                       post=post,
                                       headers=self.headers)
                if 'widget search-page' in str(result): break

            result = client.parseDOM(result,
                                     "div",
                                     attrs={"class": "widget search-page"})[0]
            result = client.parseDOM(result, "td")

            shows = [cleantitle.tv(show), cleantitle.tv(show_alt)]
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]
            result = [(client.parseDOM(i, "a", ret="href")[-1],
                       client.parseDOM(i, "a")[-1]) for i in result]
            result = [
                i for i in result
                if any(x == cleantitle.tv(i[1]) for x in shows)
            ]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = client.replaceHTMLCodes(result)
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except:
                pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return