Example #1
def resolve(url):
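    # Harvest the embed page's hidden form fields, then retry up to ten times: re-POST the form,
    # unpack the packed eval() JS and return a non-HLS file URL (with a vidzi.tv Referer) or, failing that, the .m3u8 URL.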
    try:
        result = client.request(url, mobile=True, close=False)

        try:
            post = {}
            f = client.parseDOM(result, 'Form', attrs = {'method': 'POST'})[0]
            f = f.replace('"submit"', '"hidden"')
            k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
            for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})

        except:
            post = None

        for i in range(0, 10):
            try:
                result = client.request(url, post=post, mobile=True, close=False)
                result = result.replace('\n','')

                result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
                result = jsunpack.unpack(result)

                result = re.compile('sources *: *\[.+?\]').findall(result)[-1]
                result = re.compile('file *: *"(http.+?)"').findall(result)

                url = [i for i in result if not '.m3u8' in i]
                if len(url) > 0: return '%s|Referer=%s' % (url[0], urllib.quote_plus('http://vidzi.tv/nplayer/jwplayer.flash.swf'))
                url = [i for i in result if '.m3u8' in i]
                if len(url) > 0: return url[0]
            except:
                time.sleep(1)
    except:
        return
Example #2
def resolve(url):
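    # Read the requested quality from the query string (default 1080P), follow the player iframe,
    # undo the page's repeated window.atob() base64 encoding and return the stream URL for that quality.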
    try:
        try: quality = urlparse.parse_qs(urlparse.urlparse(url).query)['quality'][0]
        except: quality = '1080P'

        url = url.rsplit('?', 1)[0]

        result = client.request(url, close=False)

        url = client.parseDOM(result, 'div', attrs = {'class': 'player'})[0]
        url = client.parseDOM(url, 'iframe', ret='src')[0]

        result = client.request(url)

        url = client.parseDOM(result, 'iframe', ret='src')
        if len(url) > 0: return resolvers.request(url[0], debrid)

        count = len(re.findall('window\.atob', result))
        result = re.compile("window\.atob\('([^']+)").findall(result)[0]

        for i in xrange(count):
            result = base64.decodestring(result)

        result = re.compile('(\d*p)="([^"]+)"').findall(result)

        url = [i for i in result if i[0].upper() == quality]
        if len(url) > 0: url = url[0][1]
        else: url = result[0][1]

        return url
    except:
        return
Example #3
    def get_sources(self, url, hosthdDict, hostDict, locDict):
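        # Map the page's "Quality :" field to CAM/SCR/HD, extract the videomega embed id,
        # resolve it and return it as a single source entry.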
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            quality = client.parseDOM(result, 'li')
            quality = [i for i in quality if '>Quality :<' in i][0]
            quality = client.parseDOM(quality, 'p')[0]

            if 'CAM' in quality or 'TS' in quality: quality = 'CAM'
            elif 'SCREENER' in quality: quality = 'SCR'
            else: quality = 'HD'

            url = client.parseDOM(result, 'iframe', ret='src')
            url = [i for i in url if 'videomega' in i.lower()][0]
            url = re.compile('(?:ref|hashkey)=([\w]+)').findall(url)
            url = 'http://videomega.tv/cdn.php?ref=%s' % url[0]

            url = resolvers.request(url)

            if url == None: raise Exception()

            sources.append({'source': 'Videomega', 'quality': quality, 'provider': 'Movietubev2', 'url': url})

            return sources
        except:
            return sources
Example #4
    def get_movie(self, imdb, title, year):
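        # Search the site for "<title> <year>" and return the URL slug of the first result whose cleaned title matches.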
        try:
            query = '%s %s' % (title, year)
            query = self.search_link % (urllib.quote_plus(query))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)

            result = result.decode('iso-8859-1').encode('utf-8')

            result = client.parseDOM(result, "div", attrs={"class":"boxinfo"})
            title = cleantitle.movie(title)

            url = None
            for item in result:
                searchTitle = client.parseDOM(item, "span", attrs={"class":"tt"})[0]
                try: searchTitle = re.compile('(.+?) \d{4} ').findall(searchTitle)[0]
                except: pass
                searchTitle = cleantitle.movie(searchTitle)
                if title in searchTitle:
                    url = client.parseDOM(item, "a", ret="href")[0]
                    url = re.compile(".+/(.+?)/").findall(url)[0]
                    break
            if url == None or url == '':
                raise Exception()
            return url
        except:
            return
Example #5
    def get_movie(self, imdb, title, year):
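        # Query a randomly chosen mirror, match results on cleaned title plus year (allowing one year either side)
        # and return the matching path.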
        try:
            self.base_link = random.choice([self.base_link_1, self.base_link_2])

            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)

            result = client.parseDOM(result, 'div', attrs = {'class': 'cell_container'})

            title = cleantitle.movie(title)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], re.compile('(.+?) [(](\d{4})[)]').findall(i[1])) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #6
    def get_movie(self, imdb, title, year):
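        # Search the movie_table listing, match on cleaned title and "(year)", unwrap any "u=" redirect parameter
        # and return the path component.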
        try:
            query = self.search_link % urllib.quote_plus(title)
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)

            result = result.decode("iso-8859-1").encode("utf-8")
            result = client.parseDOM(result, "div", attrs={"class": "movie_table"})

            title = cleantitle.movie(title)
            years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]
            result = [(client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "a", ret="title")[1]) for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = client.replaceHTMLCodes(result)
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)["u"][0]
            except:
                pass
            url = urlparse.urlparse(url).path
            url = url.encode("utf-8")
            return url
        except:
            return
Example #7
    def get_movie(self, imdb, title, year):
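        # POST "<title> <year>" to the search form; exactly one matching result in the dle-content block is required.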
        try:
            post = '%s %s' % (title, year)
            post = self.search_post % (urllib.quote_plus(post))

            query = urlparse.urljoin(self.base_link, self.search_link)

            result = client.source(query, post=post)
            result = client.parseDOM(result, 'div', attrs = {'id': 'dle-content'})[0]

            title = cleantitle.movie(title)

            result = client.parseDOM(result, 'div', attrs = {'class': 'short-film'})
            result = client.parseDOM(result, 'h5')
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            if not len(result) == 1: raise Exception()
            result = result[0][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #8
    def check(self, i):
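        # Expand go4up.com multi-host links, keep only uptobox/hugefiles/uploadrocket, verify each file
        # is still online and append the survivors to self.sources.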
        try:
            result = client.request(i['url'])
            result = client.parseDOM(result, 'td', attrs = {'class': 'td_cols'})[0]
            result = result.split('"td_heads"')
            result = client.parseDOM(result, 'a', ret='href')

            for url in result:
                try:
                    if 'go4up.com' in url:
                        url = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]
                        url = client.request(self.go4up_link_2 % url)
                        url = client.parseDOM(url, 'div', attrs = {'id': 'linklist'})[0]
                        url = client.parseDOM(url, 'a', ret='href')[0]

                    host = urlparse.urlparse(url).netloc
                    host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                    host = host.strip().lower()

                    if not host in ['uptobox', 'hugefiles', 'uploadrocket']: raise Exception()

                    if host == 'hugefiles': check = hugefiles.check(url)
                    elif host == 'uploadrocket': check = uploadrocket.check(url)
                    elif host == 'uptobox': check = uptobox.check(url)

                    if check == False: raise Exception()

                    self.sources.append({'source': host, 'quality': 'HD', 'provider': 'TVrelease', 'url': url, 'info': i['info']})
                except:
                    pass
        except:
            pass
Example #9
    def get_show(self, imdb, tvdb, tvshowtitle, year):
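        # Confirm via trakt that the show is tagged anime/animation, fetch its TVmaze title by TVDB id,
        # then search the site and return the path of the last exact title match.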
        try:
            r = 'search/tvdb/%s?type=show&extended=full' % tvdb
            r = json.loads(trakt.getTrakt(r))
            if not r: return '0'

            d = r[0]['show']['genres']
            if not ('anime' in d or 'animation' in d): return '0'

            tv_maze = tvmaze.tvMaze()
            tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
            tvshowtitle = tvshowtitle['name']

            t = cleantitle.get(tvshowtitle)

            q = self.search_link % (urllib.quote_plus(tvshowtitle))
            q = urlparse.urljoin(self.base_link, q)

            r = client.request(q)

            r = client.parseDOM(r, 'ol', attrs={'id': 'searchresult'})[0]
            r = client.parseDOM(r, 'h2')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in r]
            r = [i for i in r if t == cleantitle.get(i[1])]
            r = r[-1][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #10
    def get_movie(self, imdb, title, year):
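        # Search rainierland, trim each result URL to its first two path segments, de-duplicate,
        # match on cleaned title and return the path.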
        try:
            query = self.search_link % urllib.quote(title)
            query = urlparse.urljoin(self.base_link, query)
            #control.log("rainierland-0 @@@@@@@@@@@@@@@@@@@@@@@@@@@@ %s" % query)
            result = client2.http_get(query)
            title = cleantitle.movie(title)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
            r = client.parseDOM(result, 'div', attrs = {'class': 'thumb'})
            #control.log("rainierland-1 @@@@@@@@@@@@@@@@@@@@@@@@@@@@ %s" % r)

            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            #control.log("rainierland-2 @@@@@@@@@@@@@@@@@@@@@@@@@@@@ %s" % r)

            r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            #control.log("rainierland-3 @@@@@@@@@@@@@@@@@@@@@@@@@@@@ %s" % r)

            r = [(re.sub('http.+?//.+?/','', i[0]), i[1]) for i in r]
            #control.log("rainierland-4 @@@@@@@@@@@@@@@@@@@@@@@@@@@@ %s" % r)

            r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
            r = [x for y,x in enumerate(r) if x not in r[:y]]
            r = [i for i in r if title == cleantitle.movie(i[1])]
            u = [i[0] for i in r][0]

            url = urlparse.urljoin(self.base_link, u)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            #control.log("rainierland url @@@@@@@@@@@@@@@@@@@@@@@@@@@@ %s" % url)

            return url
        except:
            return
Example #11
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
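        # The incoming url is "<show title> (<year>)": match show title and season in the search results,
        # sanity-check the year implied by the season number, and return the path with "?SxxExx" appended.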
        try:
            tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]

            query = self.search_link % urllib.quote(tvshowtitle)
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)

            tvshowtitle = cleantitle.tv(tvshowtitle)
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            result = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h2'), re.compile('class *= *[\'|\"]jt-info[\'|\"]>(\d{4})<').findall(i)) for i in result]
            result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            result = [(i[0], re.compile('(.+?) - Season (\d*)$').findall(i[1]), i[2]) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
            result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
            result = [i for i in result if season == i[2]]
            result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            result += '?S%02dE%02d' % (int(season), int(episode))

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #12
    def get_sources(self, url, hosthdDict, hostDict, locDict):
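        # Read the Polish "Język" (language) and "Jakość" (quality) fields, map 720/1080 to HD/1080p
        # and emit one source per external hoster link found in the Film block.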
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url)
            vtype = re.findall('<div class="col-lg-9 col-md-9 col-sm-9">\s.*<b>Język</b>:(.*?)\.*</div>',result)[0].strip()
            q = re.findall('<div class="col-lg-9 col-md-9 col-sm-9">\s.*<b>Jakość</b>:(.*?)\.*</div>', result)[0].strip()
            quality = 'SD'
            if '720' in q: quality = 'HD'
            if '1080' in q: quality = '1080p'

            links = client.parseDOM(result, 'div', attrs={'id':'Film'})
            links = [client.parseDOM(i, 'a', ret='href', attrs={'target':'_blank'})[0] for i in links]
            for i in links:
                try:
                    host = urlparse.urlparse(i).netloc
                    host = host.split('.')
                    host = host[-2]+"."+host[-1]
                    host = host.lower()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'provider': 'SEGOS', 'url': i, 'vtype':vtype})
                except:
                    pass

            return sources
        except:
            return sources
Example #13
def resolve(url):
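    # Rebuild the movdivx.com URL from the file id, submit the "Free Download" form, unpack the packed
    # eval() JS and return the first embed/file URL that is not a subtitle (.srt).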
    try:
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://www.movdivx.com/%s' % url
 
        result = client.request(url)

        post = {}
        f = client.parseDOM(result, 'Form', attrs = {'action': '' })[0]
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post.update({'method_free': 'Free Download'})


        result = client.request(url, post=post)

        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack.unpack(result)

        url = client.parseDOM(result, 'embed', ret='src')
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        url = [i for i in url if not i.endswith('.srt')]
        url = 'http://' + url[0].split('://', 1)[-1]

        return url
    except:
        return
Example #14
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
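        # Search for '<show> "Season x" "Episode y"' (cloudflare first, proxy fallback), match show title,
        # season and episode, unwrap any "u="/"q=" redirect parameters and return the path.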
        try:
            if url == None: return

            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            query = '%s "Season %s" "Episode %s"' % (url, season, episode)
            query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))

            result = cloudflare.source(query)
            if result == None: result = client.source(self.__proxy() + urllib.quote_plus(query))

            r = client.parseDOM(result, 'li', attrs = {'class': 'first element.+?'})
            r += client.parseDOM(result, 'li', attrs = {'class': 'element.+?'})
            r += client.parseDOM(result, 'header', attrs = {'class': 'entry-header'})

            tvshowtitle = cleantitle.tv(url)

            result = [(client.parseDOM(i, 'a', ret='href'), re.compile('(.+?): Season (\d*).+?Episode (\d*)').findall(i)) for i in r]
            result = [(i[0][0], i[1][-1]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], i[1][0].split('>')[-1], i[1][1], i[1][2]) for i in result]
            result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
            result = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
Example #15
    def get_movie(self, imdb, title, year):
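        # Same search flow as the episode variant, but for movies: match cleaned title plus "(year)",
        # unwrap redirect parameters and return the path.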
        try:
            query = urlparse.urljoin(self.base_link, self.moviesearch_link + urllib.quote_plus(title))

            result = cloudflare.source(query)
            if result == None: result = client.source(self.__proxy() + urllib.quote_plus(query))

            r = client.parseDOM(result, 'li', attrs = {'class': 'first element.+?'})
            r += client.parseDOM(result, 'li', attrs = {'class': 'element.+?'})
            r += client.parseDOM(result, 'header', attrs = {'class': 'entry-header'})

            title = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(i[0], re.compile('(.+? [(]\d{4}[)])').findall(i[1])) for i in result]
            result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
Example #16
def resolve(url):
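    # Normalise a grifthost embed URL, re-submit its hidden form when present, unpack the packed
    # eval() JS and return the first embed/file URL that is not a subtitle (.srt).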
    try:
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://grifthost.com/embed-%s.html' % url

        result = client.request(url)

        try:
            post = {}
            f = client.parseDOM(result, 'Form', attrs = {'method': 'POST'})[0]
            f = f.replace('"submit"', '"hidden"')
            k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
            for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
            result = client.request(url, post=post)
        except:
            pass

        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack.unpack(result)

        url = client.parseDOM(result, 'embed', ret='src')
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        url = [i for i in url if not i.endswith('.srt')]
        url = 'http://' + url[0].split('://', 1)[-1]

        return url
    except:
        return
Example #17
    def get_movie(self, imdb, title, year):
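        # Retry the search up to five times, parse 'Watch ... <title> <year>' entries from the contentrea
        # section, match title and year, and return the path.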
        try:
            query = self.search_link2 % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            for i in range(5):
                result = client.source(query, close=False)
                if not result == None: break

            result = client.parseDOM(result, 'section', attrs = {'id': 'contentrea'})[0]

            title = cleantitle.movie(title)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            result = zip(client.parseDOM(result, 'a', ret='href'), client.parseDOM(result, 'a'))
            result = [(i[0], re.compile('(^Watch Full "|^Watch |)(.+? \d{4})').findall(i[1])) for i in result]
            result = [(i[0], i[1][0][-1]) for i in result if len(i[1]) > 0]
            result = [(i[0], re.compile('(.+?) (\d{4})$').findall(i[1])) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
            result = [i for i in result if any(x in i[2] for x in years)]
            result = [i[0] for i in result if title == cleantitle.movie(i[1])][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #18
def NBANFL_ARC(url, image, fanart):
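    # Build the life2sport.com NBA/NFL archive listing for the requested page, add one directory item
    # per bookmark link and append "next page..." entries while more results exist.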
    try:
        if "nbaarch" in url or "nflarch" in url:
            page = "1"
        else:
            url = url.split("/")
            page = url[1]
            url = url[0]
        if url == "nbaarch" or url == "nba":
            fanart = back["nba"]
            image = logos["nba"]
            url = "http://www.life2sport.com/category/basketbol/nba/page/" + str(page)
        elif url == "nflarch" or url == "nfl":
            fanart = back["nfl"]
            image = logos["nfl"]
            url = "http://www.life2sport.com/category/american-football/page/" + str(page)
        html = client.request(url)
        links = client.parseDOM(html, "a", attrs={"rel": "bookmark"}, ret="href")
        titles = client.parseDOM(html, "a", attrs={"rel": "bookmark"}, ret="title")
        del links[1::2]
        for i, el in enumerate(links):
            if "-nba-" in el or "-nfl-" in el:
                title = client.parseDOM(html, "a", attrs={"href": el}, ret="title")[0]
                title = title.split("/")[-1] + " - " + title.split("/")[-2]
                addDirectoryItem(title, "NBANFL_Stream", image, image, fanart, url=el)
        page = str(int(page) + 1)
        if re.search("-nba-", str(links)):
            addDirectoryItem("next page...", "NBANFL_ARC", image, image, fanart, url="nba" + "/" + page)
        if re.search("-nfl-", str(links)):
            addDirectoryItem("next page...", "NBANFL_ARC", image, image, fanart, url="nfl" + "/" + page)

        endDirectory()
    except:
        pass
Example #19
    def get_movie(self, imdb, title, year):
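        # Parse home_post_cont results, take each entry's title from its thumbnail's title attribute,
        # match cleaned title plus "(year)" and return the path.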
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, 'div', attrs = {'class': 'home_post_cont.+?'})

            title = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'img', ret='title')[0]) for i in result]
            result = [(i[0], client.replaceHTMLCodes(i[1])) for i in result]
            result = [(i[0], client.parseDOM(i[1], 'a')) for i in result]
            result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #20
    def get_show(self, imdb, tvdb, tvshowtitle, year):
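        # POST the show title to the search form on two mirrors, parse the "widget search-page" table,
        # match cleaned title and "(year)", unwrap any "u=" parameter and return the path.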

        try:
            query = self.search_link
            post = {'searchquery': tvshowtitle, 'searchin': '2'}

            result = ''
            links = [self.link_1, self.link_3]
            for base_link in links:
                result = client.source(urlparse.urljoin(base_link, query), post=post, headers=self.headers)
                if 'widget search-page' in str(result): break

            result = client.parseDOM(result, 'div', attrs = {'class': 'widget search-page'})[0]
            result = client.parseDOM(result, 'td')

            tvshowtitle = cleantitle.tv(tvshowtitle)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, 'a', ret='href')[-1], client.parseDOM(i, 'a')[-1]) for i in result]
            result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return
Example #21
def HHMoonfruit(url):
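    # Fetch the page to obtain the "markc" session cookie, replay it while following the nested iframes
    # and hand any streamup.com channel off to HHGetStreamup.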
    try:
        cookieJar = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar), urllib2.HTTPHandler())
        conn = urllib2.Request(url + "/htown3")
        connection = opener.open(conn)
        for cookie in cookieJar:
            token = cookie.value
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3",
            "Content-Type": "application/x-www-form-urlencoded",
            "Cookie": "markc=" + token,
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "Accept-Language": "en-US,en;q=0.8,bg;q=0.6,it;q=0.4,ru;q=0.2,uk;q=0.2",
        }
        html = connection.read()
        link = client.parseDOM(html, "iframe", ret="src")
        link = url + link[-1]
        conn = urllib2.Request(link, headers=headers)
        connection = opener.open(conn)
        html = connection.read()
        link = client.parseDOM(html, "iframe", ret="src")[0]
        if "streamup.com" in link:
            channel = link.split("/")[4]
            HHGetStreamup(channel)
    except:
        return
Example #22
    def get_show(self, imdb, tvdb, tvshowtitle, year):
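        # POST the show title to the site's AJAX search, match cleaned title and year against the returned
        # snippets, then fetch the show page and return its "icerikid" value.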
        try:
            query = urlparse.urljoin(self.base_link, self.ajax_link)
            post = self.search_link % (urllib.quote_plus(tvshowtitle))

            result = client.source(query, post=post, headers=self.headers)
            result = json.loads(result)

            tvshowtitle = cleantitle.tv(tvshowtitle)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h3'), re.compile('<h5>.+?(\d{4}).+?</h5>').findall(i)) for i in result]
            result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            url = urlparse.urljoin(self.base_link, result)

            result = client.source(url)

            url = client.parseDOM(result, 'div', ret='value', attrs = {'id': 'icerikid'})[0]
            url = url.encode('utf-8')
            return url
        except:
            return
Example #23
def resolve(url):
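    # Extract the cloudtime.to video id, re-submit the page's hidden form, recover the flashvars file key
    # (following one level of indirection) and read the stream URL from player.api.php.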
    try:
        id = re.compile('//.+?/.+?/([\w]+)').findall(url)
        id += re.compile('//.+?/.+?v=([\w]+)').findall(url)
        id = id[0]

        url = 'http://www.cloudtime.to/video/%s' % id

        result = client.request(url)
        post = {}
        f = client.parseDOM(result, 'form', attrs = {'action': ''})
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post = urllib.urlencode(post)

        result = client.request(url, post=post)

        key = re.compile('flashvars.filekey=(.+?);').findall(result)[-1]
        try: key = re.compile('\s+%s="(.+?)"' % key).findall(result)[-1]
        except: pass

        url = 'http://www.cloudtime.to/api/player.api.php?key=%s&file=%s' % (key, id)
        result = client.request(url)

        url = re.compile('url=(.+?)&').findall(result)[0]
        return url
    except:
        return
Example #24
def daytse_movies(dbname):
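    # Crawl dayt.se movie index pages (windows-1256 encoded) and print each entry's title and link;
    # the sqlite insert statements are left commented out.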
    con = lite.connect(dbname)
    cur = con.cursor()
    #cur.execute("DROP TABLE IF EXISTS movies")
    #cur.executemany("INSERT INTO Cars VALUES(?, ?, ?)", cars)
    #cur.execute("CREATE TABLE movies (title TEXT, link TEXT, quality TEXT, UNIQUE (link))")

    for j in range(1,2):
        print '------- %s ' % j
        src = 'http://dayt.se/movies/index.php?&page=%s' % j
        result = ''
        result = client2.http_get(src).decode('windows-1256').encode('utf8')
        result = client.parseDOM(result, 'table', attrs={'class': 'topic_table'})
        for i in result:
            print '-------------------------'
            #print i
            print client.parseDOM(i, 'img', attrs={'class': 'image'}, ret='alt')[0]
            print client.parseDOM(i, 'a', attrs={'target': '_self'}, ret='href')[1]

        #result = [(client.parseDOM(i, 'img', attrs={'class': 'image'}, ret='alt')[0],
        #       client.parseDOM(i, 'a', attrs={'target': '_self'},  ret='href')[0],
        #       re.findall('Quality: (\d+).*</pre>', i)[0]) for i in result]
        #cur.executemany("INSERT INTO movies VALUES(?, ?, ?)", result)
    con.commit()
    con.close()
Example #25
def resolve(url):
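    # Variant of the grifthost resolver in Example #16: re-submit the hidden form, unpack the packed JS
    # and return the first non-subtitle embed/file URL.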
    try:
        url = url.replace("/embed-", "/")
        url = re.compile("//.+?/([\w]+)").findall(url)[0]
        url = "http://grifthost.com/embed-%s.html" % url

        result = client.request(url)

        try:
            post = {}
            f = client.parseDOM(result, "Form", attrs={"method": "POST"})[0]
            f = f.replace('"submit"', '"hidden"')
            k = client.parseDOM(f, "input", ret="name", attrs={"type": "hidden"})
            for i in k:
                post.update({i: client.parseDOM(f, "input", ret="value", attrs={"name": i})[0]})
            result = client.request(url, post=post)
        except:
            pass

        result = re.compile("(eval.*?\)\)\))").findall(result)[-1]
        result = jsunpack.unpack(result)

        url = client.parseDOM(result, "embed", ret="src")
        url += re.compile("file *: *['|\"](.+?)['|\"]").findall(result)
        url = [i for i in url if not i.endswith(".srt")]
        url = "http://" + url[0].split("://", 1)[-1]

        return url
    except:
        return
Example #26
    def resolve(self, url):
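        # Dispatch by host: delegate openload.co to resolvers.request; for movieshd/seriesonline scrape the
        # file/label pairs, sort by quality label and return the chosen candidate.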
        control.log('RESSS %s' % url)
        try:
            if 'openload.co' in url:
                url = resolvers.request(url)
                return url
            if 'movieshd' in url:
                r = self.request(url)[0]
                r = re.findall("file: '([^']+)',label: '(\d+)", r)
                r1 = sorted(r, key=lambda k: int(k[1]))
                r2 = client.replaceHTMLCodes(r1[-1][0])
                #r2 = client.googlepass(url)
                return r2
            if 'seriesonline' in url:
                r = self.request(url)[0]
                r = [client.parseDOM(r, 'source', ret='src'), client.parseDOM(r,'source', ret='label')]
                r = zip(r[0],r[1])
                r1 = sorted(r, key=lambda k: k[1])
                r2 = client.replaceHTMLCodes(r1[-2][0])
                r2 = client.googlepass(url)
                return r2


            return False

        except Exception as e:
            control.log('RESSS %s' % e)
            pass
Example #27
    def get_movie(self, imdb, title, year):
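        # Search with a hyphenated query, keep up to two exact title matches, then confirm the release year
        # against cached muchmovies info before returning the path.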
        try:
            t = cleantitle.get(title)

            q = self.search_link_2 % (urllib.quote_plus(cleantitle.query(title)))
            q = q.replace('+','-')
            q = urlparse.urljoin(self.base_link, q)


            r = self.request(q)[0]
            r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a', ret='data-url')) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1]]
            #else:
            #    r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'ss-title'}), client.parseDOM(r, 'a', attrs = {'class': 'ss-title'}))

            r = [(i[0],i[2]) for i in r if cleantitle.get(t) == cleantitle.get(i[1])][:2]
            r = [(i[0], re.findall('(\d+)', i[1])[-1]) for i in r]

            for i in r:
                try:
                    y, q = cache.get(self.muchmovies_info, 9000, i[1])
                    if not y == year: raise Exception()
                    return urlparse.urlparse(i[0]).path
                except:
                    pass
        except:
            return
Example #28
def resolve(url):
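    # Like Example #1 but without the JS-unpacking step: re-POST the hidden form up to ten times and
    # return the first URL from the sources block, preferring .m3u8 entries.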
    try:
        result = client.request(url, mobile=True, close=False)

        try:
            post = {}
            f = client.parseDOM(result, "Form", attrs={"method": "POST"})[0]
            f = f.replace('"submit"', '"hidden"')
            k = client.parseDOM(f, "input", ret="name", attrs={"type": "hidden"})
            for i in k:
                post.update({i: client.parseDOM(f, "input", ret="value", attrs={"name": i})[0]})
            post = urllib.urlencode(post)
        except:
            post = None

        for i in range(0, 10):
            try:
                result = client.request(url, post=post, mobile=True, close=False)
                result = result.replace("\n", "")

                result = re.compile("sources *: *\[.+?\]").findall(result)[-1]
                result = re.compile('file *: *"(http.+?)"').findall(result)

                url = [i for i in result if ".m3u8" in i]
                if len(url) > 0:
                    return url[0]
                url = [i for i in result if not ".m3u8" in i]
                if len(url) > 0:
                    return url[0]
            except:
                time.sleep(1)
    except:
        return
Example #29
def resolve(url):
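    # Re-submit the allmyvideos.net hidden form, take the last "file" URL from the response,
    # append the direct/ua flags, wait briefly and return it.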
    try:
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        page = 'http://allmyvideos.net/%s' % url
        control.log('AAA Page %s' % page)

        result = client.request(page, close=False)

        post = {}
        f = client.parseDOM(result, 'form', attrs = {'action': ''})
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post = urllib.urlencode(post)

        result = client.request(page, post=post)

        url = re.compile('"file" *: *"(http.+?)"').findall(result)
        #control.log('AAA Page %s' % url)
        url = url[-1]
        url += '&direct=false&ua=false'
        xbmc.sleep(2000)
        #return url + '|' + urllib.urlencode({ 'User-Agent': client.IE_USER_AGENT })
        return url
    except:
        return
Example #30
def resolve(url):
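    # Rebuild the 180upload embed URL, submit the captchaForm hidden fields, unpack the packed eval() JS
    # and return the first embed/'file' URL that is not a subtitle (.srt).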
    try:
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://180upload.com/embed-%s.html' % url

        result = client.request(url)

        post = {}
        f = client.parseDOM(result, 'form', attrs = {'id': 'captchaForm'})[0]
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post = urllib.urlencode(post)

        result = client.request(url, post=post)

        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack.unpack(result)

        url = client.parseDOM(result, 'embed', ret='src')
        url += re.compile("'file' *, *'(.+?)'").findall(result)
        url = [i for i in url if not i.endswith('.srt')]
        url = 'http://' + url[0].split('://', 1)[-1]

        return url
    except:
        return
Example #31
    def get_sources(self, url, hostDict, hostprDict, locDict):
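        # Build the watch5s movie/season URL from the query-string data, verify the release year, walk the
        # non-embed server links, fetch hash/token grabber parameters per episode and collect gvideo playlist
        # files, with a backup grabber path guarded by _x/_y tokens.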
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            headers = {}
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            if 'tvshowtitle' in data:
                url = '/tv-series/%s-season-%01d/watch/' % (
                    cleantitle.geturl(title), int(data['season']))
                year = str((int(data['year']) + int(data['season'])) - 1)
                episode = '%01d' % int(data['episode'])

            else:
                url = '/movie/%s/watch/' % cleantitle.geturl(title)
                year = data['year']
                episode = None

            url = urlparse.urljoin(self.base_link, url)
            referer = url

            r = client.request(url)

            y = re.findall('Release\s*:\s*.+?\s*(\d{4})', r)[0]

            if not year == y: raise Exception()

            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))
            r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]

            if not episode == None:
                r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
            else:
                r = [i[0] for i in r]

            r = [i for i in r if '/server-' in i]

            for u in r:
                try:
                    p = client.request(u, referer=referer, timeout='10')

                    t = re.findall('player_type\s*:\s*"(.+?)"', p)[0]
                    if t == 'embed': raise Exception()

                    episodeId = client.parseDOM(p,
                                                'input',
                                                ret='value',
                                                attrs={'name': 'episodeID'})[0]
                    js = json.loads(
                        client.request(self.token_link,
                                       post=urllib.urlencode({'id':
                                                              episodeId}),
                                       referer=referer,
                                       timeout='10'))
                    hash = js['hash']
                    token = js['token']
                    _ = js['_']
                    url = self.grabber_link % (episodeId, hash, token, _)
                    u = client.request(url, referer=referer, timeout='10')
                    js = json.loads(u)

                    try:
                        u = js['playlist'][0]['sources']
                        u = [i['file'] for i in u if 'file' in i]

                        for i in u:
                            try:
                                sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'watch5s', 'url': i, 'direct': True, 'debridonly': False})
                            except:
                                pass
                    except:
                        pass

                    try:
                        u = js['backup']
                        u = urlparse.parse_qs(urlparse.urlsplit(u).query)
                        u = dict([(i, u[i][0]) if u[i] else (i, '')
                                  for i in u])
                        eid = u['eid']
                        mid = u['mid']
                        p = client.request(self.backup_token_link %
                                           (eid, mid, _),
                                           XHR=True,
                                           referer=referer,
                                           timeout='10')
                        x = re.search('''_x=['"]([^"']+)''', p).group(1)
                        y = re.search('''_y=['"]([^"']+)''', p).group(1)
                        u = client.request(self.backup_link % (eid, x, y),
                                           referer=referer,
                                           XHR=True,
                                           timeout='10')
                        js = json.loads(u)
                        try:
                            u = js['playlist'][0]['sources']
                            u = [i['file'] for i in u if 'file' in i]

                            for i in u:
                                try:
                                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'watch5s', 'url': i, 'direct': True, 'debridonly': False})
                                except:
                                    pass
                        except:
                            pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
Example #32
    def get_sources(self, url, hosthdDict, hostDict, locDict):
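        # Parse the Watchfree link tables, base64-decode each "gtfo" redirect parameter, keep only
        # whitelisted hosts and tag entries marked [CAM]/[TS] as CAM quality (SD otherwise).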
        control.log('### WATCHFREE %s' % url)
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url)
            result = result.decode('iso-8859-1').encode('utf-8')
            #control.log('### %s' % url)

            links = client.parseDOM(result,
                                    'table',
                                    attrs={'class': 'link_ite.+?'})
            print links
            for i in links:
                #control.log('### i %s' % i)

                try:
                    url = client.parseDOM(i, 'a', ret='href')
                    url = [x for x in url if 'gtfo' in x][-1]
                    url = urlparse.parse_qs(
                        urlparse.urlparse(url).query)['gtfo'][0]
                    url = base64.b64decode(url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    print host
                    if not host in hostDict: raise Exception()

                    #if not host in hostDict:
                    #    control.log('watchfree HOST; %s' % host)
                    #    raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    try:
                        host = host.split('.')[0]
                    except:
                        pass
                    quality = client.parseDOM(i,
                                              'div',
                                              attrs={'class': 'quality'})
                    if any(x in ['[CAM]', '[TS]'] for x in quality):
                        quality = 'CAM'
                    else:
                        quality = 'SD'
                    #quality = quality.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'Watchfree',
                        'url': url
                    })

                except:
                    pass

            return sources
        except Exception as e:
            control.log('ERROR Watchfree %s' % e)
            return sources
Example #33
    def get_sources(self, url):
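        # Split an optional "?episode=" suffix off the url, collect the player_iframe links (filtered to the
        # requested episode) and scrape each player's sources block for 1080p/720p gvideo files.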
        logger.debug('%s SOURCES URL %s' % (self.__class__, url))

        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try:
                url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(
                    url)[0]
            except:
                pass

            result = client.request(url)

            url = zip(
                client.parseDOM(result,
                                'a',
                                ret='href',
                                attrs={'target': 'player_iframe'}),
                client.parseDOM(result, 'a', attrs={'target':
                                                    'player_iframe'}))
            url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
            url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]

            if content == 'episode':
                url = [i for i in url if i[1] == '%01d' % int(episode)]

            links = [client.replaceHTMLCodes(i[0]) for i in url]

            for u in links:

                try:
                    result = client.request(u)
                    result = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    result = re.findall(
                        '"file"\s*:\s*"(.+?)".+?"label"\s*:\s*"(.+?)"', result)

                    url = [{
                        'url': i[0],
                        'quality': '1080p'
                    } for i in result if '1080' in i[1]]
                    url += [{
                        'url': i[0],
                        'quality': 'HD'
                    } for i in result if '720' in i[1]]

                    for i in url:
                        sources.append({
                            'source': 'gvideo',
                            'quality': i['quality'],
                            'provider': 'Pubfilm',
                            'url': i['url'],
                            'direct': True,
                            'debridonly': False
                        })
                except:
                    pass
            logger.debug('%s SOURCES URL %s' % (self.__class__, sources))
            return sources
        except:
            return sources
Example #34
    def get_sources(self, url, hosthdDict, hostDict, locDict):
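        # Pick up the tracking cookie from a hidden image, read the server/type/episode ids, hash each
        # candidate server id with the site's static keys via uncensored() and pull gvideo files from the
        # v2_get_sources playlist.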
        try:
            sources = []

            if url == None: return sources

            try:
                url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except:
                episode = None

            url = urlparse.urljoin(self.base_link, url)

            vid_id = re.findall('-(\d+)', url)[-1]
            '''
            quality = cache.get(self.ymovies_info, 9000, vid_id)[1].lower()
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd': quality = 'HD'
            else: quality = 'SD'
            '''

            for i in range(3):
                r = client.request(url)
                if not r == None: break

            ref = client.parseDOM(r,
                                  'a',
                                  ret='href',
                                  attrs={'class': 'mod-btn mod-btn-watch'})[0]
            ref = urlparse.urljoin(self.base_link, ref)

            for i in range(3):
                r = client.request(ref, referer=url)
                if not r == None: break

            c = client.parseDOM(r, 'img', ret='src', attrs={'class': 'hidden'})
            if c: cookie = client.request(c[0], referer=ref, output='cookie')
            else: cookie = ''

            server = re.findall('server\s*:\s*"(.+?)"', r)[0]

            type = re.findall('type\s*:\s*"(.+?)"', r)[0]

            episode_id = re.findall('episode_id\s*:\s*"(.+?)"', r)[0]

            r = self.episode_link % (vid_id, server, episode_id, type)
            u = urlparse.urljoin(self.base_link, r)

            for i in range(13):
                r = client.request(u, referer=ref)
                if not r == None: break

            r = re.compile('(<li.+?/li>)', re.DOTALL).findall(r)
            r = [(client.parseDOM(i, 'li', ret='onclick'),
                  client.parseDOM(i, 'a', ret='title')) for i in r]

            if not episode == None:
                r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
                r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]
                r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
            else:
                r = [i[0][0] for i in r if i[0]]

            r = [re.findall('(\d+)', i) for i in r]
            r = [i[:2] for i in r if len(i) > 1]
            r = [i[0] for i in r if 1 <= int(i[1]) <= 11][:3]

            for u in r:
                try:
                    key = 'xwh38if39ucx'
                    key2 = '8qhfm9oyq1ux'
                    key3 = 'ctiw4zlrn09tau7kqvc153uo'

                    k = u + key3
                    v = ''.join(
                        random.choice(string.ascii_lowercase + string.digits)
                        for x in range(6))

                    c = key + u + key2 + '=%s' % v
                    c = '%s; %s' % (cookie, c)

                    url = urllib.quote(uncensored(k, v))
                    url = '/ajax/v2_get_sources/%s?hash=%s' % (u, url)
                    url = urlparse.urljoin(self.base_link, url)

                    for i in range(3):
                        u = client.request(url,
                                           referer=ref,
                                           cookie=c,
                                           timeout='10')
                        if not u == None: break

                    u = json.loads(u)['playlist'][0]['sources']
                    u = [i['file'] for i in u if 'file' in i]

                    for i in u:
                        try:
                            sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Yesmovies', 'url': i})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Example #35
    def sources(self, url, hostDict, hostprDict):
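        # Debrid-only scraper: search the RSS feed for "<title> SxxExx" or "<title> <year>", filter items by
        # cleaned title, identifier and release quality, parse the size tag and emit hoster links validated
        # against hostDict.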
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'link')[0]
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', t)
                    s = s[0] if s else '0'

                    items += [(t, u, s)]

                except:
                    pass

            urls = []
            for item in items:

                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(name, item[1])
                    if any(x in quality for x in ['CAM', 'SD']): continue

                    try:
                        size = re.sub('i', '', item[2])
                        div = 1 if size.endswith('GB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    links = self.links(url)
                    urls += [(i, quality, info) for i in links]

                except:
                    pass

            for item in urls:

                if 'earn-money' in item[0]: continue
                if any(x in item[0] for x in ['.rar', '.zip', '.iso']): continue
                url = client.replaceHTMLCodes(item[0])
                url = url.encode('utf-8')

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append(
                    {'source': host, 'quality': item[1], 'language': 'en', 'url': url, 'info': item[2], 'direct': False,
                     'debridonly': True})

            return sources
        except:
            return sources
Esempio n. 36
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            self.sources = []
            mylinks = []
            if url == None: return self.sources

            result = ''
            links = [self.link_1, self.link_3]
            for base_link in links:
                headers = {"Referer": urlparse.urljoin(base_link, url)}
                result, headers, content, cookie = client.request(
                    urlparse.urljoin(base_link, url),
                    output='extended',
                    headers=headers)
                myref = urlparse.urljoin(base_link, url)
                #control.log('### %s' % result)
                if 'original-title' in str(result): break

            links = client.parseDOM(result, 'tr', attrs={'id': 'pt.+?'})

            for i in links:
                try:
                    lang = re.compile(
                        '<img src=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(
                            i)[1]

                    if not 'English' in lang: raise Exception()

                    host = re.compile(
                        '<img src=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(
                            i)[0]
                    host = host.split('/')[-1]
                    host = host.split('.')[-3]
                    host = host.strip().lower()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    if '>Cam<' in i or '>TS<' in i: quality = 'CAM'
                    elif '>HD<' in i and host in hosthdDict: quality = 'HD'
                    else: quality = 'SD'

                    #if quality == 'HD' and not host in hosthdDict: raise Exception()
                    #if quality == 'SD' and not host in hostDict: raise Exception()

                    if '>3D<' in i: info = '3D'
                    else: info = ''
                    #control.log('### host:%s q:%s' % (host,quality))

                    url = re.compile(
                        'href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[0]
                    url = client.replaceHTMLCodes(url)

                    try:
                        url = urlparse.parse_qs(
                            urlparse.urlparse(url).query)['u'][0]
                    except:
                        pass
                    if url.startswith('http'):
                        url = urlparse.urlparse(url).path
                    if not url.startswith('http'):
                        url = urlparse.urljoin(self.base_link, url)

                    url = url.encode('utf-8')
                    #control.log('########  IWATCH LINK url:%s  host:%s q:%s' % (url,host,quality))
                    mylinks.append({
                        'source': host,
                        'quality': quality,
                        'url': url
                    })

                except:
                    pass
            #print("M",mylinks)
            #for i in mylinks:
            #    control.log(">>>>>>>>>>>>>>> ONE IWACH LINKS %s" % (i))

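            # Probe the candidate links in parallel and poll for up to 10
            # seconds (20 * 0.5s); self.check() is expected to append working
            # links to self.sources as the threads finish.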
            threads = []
            for i in mylinks:
                threads.append(
                    workers.Thread(self.check, i, headers, cookie, hostDict,
                                   hosthdDict))
            [i.start() for i in threads]
            for i in range(20):
                is_alive = [x.is_alive() for x in threads]
                if not any(is_alive): break
                time.sleep(0.5)

            return self.sources

        except:
            return self.sources
Esempio n. 37
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []
            #control.log('#PUTLOCKER1 %s' % url)

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']

                imdb = data['imdb']
                match = title.replace('-', '').replace(':', '').replace(
                    '\'', '').replace(' ', '-').replace('--', '-').lower()

                if 'tvshowtitle' in data:
                    url = '%s/show/%s/season/%01d/episode/%01d' % (
                        self.base_link, match, int(
                            data['season']), int(data['episode']))
                else:
                    url = '%s/movie/%s' % (self.base_link, match)
                #control.log('#PUTLOCKER2 %s' % url)

                #result = client.source(url, output='title')
                result = client2.http_get(url)

                if '%TITLE%' in result: raise Exception()

                cookie_file = os.path.join(
                    control.cookieDir,
                    '%s_cookies.lwp' % client2.shrink_host(url))
                #cookie_file = os.path.join('/home/mrknow/.kodi/userdata/addon_data/plugin.video.specto/Cookies','%s_cookies.lwp' % client2.shrink_host((url)))
                cj = cookielib.LWPCookieJar(cookie_file)
                try:
                    cj.load(ignore_discard=True)
                except:
                    pass
                auth = cj._cookies['www.putlocker.systems']['/'][
                    '__utmx'].value
                headers = {}

                if not imdb in result: raise Exception()

            else:
                result, headers, content, cookie = client.source(
                    url, output='extended')
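                # NOTE: `auth` is never set on this code path, so the Bearer
                # header below raises NameError and the outer except returns
                # an empty source list.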

            #control.log('#PUTLOCKER3 %s' % auth)

            auth = 'Bearer %s' % urllib.unquote_plus(auth)

            headers['Authorization'] = auth
            headers['X-Requested-With'] = 'XMLHttpRequest'
            headers['Referer'] = url

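            # The embed list comes from an AJAX endpoint guarded by a page
            # token, an element id and a base64-encoded timestamp, all of
            # which must be posted back together.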
            u = 'http://www.putlocker.systems/ajax/embeds.php'

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            elid = urllib.quote(
                base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {
                'action': action,
                'idEl': idEl,
                'token': token,
                'elid': elid
            }
            r = client2.http_get(u, data=post, headers=headers)
            #print r
            #control.log('#PUTLOCKER4 %s' % r)

            r = str(json.loads(r))
            r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(
                r, 'IFRAME', ret='.+?')
            #control.log('#PUTLOCKER5 %s' % r)

            links = []

            for i in r:
                try:
                    links += [{
                        'source': 'gvideo',
                        'quality': client.googletag(i)[0]['quality'],
                        'url': i
                    }]
                except:
                    pass

            links += [{
                'source': 'openload.co',
                'quality': 'SD',
                'url': i
            } for i in r if 'openload.co' in i]

            links += [{
                'source': 'videomega.tv',
                'quality': 'SD',
                'url': i
            } for i in r if 'videomega.tv' in i]
            links += [{
                'source': 'Allmyvideos',
                'quality': 'SD',
                'url': i
            } for i in r if 'allmyvideos.net' in i]

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'provider': 'Putlocker',
                    'url': i['url']
                })
            control.log('#PUTLOCKER6 SOURCES %s' % sources)

            return sources
        except:
            return sources
Esempio n. 38
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]

                    c = client.parseDOM(post, 'content.+?')[0]

                    u = re.findall('>Single Link(.+?)(?:#ff0000|$)',
                                   c.replace('\n', ''))[0]

                    u = client.parseDOM(u, 'a', ret='href')

                    s = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
                    s = s[0] if s else '0'

                    items += [(t, i, s) for i in u]
                except:
                    pass

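            # Validate each candidate: the cleaned title must match, the year
            # or episode tag must match, and dubbed/subbed/extras releases are
            # dropped before the name tokens are mapped to a quality label.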
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    fmt = re.sub(
                        '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                        '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(
                            i.endswith(('subs', 'sub', 'dubbed', 'dub'))
                            for i in fmt):
                        raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()

                    if '1080p' in fmt:
                        quality = '1080p'
                    elif '720p' in fmt:
                        quality = 'HD'
                    else:
                        quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
                        quality = 'SCR'
                    elif any(i in [
                            'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam',
                            'dvdts', 'cam', 'telesync', 'ts'
                    ] for i in fmt):
                        quality = 'CAM'

                    info = []

                    if '3d' in fmt: info.append('3D')

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt):
                        info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        continue
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            failure = traceback.format_exc()
            print('PhazeDDL - Exception: \n' + str(failure))
            return sources
Esempio n. 39
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            try:
                url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except:
                episode = None

            ref = url

            for i in range(3):
                result = client.request(url)
                if not result == None: break

            if not episode == None:
                result = client.parseDOM(result,
                                         'div',
                                         attrs={'id': 'ip_episode'})[0]
                ep_url = client.parseDOM(result,
                                         'a',
                                         attrs={'data-name': str(episode)},
                                         ret='href')[0]
                for i in range(3):
                    result = client.request(ep_url)
                    if not result == None: break

            r = client.parseDOM(result,
                                'div',
                                attrs={'class': '[^"]*server_line[^"]*'})

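            # Each server row exposes data-film/data-server/data-name values;
            # post them to ipplugins.php, then ipplayer.php returns either a
            # list of gvideo variants or a single embed page URL.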
            for u in r:
                try:
                    url = urlparse.urljoin(
                        self.base_link, '/ip.file/swf/plugins/ipplugins.php')
                    p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                    p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                    p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                    post = {
                        'ipplugins': 1,
                        'ip_film': p1,
                        'ip_server': p2,
                        'ip_name': p3
                    }
                    post = urllib.urlencode(post)
                    for i in range(3):
                        result = client.request(url,
                                                post=post,
                                                XHR=True,
                                                referer=ref,
                                                timeout='10')
                        if not result == None: break

                    result = json.loads(result)
                    u = result['s']
                    s = result['v']

                    url = urlparse.urljoin(
                        self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')

                    post = {'u': u, 'w': '100%', 'h': '420', 's': s, 'n': 0}
                    post = urllib.urlencode(post)

                    for i in range(3):
                        result = client.request(url,
                                                post=post,
                                                XHR=True,
                                                referer=ref)
                        if not result == None: break

                    url = json.loads(result)['data']

                    if type(url) is list:
                        url = [i['files'] for i in url]
                        for i in url:
                            try:
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': directstream.googletag(i)[0]['quality'],
                                    'provider': 'TuneMovie',
                                    'url': i,
                                    'direct': True,
                                    'debridonly': False
                                })
                            except:
                                pass

                    else:
                        url = client.request(url)
                        url = client.parseDOM(url,
                                              'source',
                                              ret='src',
                                              attrs={'type': 'video.+?'})[0]
                        url += '|%s' % urllib.urlencode(
                            {'User-agent': client.randomagent()})
                        sources.append({
                            'source': 'cdn',
                            'quality': 'HD',
                            'provider': 'TuneMovie',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })

                except:
                    pass

            return sources
        except:
            return sources
Esempio n. 40
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not debrid.status(): raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = client.parseDOM(r, 'h2')
            r = [re.findall('''<a.+?href=["']([^"']+)["']>(.+?)</a>''', i, re.DOTALL) for i in r]

            hostDict = hostprDict + hostDict

            items = []

            for item in r:
                try:
                    t = item[0][1]
                    t = re.sub('(\[.*?\])|(<.+?>)', '', t)
                    t1 = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)

                    if not cleantitle.get(t1) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', t)[-1].upper()

                    if not y == hdlr: raise Exception()

                    data = client.request(urlparse.urljoin(self.base_link, item[0][0]))
                    data = dom_parser2.parse_dom(data, 'a', attrs={'target': '_blank'})
                    u = [(t, i.content) for i in data]
                    items += u

                except:
                    pass

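            # Each item pairs a validated release name with one target link;
            # derive quality and size from the name, then vet the host.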
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    quality, info = source_utils.get_release_quality(name, item[1])

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', name)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    if not url.startswith('http'): continue
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                    'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            failure = traceback.format_exc()
            print('TVRelease - Exception: \n' + str(failure))
            return sources
Esempio n. 41
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []
            control.log('#Dayt url %s' % url)

            if url == None: return sources
            content = re.compile('(.+?)\sS\d*E\d*$').findall(url)
            control.log('#Dayt content %s' % content)

            if len(content) == 0:
                control.log('#Dayt ttttttt')
                title, year = re.compile('(.+?) \((\d{4})\)').findall(url)[0]
                mytitle = cleantitle.movie(title)
                control.log('#Dayt title, year: %s,%s' % (title, year))

                data = os.path.join(control.dataPath, 'daytse1.db')
                download = True
                try:
                    download = abs(
                        datetime.datetime.fromtimestamp(os.path.getmtime(
                            data)) -
                        (datetime.datetime.now())) > datetime.timedelta(days=2)
                except:
                    pass
                if download == True:
                    control.log('#Dayt DDDOOOWNLOAD ')
                    #result = client.request(base64.b64decode(self.data_link))
                    #with open(data, "wb") as code:
                    #    code.write(result)
                    result = client.request(base64.b64decode(self.data_link))
                    print(len(result))
                    control.log(">>>>>>>>>>>>>>> ONEC Downloading")
                    zip = zipfile.ZipFile(StringIO.StringIO(result))
                    zip.extractall(control.dataPath)
                    zip.close()

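                # Look the title up in the bundled SQLite dump. The LIKE
                # pattern is built by string concatenation; a parameterized
                # query (a ? placeholder) would be safer against quoting.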
                dbcon = database.connect(data)
                dbcur = dbcon.cursor()
                control.log(
                    "#Dayt DDDOOOWNLOAD SELECT * FROM movies WHERE title like '%"
                    + cleantitle.movie(title) + "%'")
                dbcur.execute("SELECT * FROM movies WHERE title like '%" +
                              cleantitle.movie(title) + "%'")
                result = dbcur.fetchone()
                #myurl = urlparse.urljoin(self.base_link, '/movies/' + urllib.quote_plus(result[1]))
                myurl = urlparse.urljoin(self.base_link,
                                         '/movies/' + result[1])

                myhead = {'Referer': 'http://dayt.se/movies/'}
                result10 = client.request(myurl, headers=myhead)
                result10 = client.parseDOM(result10,
                                           'div',
                                           attrs={'id': '5throw'})[0]
                result10 = client.parseDOM(result10,
                                           'a',
                                           attrs={'rel': 'nofollow'},
                                           ret='href')
                mquality = 'HD'
                if '1080' in result[2]: mquality = '1080p'
                for i in result10:
                    if 'mail.ru' in i:
                        myresolve = resolvers.request(i)
                        sources.append({
                            'source': 'MAIL.RU',
                            'quality': mquality,
                            'provider': 'Dayt',
                            'url': myresolve
                        })
                    if 'yadi.sk' in i:
                        myresolve = resolvers.request(i)
                        sources.append({
                            'source': 'YADISK',
                            'quality': mquality,
                            'provider': 'Dayt',
                            'url': myresolve
                        })

                result = client.parseDOM(result, 'iframe', ret='src')
                result = [i for i in result if 'pasep' in i][0]
                result = client.request(result)
                result = client.parseDOM(result, 'iframe', ret='src')[0]
                result = client.request(result)
                result = client.parseDOM(result, 'iframe', ret='src')[0]
                links = resolvers.request(result)
                for i in links:
                    sources.append({
                        'source': 'gvideo',
                        'quality': i[1],
                        'provider': 'Dayt',
                        'url': i[0]
                    })
                return sources

            # Episode URLs ('Title SxxExx') match `content` above but are not
            # handled here, so return whatever has been collected (nothing).
            return sources
        except:
            return sources
Esempio n. 42
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            for i in range(3):
                result = client.request(url)
                if not result == None: break

            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

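            # Pull the embed iframe out of the #embed container; each page is
            # probed below for pcloud variants first, then jwplayer-style
            # file/label pairs.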
            pages = []

            r = client.parseDOM(result, 'div', attrs={'id': 'embed'})[0]
            pages.append(client.parseDOM(r, 'iframe', ret='src')[0])

            for page in pages:
                try:
                    if not page.startswith('http'):
                        page = 'http:' + page

                    for i in range(3):
                        result = client.request(page)
                        if not result == None: break

                    captions = re.search(
                        'kind\s*:\s*(?:\'|\")captions(?:\'|\")', result)
                    if not captions: raise Exception()

                    # (url, height) pairs; default to empty so the quality
                    # buckets below simply yield nothing if neither probe hits.
                    l = []

                    try:
                        r = re.findall(
                            'url\s*:\s*\'(http(?:s|)://api.pcloud.com/.+?)\'',
                            result)[0]
                        r = client.request(r)
                        r = json.loads(r)['variants']
                        r = [(i['hosts'], i['path'], i['height']) for i in r
                             if 'hosts' in i and 'path' in i and 'height' in i]
                        r = [('%s%s' % (i[0][0], i[1]), str(i[2])) for i in r
                             if len(i[0]) > 0]
                        r = [
                            (i[0] if i[0].startswith('http') else 'http://%s' %
                             i[0], i[1]) for i in r
                        ]
                        host = 'cdn'
                        direct = False
                        l = r
                    except:
                        pass

                    try:
                        r = re.findall(
                            '"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?[^"]*"',
                            result)
                        if not r: raise Exception()
                        host = 'gvideo'
                        direct = True
                        l = r
                    except:
                        pass

                    links = [(i[0], '1080p') for i in l if int(i[1]) >= 1080]
                    links += [(i[0], 'HD') for i in l
                              if 720 <= int(i[1]) < 1080]
                    links += [(i[0], 'SD') for i in l
                              if 480 <= int(i[1]) < 720]

                    for i in links:
                        sources.append({
                            'source': host,
                            'quality': i[1],
                            'provider': 'Sezonlukdizi',
                            'url': i[0]
                        })
                except:
                    pass

            return sources
        except Exception as e:
            control.log('ERROR sezonlukidz %s' % e)
            return sources
Esempio n. 43
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        sources = []
        try:
            #control.log("one-url-0 %s" % url)

            if url == None: return sources

            if not str(url).startswith('/'):
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                #control.log("# DATA %s" % data)

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                #control.log("one-date-TITLE %s" % title)
                sezon = data['season']
                episode = data['episode']
                year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
                tvtitle = '%s - Season %s' % (title, sezon)
                query = self.search_link % urllib.quote(tvtitle)
                query = urlparse.urljoin(self.base_link, query)

                result = client.request(query)
                #control.log("one-date-0 %s" % year)
                tvshowtitle = cleantitle.tv(title)
                years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

                r = client.parseDOM(result, 'div', attrs={'class': 'item_movie'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(re.sub('//.+?/', '', i[0]), i[1], re.findall('(\d{4})', i[1])[0]) for i in r]
                r = [(i[0], i[1].split('-')[0].strip(), i[2]) for i in r]
                r = [i for i in r if tvshowtitle == cleantitle.tv(i[1])]
                r = [i for i in r if any(x in i[2] for x in years)]

                u = [i[0] for i in r][0]
                url = urlparse.urljoin(self.base_link, '/' + u)
                result = client.request(url)

                result = client.parseDOM(result, 'div', attrs={'class': 'ep_link full'})[0]
                r = [client.parseDOM(result, 'a', ret='href'), client.parseDOM(result, 'a')]
                #control.log("one-epis-2 %s" % result)
                r = [(r[0][idx], r[1][idx]) for idx, i in enumerate(r[0])]
                r = [(i[0], re.findall('\d+', i[1])[0]) for i in r]
                #control.log("one-epis-3 %s" % r)
                u = [i[0] for i in r if i[1] == episode][0]

                #control.log("one-epis-0 %s" % u)
                url = 'http:' + u
                url = client.replaceHTMLCodes(url)
                #control.log("one-epis-0 %s" % url)

                url = url.encode('utf-8')

            ref = urlparse.urljoin(self.base_link, url)
            #control.log("one-sources-0 %s" % ref)
            headers = {'Referer': ref,
                       'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'}
            r100 = client.request(ref, headers=headers, output='extended')
            cookie = r100[4]
            headers = r100[3]
            result = r100[0]

            # The player is loaded via an AJAX call keyed by a numeric id
            # embedded in the page as load_player(<id>);
            r = re.compile('load_player\((\d+)\);').findall(result)
            if len(r) > 0:
                t = urlparse.urljoin(self.base_link, self.load_player)
                headers['x-requested-with'] = "XMLHttpRequest"
                headers['cookie']=cookie
                headers['Referer'] = ref
                post = urllib.urlencode({'id':r[0]})

                r1= client.request(t, headers=headers, post=post)
                r1 = json.loads(r1)
                r1 = client.request('http:'+r1['value'], headers=headers)
                r1 = json.loads(r1)

                for i in r1['playlist'][0]['sources']:
                    try:
                        sources.append({'source': 'gvideo',
                                        'quality': client.googletag(i['file'])[0]['quality'],
                                        'provider': 'OneMovies', 'url': i['file']})
                    except:
                        pass

            return sources

        except Exception as e:
            control.log('ERROR onemovies %s' % e)
            return sources
Esempio n. 44
0
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                log('INFO', 'get_sources', 'Completed')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                log('INFO', 'get_sources', 'Completed')
                return sources

            year = None
            episode = None
            season = None

            log('INFO', 'get_sources-1', 'data-items: %s' % url, dolog=False)
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            try:
                year = re.findall(
                    '(\d{4})', data['premiered']
                )[0] if 'tvshowtitle' in data else data['year']
            except:
                try:
                    year = data['year']
                except:
                    year = None
            try:
                season = data['season']
            except:
                pass
            try:
                episode = data['episode']
            except:
                pass

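            # Search twice: first with the season (shows) or year (movies)
            # appended to the title, then with the bare title as a fallback.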
            queries = []
            if season != None:
                queries = [{
                    'keyword': '%s %s' % (title, season)
                }, {
                    'keyword': title
                }]
            else:
                queries = [{
                    'keyword': '%s %s' % (title, year)
                }, {
                    'keyword': title
                }]

            rs = []
            for query in queries:
                search_url = urlparse.urljoin(self.base_link, '/search.html')
                search_url = search_url + '?' + urllib.urlencode(query)
                log('INFO',
                    'get_sources-2',
                    'search-url: %s' % search_url,
                    dolog=False)

                result = proxies.request(search_url,
                                         headers=self.headers,
                                         proxy_options=proxy_options,
                                         use_web_proxy=self.proxyrequired,
                                         httpsskip=True)
                rs = client.parseDOM(result,
                                     'ul',
                                     attrs={'class': 'listing items'})
                if len(rs) > 0 and len(rs[0].strip()) > 4:
                    break

            r = [(urlparse.urljoin(self.base_link,
                                   client.parseDOM(i, 'a', ret='href')[0]),
                  client.parseDOM(i, 'div', attrs={'class': 'name'}))
                 for i in rs]
            ux = None
            for s in r:
                ux = s[0]
                result = proxies.request(ux,
                                         headers=self.headers,
                                         proxy_options=proxy_options,
                                         use_web_proxy=self.proxyrequired,
                                         httpsskip=True)
                rs = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'watch infonation'})[0]
                rs = client.parseDOM(rs, 'ul', attrs={'class': 'three'})[0]
                if season != None:
                    break
                if year != None and year in rs:
                    break

            log('INFO',
                'get_sources-3',
                'match-page-url: %s' % ux,
                dolog=False)
            links_m = []
            trailers = []
            poster = None
            vidtype = 'Movie'
            if season != None:
                vidtype = 'Show'

            riptype = 'BRRIP'
            quality = '720p'
            sub_url = None

            try:
                poster1 = client.parseDOM(result,
                                          'div',
                                          attrs={'class': 'picture'})
                poster = client.parseDOM(poster1, 'img', ret='src')[0]
            except:
                pass

            links = client.parseDOM(result,
                                    'li',
                                    attrs={'class': 'child_episode'})

            try:
                if season == None:
                    rip_qual = client.parseDOM(result,
                                               'div',
                                               attrs={'id': 'info_movies'})[0]
                    rip_qual = client.parseDOM(rip_qual,
                                               'div',
                                               attrs={'class': 'right'})[0]
                    rip_qual = client.parseDOM(rip_qual, 'a')[0].strip()
                    rip_qual2 = ep_title = client.parseDOM(links[0],
                                                           'a',
                                                           ret='title')[0]

                    if 'HD' not in rip_qual and 'HD' not in rip_qual2:
                        riptype = 'CAM'
                    elif 'CAM' in rip_qual or 'CAM' in rip_qual2:
                        riptype = 'CAM'
                    if riptype == 'CAM':
                        quality = '480p'
                    if '720p' in rip_qual or '720p' in rip_qual2:
                        quality = '720p'
                    elif '1080p' in rip_qual or '1080p' in rip_qual2:
                        quality = '1080p'
            except:
                pass
            mov_url = None

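            # Movies link straight to the watch page; for shows, pull the
            # episode number out of each link title and keep the matching one.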
            for l in links:
                try:
                    mov_urlx = urlparse.urljoin(
                        self.base_link,
                        client.parseDOM(l, 'a', ret='href')[0])
                    ep_title = client.parseDOM(l, 'a', ret='title')[0]

                    if season == None:
                        mov_url = mov_urlx
                    else:
                        # Try progressively looser patterns to extract the
                        # episode number from the link title.
                        for pat in (r'Episode (.*?) ', r'Episode (.*?)-',
                                    r'Episode (.*?):', r'Episode (.*)'):
                            m = re.findall(pat, ep_title)
                            if m:
                                ep_nr = m[0]
                                break
                        else:
                            raise Exception('no episode number in title')

                        ep_nr = ep_nr.replace('-',
                                              '').replace(':',
                                                          '').replace(' ', '')
                        ep_nr = filter(lambda x: x.isdigit(), ep_nr)

                        if int(episode) == int(ep_nr):
                            mov_url = mov_urlx
                except Exception as e:
                    log('FAIL',
                        'get_sources-4-A',
                        '%s: %s' % (title, e),
                        dolog=False)

            if mov_url == None:
                raise Exception('No match found !')

            if season == None:
                log('INFO',
                    'get_sources-4',
                    'movie-page-url: %s' % mov_url,
                    dolog=False)
            else:
                log('INFO',
                    'get_sources-4',
                    'show-episode-url: %s' % mov_url,
                    dolog=False)

            page_url = mov_url
            result = proxies.request(mov_url,
                                     headers=self.headers,
                                     proxy_options=proxy_options,
                                     use_web_proxy=self.proxyrequired,
                                     httpsskip=True)

            try:
                sub_url = re.findall(r'\"(.*vtt)\"', result)[0]
            except:
                pass

            if testing == False:
                try:
                    matches = re.compile(
                        'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
                    ).findall(result)
                    for match in matches:
                        try:
                            if 'youtube.com' in match:
                                match = match.replace('embed/', 'watch?v=')
                                trailers.append(match)
                        except:
                            pass
                except Exception as e:
                    pass

                for trailer in trailers:
                    try:
                        l = resolvers.createMeta(trailer,
                                                 self.name,
                                                 self.logo,
                                                 '720p', [],
                                                 key,
                                                 poster=poster,
                                                 vidtype='Trailer',
                                                 testing=testing)
                        for ll in l:
                            if ll != None and 'key' in ll.keys():
                                links_m.append(ll)
                    except:
                        pass

            links = client.parseDOM(result,
                                    'div',
                                    attrs={'class': 'anime_muti_link'})
            links = client.parseDOM(links, 'li', ret='data-video')
            video_urls = []

            for l in links:
                if 'http' not in l:
                    l = 'http:' + l
                video_urls.append(l)

            for video_url in video_urls:
                try:
                    l = resolvers.createMeta(video_url,
                                             self.name,
                                             self.logo,
                                             quality, [],
                                             key,
                                             poster=poster,
                                             riptype=riptype,
                                             vidtype=vidtype,
                                             sub_url=sub_url,
                                             testing=testing,
                                             page_url=page_url)
                    for ll in l:
                        if ll != None and 'key' in ll.keys():
                            links_m.append(ll)
                except:
                    pass

            for l in links_m:
                if l != None and 'key' in l.keys():
                    sources.append(l)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
            else:
                log(
                    'SUCCESS', 'get_sources', '%s sources : %s' %
                    (cleantitle.title_from_key(key), len(sources)))

            log('INFO', 'get_sources', 'Completed')

            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e)
            log('INFO', 'get_sources', 'Completed')
            return sources
Esempio n. 45
0
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = cleantitle.get(data['tvshowtitle'])
                season, episode = '%01d' % int(data['season']), '%01d' % int(
                    data['episode'])
                year = re.findall('(\d{4})', data['premiered'])[0]

                url = cache.get(self.dizibox_tvcache, 120)

                url = [i[0] for i in url if title == i[1]][-1]
                url = urlparse.urljoin(self.base_link, url)

                result = client.request(url)

                if not season == '1':
                    url = client.parseDOM(result,
                                          'a',
                                          ret='href',
                                          attrs={'class': 'season-.+?'})
                    url = [i for i in url if '/%s-sezon-' % season in i][0]
                    result = client.request(url)

                url = client.parseDOM(result, 'a', ret='href')
                url = [
                    i for i in url
                    if '%s-sezon-%s-bolum-' % (season, episode) in i
                ][0]

                atr = re.findall('%s.+?\s+(\d{4})' % url, result)[0]
                if not atr == year: raise Exception()

            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url)
            result = re.sub(r'[^\x00-\x7F]+', '', result)

            url = re.compile('(<a.*?</a>)', re.DOTALL).findall(result)
            url = [(client.parseDOM(i, 'a',
                                    ret='href'), client.parseDOM(i, 'a'))
                   for i in url]
            url = [(i[0][0], i[1][0]) for i in url
                   if len(i[0]) > 0 and len(i[1]) > 0]
            url = [i[0] for i in url if i[1] == 'Altyazsz'][0]

            result = client.request(url)
            result = re.sub(r'[^\x00-\x7F]+', '', result)

            headers = {'Referer': url}

            url = client.parseDOM(result,
                                  'span',
                                  attrs={'class': 'object-wrapper'})[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = client.replaceHTMLCodes(url)

            url = client.request(url, headers=headers)
            url = client.parseDOM(url,
                                  'param',
                                  ret='value',
                                  attrs={'name': 'flashvars'})[0]
            url = urllib.unquote_plus(url)
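            # The flashvars carry a `mid` query parameter identifying the
            # ok.ru video, which directstream.odnoklassniki() then resolves.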
            url = 'http://ok.ru/video/%s' % urlparse.parse_qs(
                urlparse.urlparse(url).query)['mid'][0]
            url = directstream.odnoklassniki(url)

            for i in url:
                sources.append({
                    'source': 'vk',
                    'quality': i['quality'],
                    'provider': 'Dizibox',
                    'url': i['url'],
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Esempio n. 46
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            path = urlparse.urlparse(url).path

            result = client2.http_get(url)
            result = re.sub(r'[^\x00-\x7F]+', '', result)
            result = client.parseDOM(result, 'li')
            result = [(client.parseDOM(i, 'a',
                                       ret='href'), client.parseDOM(i, 'a'))
                      for i in result]
            result = [
                i[0] for i in result if len(i[0]) > 0 and path in i[0][0]
                and len(i[1]) > 0 and 'Altyaz' in i[1][0]
            ][0][0]

            url = urlparse.urljoin(self.base_link, result)

            result = client2.http_get(url)
            result = re.sub(r'[^\x00-\x7F]+', '', result)
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'video-player'})[0]
            result = client.parseDOM(result, 'iframe', ret='src')[-1]

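            # The player iframe sometimes base64-encodes its target in the
            # `id` query parameter; failing that, fetch the embed page and dig
            # the real host link (ok.ru / vk.com / openload.co) out of it.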
            try:
                url = base64.b64decode(
                    urlparse.parse_qs(
                        urlparse.urlparse(result).query)['id'][0])
                if not url.startswith('http'): raise Exception()
            except:
                url = client2.http_get(result)
                url = urllib.unquote_plus(url.decode('string-escape'))

                frame = client.parseDOM(url, 'iframe', ret='src')

                if len(frame) > 0:
                    url = [client.source(frame[-1], output='geturl')]
                else:
                    url = re.compile('"(.+?)"').findall(url)
                url = [
                    i for i in url
                    if 'ok.ru' in i or 'vk.com' in i or 'openload.co' in i
                ][0]

            try:
                url = 'http://ok.ru/video/%s' % urlparse.parse_qs(
                    urlparse.urlparse(url).query)['mid'][0]
            except:
                pass

            if 'openload.co' in url:
                host = 'openload.co'
                direct = False
                url = [{
                    'url': url,
                    'quality': 'HD'
                }]
            elif 'ok.ru' in url:
                host = 'vk'
                direct = True
                url = resolvers.request(url)
            elif 'vk.com' in url:
                host = 'vk'
                direct = True
                url = resolvers.request(url)
            else:
                raise Exception()

            for i in url:
                sources.append({
                    'source': host,
                    'quality': i['quality'],
                    'provider': 'Onlinedizi',
                    'url': i['url'],
                })

            return sources
        except:
            return sources
Esempio n. 47
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            if (self.user == '' or self.password == ''): raise Exception()

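            # ororo requires an account: sign in first, then carry both the
            # session cookie and the language cookie on every request.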
            cookie = client.request(self.sign,
                                    post=self.post,
                                    headers=self.headers,
                                    cookie=self.lang,
                                    output='cookie')
            cookie = '%s; %s' % (cookie, self.lang)

            try:
                url, season, episode = re.compile(
                    '(.+?)#(\d*)-(\d*)$').findall(url)[0]
            except:
                pass
            try:
                href = '#%01d-%01d' % (int(season), int(episode))
            except:
                href = '.+?'

            url = referer = urlparse.urljoin(self.base_link, url)

            result = client.request(url, cookie=cookie)

            url = client.parseDOM(result,
                                  'a',
                                  ret='data-href',
                                  attrs={'href': href})[0]
            url = urlparse.urljoin(self.base_link, url)

            headers = {'X-Requested-With': 'XMLHttpRequest'}
            result = client.request(url,
                                    cookie=cookie,
                                    referer=referer,
                                    headers=headers)

            headers = '|%s' % urllib.urlencode(
                {
                    'User-Agent': self.headers['User-Agent'],
                    'Cookie': str(cookie)
                })

            url = client.parseDOM(result,
                                  'source',
                                  ret='src',
                                  attrs={'type': 'video/mp4'})
            url += client.parseDOM(result,
                                   'source',
                                   ret='src',
                                   attrs={'type': 'video/.+?'})
            url = url[0] + headers

            sources.append({
                'source': 'ororo',
                'quality': 'HD',
                'provider': 'Ororo',
                'url': url
            })

            return sources
        except:
            return sources
Esempio n. 48
0
	def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
		try:
			sources = []
			if control.setting('Provider-%s' % name) == False:
				log('INFO','get_sources','Provider Disabled by User')
				return sources
			if url == None: 
				log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
				return sources

			url = urlparse.urljoin(self.base_link, url)
			
			#r = client.request(url)
			req = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)

			r = client.parseDOM(req, 'iframe', ret='src')
			try:
				r2 = re.findall('data-video=\"(.*?)\"', req)
				for r2_i in r2:
					r.append(r2_i)
			except:
				pass
				
			links = []

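			# Each candidate is either a vidstreaming embed, whose page is
			# fetched for its <source> tags, or a direct embed URL.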
			for u in r:
				try:
					if 'http' not in u:
						u = 'http:' + u

					if u.startswith('http'):
						if 'vidstreaming' in u:
							#url = client.request(u)
							url = proxies.request(u, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
							
							url = client.parseDOM(url, 'source', ret='src')
						else:
							url = [u]

						for i in url:
							#try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
							#except: pass
							
							try:
								qualityt = client.googletag(i)[0]['quality']
							except:
								qualityt = u'720p'
							try:
								links = resolvers.createMeta(i, self.name, self.logo, qualityt, links, key, vidtype='Show', testing=testing)
							except:
								pass
				except:
					pass
					
			for i in links: sources.append(i)
			
			if len(sources) == 0:
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
				return sources
			
			log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
			return sources
		except Exception as e:
			log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
			return sources
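
The example above collects embed URLs from both iframe src attributes and data-video attributes, then fixes protocol-relative links by prefixing http:. A regex-only sketch of that harvesting step; harvest_embeds and the HTML sample are invented:

import re

def harvest_embeds(html):
    urls = re.findall(r'<iframe[^>]+src="([^"]+)"', html)
    urls += re.findall(r'data-video="(.*?)"', html)
    # protocol-relative links ('//host/...') need a scheme prepended
    return [u if u.startswith('http') else 'http:' + u for u in urls]

print harvest_embeds('<iframe src="//vidstreaming.example/embed/1"></iframe>')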
Esempio n. 49
0
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []

            #print "PRIMEWIRE get_sources %s" % url

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            #result = proxies.request(url, 'choose_tabs', proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
            result = proxies.request(url,
                                     proxy_options=proxy_options,
                                     use_web_proxy=self.proxyrequired)

            links_m = []
            trailers = []
            if testing == False:
                try:
                    matches = re.findall(r'\"(http[s]?://www.youtube.*?)\"',
                                         result)
                    for match in matches:
                        try:
                            #print match
                            if 'youtube.com' in match and '"' not in match:
                                match = match.replace('embed/', 'watch?v=')
                                trailers.append(match)
                        except:
                            pass
                except Exception as e:
                    pass

                for trailer in trailers:
                    links_m = resolvers.createMeta(trailer,
                                                   self.name,
                                                   self.logo,
                                                   '720p',
                                                   links_m,
                                                   key,
                                                   vidtype='Trailer')

            links = client.parseDOM(result, 'tbody')

            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')[0]
                    try:
                        url = urlparse.parse_qs(
                            urlparse.urlparse(url).query)['u'][0]
                    except:
                        pass
                    try:
                        url = urlparse.parse_qs(
                            urlparse.urlparse(url).query)['q'][0]
                    except:
                        pass
                    url = urlparse.parse_qs(
                        urlparse.urlparse(url).query)['url'][0]

                    url = base64.b64decode(url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if 'http' not in url:
                        raise Exception()
                    for u in AVOID_DOMAINS:
                        if u in url:
                            raise Exception()

                    quality = client.parseDOM(i, 'span', ret='class')[0]

                    if quality == 'quality_cam' or quality == 'quality_ts':
                        quality = 'CAM'
                    elif quality == 'quality_dvd':
                        quality = 'SD'
                    else:
                        raise Exception()

                    #print "%s --- %s" % (self.name,url)
                    links_m = resolvers.createMeta(url, self.name, self.logo,
                                                   quality, links_m, key)

                    # links_m accumulates across iterations; rebuild sources
                    # from it instead of appending, to avoid duplicates
                    sources = [l for l in links_m]
                    if testing and len(sources) > 0:
                        break
                except:
                    pass

            self.log('SUCCESS',
                     'get_sources',
                     'links : %s' % len(sources),
                     dolog=testing)
            return sources
        except Exception as e:
            self.log('ERROR', 'get_sources', '%s' % e, dolog=testing)
            return sources
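
The link-unwrapping chain in the loop above is worth isolating: Primewire-style sites wrap the real target in optional u=/q= proxy layers, then carry it base64-encoded in a url= parameter. A self-contained sketch under those assumptions (the wrapped URL is fabricated):

import base64, urlparse

def unwrap(link):
    # peel optional proxy layers that re-wrap the target in ?u= or ?q=
    for param in ('u', 'q'):
        try:
            link = urlparse.parse_qs(urlparse.urlparse(link).query)[param][0]
        except:
            pass
    # the final hop carries the real target base64-encoded in ?url=
    return base64.b64decode(
        urlparse.parse_qs(urlparse.urlparse(link).query)['url'][0])

print unwrap('http://example.com/external.php?url=aHR0cDovL2hvc3QudGxkL3ZpZGVv')
# -> http://host.tld/video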
Esempio n. 50
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        sources = []
        try:
            #control.log("one-url-0 %s" % url)

            if url == None: return sources

            if not str(url).startswith('/'):
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                #control.log("# DATA %s" % data)

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                #control.log("one-date-TITLE %s" % title)
                sezon = data['season']
                episode = data['episode']
                year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
                tvtitle = '%s - Season %s' % (title, sezon)
                query = self.search_link % urllib.quote(tvtitle)
                query = urlparse.urljoin(self.base_link, query)

                result = client.request(query)
                #control.log("one-date-0 %s" % year)
                tvshowtitle = cleantitle.tv(title)
                years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

                r = client.parseDOM(result, 'div', attrs={'class': 'item_movie'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(re.sub('//.+?/', '', i[0]), i[1], re.findall('(\d{4})', i[1])[0]) for i in r]
                r = [(i[0], i[1].split('-')[0].strip(), i[2]) for i in r]
                r = [i for i in r if tvshowtitle == cleantitle.tv(i[1])]
                r = [i for i in r if any(x in i[2] for x in years)]

                u = [i[0] for i in r][0]
                url = urlparse.urljoin(self.base_link, '/' + u)
                result = client.request(url)

                result = client.parseDOM(result, 'div', attrs={'class': 'ep_link full'})[0]
                r = [client.parseDOM(result, 'a', ret='href'), client.parseDOM(result, 'a')]
                #control.log("one-epis-2 %s" % result)
                r = [(r[0][idx],r[1][idx]) for idx,i in enumerate(r[0])]
                r = [(i[0], re.findall('\d+',i[1])[0]) for i in r]
                #control.log("one-epis-3 %s" % r)
                u = [i[0] for i in r if i[1] == episode][0]

                #control.log("one-epis-0 %s" % u)
                url = 'http:' + u
                url = client.replaceHTMLCodes(url)
                #control.log("one-epis-0 %s" % url)

                url = url.encode('utf-8')

            ref = urlparse.urljoin(self.base_link, url)
            #control.log("one-sources-0 %s" % ref)
            headers = {'Referer': ref, 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0'}
            r100 = client.request(ref, headers=headers, output='extended')
            cookie = r100[4]
            headers = r100[3]
            result = r100[0]

            r = re.compile('id:.(\d+),\s.*episode_id:.(\d+),\s.*link_id:.(\d+)').findall(result)
            if len(r) > 0:
                t = urlparse.urljoin(self.base_link, self.episode_link %(r[0][0], r[0][1], r[0][2], self.now_milliseconds()))
                headers['x-requested-with'] = "XMLHttpRequest"
                headers['cookie']=cookie
                headers['Accept-Formating'] = 'application/json, text/javascript'
                headers['Referer'] = ref
                headers['Server'] = 'cloudflare-nginx'

                r1 = client.request(t, headers=headers)
                r2 = client.parseDOM(r1, 'div', attrs = {'class': 'full server_link'})
                r2 = [(client.parseDOM(i, 'a', ret='onclick')[0], client.parseDOM(i, 'a')[0]) for i in r2]
                r2 = [(re.compile("'(\d+)', (\d+)").findall(i[0])[0], i[1]) for i in r2]

                for i in r2:
                    try:
                        t = urlparse.urljoin(self.base_link,self.load_player % (i[0][0], i[0][1], self.now_milliseconds()))
                        #control.log("sources-7 %s @ %s " % ((t), i[1]))
                        r3 = client.request(t, headers=headers)
                        r4 = json.loads(r3)
                        #control.log("sources-8 %s @ " % (r4))
                        if r4['status'] == True:
                            if r4['link'] == False:
                                #gvideo
                                #control.log("sources-GV %s @ " % (r4))
                                r5 = client.request(r4['playlist'], headers=headers)
                                for link in json.loads(r5)['playlist'][0]['sources']:
                                    #control.log("sources-LINK %s @ " % (link))
                                    #ala['playlist'][0]['sources'][-1]['file']
                                    sources.append({'source': 'gvideo', 'quality': client.googletag(link['file'])[0]['quality'],
                                                'provider': 'OneMovies', 'url': link['file']})
                            else:
                                r5 = client.request(r4['link'], headers=headers, output='geturl')
                                sources.append({'source': 'openload', 'quality': i[1],
                                                'provider': 'OneMovies', 'url': r5})
                            #control.log("sources-810 %s @ " % (r5))

                    except:
                        pass
                return sources

        except Exception as e:
            control.log('ERROR onemovies %s' % e)
            return sources
Esempio n. 51
0
    def get_show(self,
                 imdb,
                 tvdb,
                 tvshowtitle,
                 year,
                 season,
                 proxy_options=None,
                 key=None):
        try:
            #print "PRIMEWIRE get_show %s" % tvshowtitle

            key = urlparse.urljoin(self.base_link, self.key_link)
            #key = proxies.request(key, 'searchform', proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
            key = proxies.request(key,
                                  proxy_options=proxy_options,
                                  use_web_proxy=self.proxyrequired)
            key = client.parseDOM(key,
                                  'input',
                                  ret='value',
                                  attrs={'name': 'key'})[0]

            query = self.tvsearch_link % (urllib.quote_plus(
                cleantitle.query(tvshowtitle)), key)
            query = urlparse.urljoin(self.base_link, query)

            #print query

            #result = str(proxies.request(query, 'index_item', proxy_options=proxy_options, use_web_proxy=self.proxyrequired))
            result = str(
                proxies.request(query,
                                proxy_options=proxy_options,
                                use_web_proxy=self.proxyrequired))
            #if 'page=2' in result or 'page%3D2' in result: result += str(proxies.request(query + '&page=2', 'index_item', proxy_options=proxy_options, use_web_proxy=self.proxyrequired))
            if 'page=2' in result or 'page%3D2' in result:
                result += str(
                    proxies.request(query + '&page=2',
                                    proxy_options=proxy_options,
                                    use_web_proxy=self.proxyrequired))

            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'index_item.+?'})

            tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
            years = ['%s' % str(year)]
            for iy in range(0, int(season)):
                years.append('%s' % str(int(year) - int(iy)))
                years.append('%s' % str(int(year) + int(iy)))

            years = list(set(years))
            #print years

            result = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]

            #print result

            r = []
            for i in result:
                u = i[0]
                try:
                    u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
                except:
                    pass
                try:
                    u = urlparse.parse_qs(urlparse.urlparse(u).query)['q'][0]
                except:
                    pass
                r += [(u, i[1])]

            #print r

            for year in years:
                match = [
                    i[0] for i in r
                    if tvshowtitle == cleantitle.get(i[1]) and '(%s)' %
                    str(year) in i[1]
                ]
                if len(match) > 0:
                    break

            #print match

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    #r = proxies.request(urlparse.urljoin(self.base_link, i), 'tv_episode_item', proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
                    r = proxies.request(urlparse.urljoin(self.base_link, i),
                                        proxy_options=proxy_options,
                                        use_web_proxy=self.proxyrequired)
                    if imdb in str(r):
                        url = i
                        break
                except:
                    pass

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except Exception as e:
            print(e)
            return
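
The years list built above widens the acceptable premiere year as the season number grows, since season N of a show can air several years after year one. A small sketch of that candidate-year window; year_window is a made-up name:

def year_window(year, season):
    # accept the start year plus/minus up to (season - 1) years
    years = ['%s' % year]
    for iy in range(0, int(season)):
        years.append('%s' % (int(year) - iy))
        years.append('%s' % (int(year) + iy))
    return list(set(years))

print year_window('2010', '3')  # 2008..2012 in arbitrary set order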
Esempio n. 52
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            query = url.replace('\'', '').replace('.', ' ')
            query = re.sub('\s+', ' ', query)
            query = self.base_link + self.search_link % urllib.quote_plus(
                query)

            result = client.source(query)

            result = client.parseDOM(result,
                                     'table',
                                     attrs={'class': 'posts_table'})

            title, hdlr = re.compile('(.+?) (S\d*E\d*)$').findall(url)[0]
            title = cleantitle.tv(title)
            hdlr = [hdlr]

            links = []

            for i in result:
                try:
                    name = client.parseDOM(i, 'a')[-1]
                    name = client.replaceHTMLCodes(name)

                    url = client.parseDOM(i, 'a', ret='href')[-1]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\)|\]|\s)(.+)',
                        '', name)
                    t = cleantitle.tv(t)
                    if not t == title: raise Exception()

                    y = re.compile('[\.|\(|\[|\s](S\d*E\d*)[\.|\)|\]|\s]'
                                   ).findall(name)[-1]
                    if not any(x == y for x in hdlr): raise Exception()

                    fmt = re.sub('(.+)(\.|\(|\[|\s)(S\d*E\d*)(\.|\)|\]|\s)',
                                 '', name)
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [x.lower() for x in fmt]

                    if '1080p' in fmt: raise Exception()

                    info = ''
                    size = client.parseDOM(i, 'td')
                    size = [x for x in size if x.endswith((' MB', ' GB'))]
                    if len(size) > 0:
                        size = size[-1]
                        if size.endswith(' GB'): div = 1
                        else: div = 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        info += '%.2f GB' % size

                    links.append({'url': url, 'info': info})
                except:
                    pass

            for i in links[::-1][:2]:
                try:
                    result = client.source(i['url'])
                    result = client.parseDOM(result,
                                             'td',
                                             attrs={'class': 'td_cols'})[0]
                    result = result.split('"td_heads"')
                    result = client.parseDOM(result, 'a', ret='href')

                    for url in result:
                        try:
                            if 'go4up.com' in url:
                                url = re.compile('//.+?/.+?/([\w]+)').findall(
                                    url)[0]
                                url = client.source(self.go4up_link_2 % url)
                                url = client.parseDOM(url,
                                                      'div',
                                                      attrs={'id':
                                                             'linklist'})[0]
                                url = client.parseDOM(url, 'a', ret='href')[0]

                            host = urlparse.urlparse(url).netloc
                            host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                            host = host.strip().lower()

                            if host not in [
                                    'uptobox', 'hugefiles', 'uploadrocket'
                            ]:
                                raise Exception()

                            if host == 'hugefiles':
                                check = hugefiles.check(url)
                            elif host == 'uploadrocket':
                                check = uploadrocket.check(url)
                            elif host == 'uptobox':
                                check = uptobox.check(url)

                            if check == False: raise Exception()

                            sources.append({
                                'source': host,
                                'quality': 'HD',
                                'provider': 'TVrelease',
                                'url': url,
                                'info': i['info']
                            })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
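
The size handling above normalizes the listing's size column to GB before attaching it as link info: values ending in MB are divided by 1024 and stray characters are stripped with a character-class regex. A standalone sketch (to_gb and the sample strings are invented):

import re

def to_gb(size):
    # '700 MB' -> 0.68, '1.4 GB' -> 1.40
    div = 1 if size.endswith(' GB') else 1024
    return float(re.sub('[^0-9|/.|/,]', '', size)) / div

for s in ('700 MB', '1.4 GB'):
    print '%s -> %.2f GB' % (s, to_gb(s))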
Esempio n. 53
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        #sources.append({'source': 'gvideo', 'quality': s['quality'],'url': s['url'],'provider': 'Yesmovies'})
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], '',
                                      headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], '',
                                       headers)

            mid = re.findall('-(\d+)', url)[-1]

            try:
                headers = {'Referer': url}
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = client.request(u, headers=headers, XHR=True)
                r = json.loads(r)['html']
                r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                ids = client.parseDOM(r, 'li', ret='data-id')
                servers = client.parseDOM(r, 'li', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)
                for eid in r:
                    try:
                        ep = re.findall('episode.*?(\d+).*?',
                                        eid[2].lower())[0]
                    except:
                        ep = 0
                    if (episode == 0) or (int(ep) == episode):
                        url = urlparse.urljoin(self.base_link,
                                               self.token_link % (eid[0], mid))
                        script = client.request(url)
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        elif '_x=' in script:
                            x = re.search('''_x=['"]([^"']+)''',
                                          script).group(1)
                            y = re.search('''_y=['"]([^"']+)''',
                                          script).group(1)
                            params = {'x': x, 'y': y}
                        else:
                            raise Exception()

                        u = urlparse.urljoin(
                            self.base_link, self.source_link %
                            (eid[0], params['x'], params['y']))
                        r = client.request(u, XHR=True)
                        url = json.loads(r)['playlist'][0]['sources']
                        url = [i['file'] for i in url if 'file' in i]
                        #url = [i[0] for i in url if i]
                        #for i in url: print "i", i

                        links = []
                        links += [{
                            'source': 'gvideo',
                            'url': i,
                            'quality': client.googletag(i)[0]['quality']
                        } for i in url if 'google' in i]

                        for i in links:
                            sources.append({
                                'source': i['source'],
                                'quality': i['quality'],
                                'provider': 'Yesmovies',
                                'url': i['url']
                            })

            except:
                pass

            return sources
        except Exception as e:
            control.log('Yes sources error:%s' % e)
            return sources
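
Of the three token-script formats probed above, the simplest embeds the two request parameters as _x/_y assignments and is read back with re.search. A sketch of just that branch; the script body is fabricated:

import re

script = '''var _x="abc123"; var _y='def456'; player.load(_x, _y);'''

x = re.search('''_x=['"]([^"']+)''', script).group(1)
y = re.search('''_y=['"]([^"']+)''', script).group(1)
params = {'x': x, 'y': y}
print params['x'], params['y']  # abc123 def456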
Esempio n. 54
0
    def get_episode(self,
                    url,
                    imdb,
                    tvdb,
                    title,
                    year,
                    season,
                    episode,
                    proxy_options=None,
                    key=None):
        try:
            #print "PRIMEWIRE get_episode %s" % url

            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            #result = proxies.request(url, 'tv_episode_item', proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
            result = proxies.request(url,
                                     proxy_options=proxy_options,
                                     use_web_proxy=self.proxyrequired)
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'tv_episode_item'})

            title = cleantitle.get(title)

            result = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i,
                                       'span',
                                       attrs={'class': 'tv_episode_name'}),
                       re.compile('(\d{4}-\d{2}-\d{2})').findall(i))
                      for i in result]
            result = [
                (i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0
            ] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [
                (i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0
            ] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [
                i for i in result
                if title == cleantitle.get(i[1]) and year == i[2]
            ][:1]
            if len(url) == 0: url = [i for i in result if year == i[2]]
            if len(url) == 0 or len(url) > 1:
                url = [
                    i for i in result if 'season-%01d-episode-%01d' %
                    (int(season), int(episode)) in i[0]
                ]

            url = client.replaceHTMLCodes(url[0][0])
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except:
                pass
            try:
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            except:
                pass
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
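
The last-resort filter above matches on the URL slug, which encodes season and episode as season-N-episode-M. A tiny sketch of that fallback over invented hrefs:

season, episode = 2, 3
candidates = ['/show/season-2-episode-3-title', '/show/season-2-episode-13-title']

slug = 'season-%01d-episode-%01d' % (int(season), int(episode))
print [c for c in candidates if slug in c][0]  # the episode 3 link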
Esempio n. 55
0
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                return sources

            myts = str(((int(time.time()) / 3600) * 3600))
            log('INFO', 'get_sources-1', 'url: %s' % url, dolog=False)
            token_error = False
            urls = []
            result = None

            if not str(url).startswith('http'):
                try:
                    data = urlparse.parse_qs(url)
                    data = dict([(i, data[i][0]) if data[i] else (i, '')
                                 for i in data])

                    title = data[
                        'tvshowtitle'] if 'tvshowtitle' in data else data[
                            'title']

                    try:
                        year = re.findall(
                            '(\d{4})', data['premiered']
                        )[0] if 'tvshowtitle' in data else data['year']
                    except:
                        try:
                            year = data['year']
                        except:
                            year = None
                    try:
                        episode = data['episode']
                    except:
                        pass

                    query = {'keyword': title}
                    search_url = urlparse.urljoin(self.base_link, '/search')
                    search_url = search_url + '?' + urllib.urlencode(query)
                    result = proxies.request(search_url,
                                             headers=self.headers,
                                             proxy_options=proxy_options,
                                             use_web_proxy=self.proxyrequired,
                                             httpsskip=True)

                    log('INFO',
                        'get_sources-2',
                        '%s' % search_url,
                        dolog=False)

                    rs = client.parseDOM(
                        result, 'div', attrs={'class':
                                              '[^"]*film-list[^"]*'})[0]
                    #print rs

                    r = client.parseDOM(rs, 'div', attrs={'class': 'item'})
                    #print r

                    r = [(client.parseDOM(i, 'a', ret='href'),
                          client.parseDOM(i, 'a', attrs={'class': 'name'}))
                         for i in r]
                    #print r

                    r = [(i[0][0], i[1][0]) for i in r
                         if len(i[0]) > 0 and len(i[1]) > 0]
                    #print r

                    r = [(re.sub('http.+?//.+?/', '/',
                                 i[0]), re.sub('&#\d*;', '', i[1])) for i in r]
                    #print r

                    if 'season' in data:
                        r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r]

                        possible_hits = []
                        for i in r:
                            if cleantitle.get(title).lower() == cleantitle.get(
                                    i[1]).lower():
                                possible_hits.append((i[0], [[i[1], u'1']]))

                        #title += '%01d' % int(data['season'])
                        url = [(i[0], re.findall('(.+?) (\d+)$', i[1]))
                               for i in r]

                        for i in possible_hits:
                            url.append(i)

                        url = [(i[0], i[1][0][0], i[1][0][1]) for i in url
                               if len(i[1]) > 0]

                        url = [
                            i for i in url
                            if cleantitle.get(title) in cleantitle.get(i[1])
                        ]

                        url = [
                            i for i in url if '%01d' %
                            int(data['season']) == '%01d' % int(i[2])
                        ]

                        if len(url) == 0:
                            url = [
                                i for i in r if cleantitle.get(title) ==
                                cleantitle.get(i[1])
                            ]
                        if len(url) == 0:
                            url = [
                                i for i in r if cleantitle.get(title) ==
                                cleantitle.get(i[1] + str(data['season']))
                            ]
                    else:
                        url = [
                            i for i in r
                            if cleantitle.get(title) in cleantitle.get(i[1])
                        ]

                    if len(url) == 0:
                        log(
                            'FAIL', 'get_sources',
                            'Could not find a matching title: %s' %
                            cleantitle.title_from_key(key))
                        return sources

                    for urli in url:
                        url = urli[0]
                        url = urlparse.urljoin(self.base_link, url)
                        urls.append(url)

                except Exception as e:
                    raise Exception(e)
            else:
                # url is already absolute; probe it directly
                urls = [url]

            for url in urls:
                try:
                    try:
                        url, episode = re.compile(
                            '(.+?)\?episode=(\d*)$').findall(url)[0]
                    except:
                        pass

                    log('INFO', 'get_sources-3', url, dolog=False)

                    referer = url
                    result = resultT = proxies.request(
                        url,
                        headers=self.headers,
                        limit='0',
                        proxy_options=proxy_options,
                        use_web_proxy=self.proxyrequired,
                        httpsskip=True)

                    # raises (skipping this url) if the page has no <title>
                    alina = client.parseDOM(result, 'title')[0]

                    atr = [
                        i for i in client.parseDOM(result, 'title')
                        if len(re.findall('(\d{4})', i)) > 0
                    ][-1]
                    if 'season' in data:
                        years = [
                            '%s' % str(year),
                            '%s' % str(int(year) + 1),
                            '%s' % str(int(year) - 1)
                        ]
                        mychk = False
                        for y in years:
                            if y in atr:
                                mychk = True
                        result = result if mychk == True else None
                        if mychk == True:
                            break
                    else:
                        result = result if year in atr else None

                    if result != None:
                        break
                except Exception as e:
                    log('FAIL',
                        'get_sources-3',
                        '%s : %s' % (url, e),
                        dolog=False)

            if result == None:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
                return sources

            try:
                myts1 = re.findall(r'data-ts="(.*?)"', result)[0]
                myts = str(int(myts1))
            except:
                try:
                    b, resp = self.decode_ts(myts1)
                    if b == False:
                        raise Exception('Could not decode ts')
                    else:
                        myts = str(int(resp))
                        log('INFO',
                            'get_sources-3',
                            'could not parse ts ! will try and use decoded : %s'
                            % myts,
                            dolog=False)
                except:
                    if self.serverts != None:
                        myts = str(self.serverts)
                        log('INFO',
                            'get_sources-3',
                            'could not parse ts ! will use borrowed one : %s' %
                            myts,
                            dolog=False)
                    else:
                        log('INFO',
                            'get_sources-3',
                            'could not parse ts ! will use generated one : %s'
                            % myts,
                            dolog=False)

            trailers = []
            links_m = []

            if testing == False:
                try:
                    matches = re.compile(
                        'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
                    ).findall(result)
                    for match in matches:
                        try:
                            if 'youtube.com' in match:
                                match = match.replace('embed/', 'watch?v=')
                                trailers.append(match)
                        except:
                            pass
                except Exception as e:
                    pass

                for trailer in trailers:
                    links_m = resolvers.createMeta(trailer,
                                                   self.name,
                                                   self.logo,
                                                   '720p',
                                                   links_m,
                                                   key,
                                                   vidtype='Trailer',
                                                   testing=testing)

            riptype = None
            try:
                quality = client.parseDOM(result,
                                          'span',
                                          attrs={'class':
                                                 'quality'})[0].lower()
            except:
                quality = 'hd'
            if quality == 'cam' or quality == 'ts':
                quality = '480p'
                riptype = 'CAM'
            elif quality == 'hd' or 'hd ' in quality:
                quality = '720p'
                riptype = 'BRRIP'
            else:
                quality = '480p'
                riptype = 'BRRIP'

            result_servers = self.get_servers(url, proxy_options=proxy_options)

            try:
                server_no = client.parseDOM(result_servers,
                                            'div',
                                            attrs={'class': 'server active'},
                                            ret='data-name')[0]
            except:
                server_no = []

            result_servers = client.parseDOM(result_servers,
                                             'ul',
                                             attrs={'data-range-id': "0"})

            servers = []
            #servers = client.parseDOM(result, 'li', attrs = {'data-type': 'direct'})
            servers = zip(client.parseDOM(result_servers, 'a', ret='data-id'),
                          client.parseDOM(result_servers, 'a'))

            servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers]
            servers = [(i[0], ''.join(i[1][:1])) for i in servers]

            try:
                servers = [
                    i for i in servers
                    if '%01d' % int(i[1]) == '%01d' % int(episode)
                ]
            except:
                pass

            for s in servers[:len(servers)]:
                try:

                    headers = {'X-Requested-With': 'XMLHttpRequest'}
                    hash_url = urlparse.urljoin(self.base_link, self.hash_link)
                    query = {
                        'ts': myts,
                        'id': s[0],
                        'update': '0',
                        'server': str(server_no)
                    }

                    query.update(self.__get_token(query))
                    hash_url = hash_url + '?' + urllib.urlencode(query)
                    headers['Referer'] = urlparse.urljoin(url, s[0])
                    headers['Cookie'] = self.headers['Cookie']

                    log('INFO', 'get_sources-4', '%s' % hash_url, dolog=False)
                    result = proxies.request(hash_url,
                                             headers=headers,
                                             limit='0',
                                             proxy_options=proxy_options,
                                             use_web_proxy=self.proxyrequired,
                                             httpsskip=True)
                    result = json.loads(result)

                    if 'error' in result and result['error'] == True:
                        token_error = True
                        query.update(
                            self.__get_token(query, token_error=token_error))
                        hash_url = hash_url + '?' + urllib.urlencode(query)
                        result = proxies.request(
                            hash_url,
                            headers=headers,
                            limit='0',
                            proxy_options=proxy_options,
                            use_web_proxy=self.proxyrequired,
                            httpsskip=True)
                        result = json.loads(result)

                        query = {'id': s[0], 'update': '0'}
                        query.update(
                            self.__get_token(query, token_error=token_error))
                    else:
                        token_error = False
                        queryx = {'id': s[0], 'update': '0'}
                        query.update(self.__get_token(queryx))

                    url = url + '?' + urllib.urlencode(query)
                    #result = client2.http_get(url, headers=headers)

                    #quality = '360p'
                    if '1080' in s[1]:
                        quality = '1080p'
                        #riptype = 'BRRIP'
                    elif '720' in s[1] or 'hd' in s[1].lower():
                        quality = '720p'
                        #riptype = 'BRRIP'
                    elif '480' in s[1]:
                        quality = '480p'
                        #riptype = 'BRRIP'
                    elif 'cam' in s[1].lower() or 'ts' in s[1].lower():
                        quality = '480p'
                        #riptype = 'CAM'
                    else:
                        quality = '480p'
                        #riptype = 'CAM'

                    log('INFO', 'get_sources-5', result, dolog=False)

                    if result['target'] != "-":
                        pass
                    else:
                        grabber = result['grabber']
                        grab_data = grabber
                        grabber_url = urlparse.urljoin(self.base_link,
                                                       self.grabber_api)

                        if '?' in grabber:
                            grab_data = grab_data.split('?')
                            grabber_url = grab_data[0]
                            grab_data = grab_data[1]

                        grab_server = str(
                            urlparse.parse_qs(grab_data)['server'][0])

                        b, resp = self.decode_t(result['params']['token'])
                        if b == False:
                            raise Exception(resp)
                        token = resp
                        b, resp = self.decode_t(result['params']['options'])
                        if b == False:
                            raise Exception(resp)
                        options = resp

                        grab_query = {
                            'ts': myts,
                            grabber_url: '',
                            'id': result['params']['id'],
                            'server': grab_server,
                            'mobile': '0',
                            'token': token,
                            'options': options
                        }
                        tk = self.__get_token(grab_query, token_error)

                        if tk == None:
                            raise Exception('video token algo')
                        grab_info = {'token': token, 'options': options}
                        del query['server']
                        query.update(grab_info)
                        query.update(tk)

                        sub_url = result['subtitle']
                        if sub_url == None or len(sub_url) == 0:
                            sub_url = None

                        if '?' in grabber:
                            grabber += '&' + urllib.urlencode(query)
                        else:
                            grabber += '?' + urllib.urlencode(query)

                        if grabber != None and not grabber.startswith('http'):
                            grabber = 'http:' + grabber

                        log('INFO', 'get_sources-6', grabber, dolog=False)

                        result = proxies.request(
                            grabber,
                            headers=headers,
                            referer=url,
                            limit='0',
                            proxy_options=proxy_options,
                            use_web_proxy=self.proxyrequired,
                            httpsskip=True)

                        result = json.loads(result)

                    if 'data' in result.keys():
                        result = [
                            i['file'] for i in result['data'] if 'file' in i
                        ]

                        for i in result:
                            links_m = resolvers.createMeta(i,
                                                           self.name,
                                                           self.logo,
                                                           quality,
                                                           links_m,
                                                           key,
                                                           riptype,
                                                           sub_url=sub_url,
                                                           testing=testing)
                    else:
                        target = result['target']
                        b, resp = self.decode_t(target)
                        if b == False:
                            raise Exception(resp)
                        target = resp
                        sub_url = result['subtitle']
                        if sub_url == None or len(sub_url) == 0:
                            sub_url = None

                        if target != None and not target.startswith('http'):
                            target = 'http:' + target

                        links_m = resolvers.createMeta(target,
                                                       self.name,
                                                       self.logo,
                                                       quality,
                                                       links_m,
                                                       key,
                                                       riptype,
                                                       sub_url=sub_url,
                                                       testing=testing)

                except Exception as e:
                    log('FAIL', 'get_sources-7', '%s' % e, dolog=False)

            sources += [l for l in links_m]

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
                return sources

            log('SUCCESS',
                'get_sources',
                '%s sources : %s' %
                (cleantitle.title_from_key(key), len(sources)),
                dolog=not testing)
            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
            return sources
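
The generated myts fallback above is just the current Unix time truncated to the start of the hour; the site's token timestamps appear to be bucketed that way, so a fresh value from the same hour usually still validates. Note the snippet relies on Python 2 integer division:

import time

# floor to the current hour, then rescale back to seconds
myts = str((int(time.time()) / 3600) * 3600)
print myts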
Esempio n. 56
0
    def get_movie(self,
                  imdb,
                  title,
                  year,
                  proxy_options=None,
                  key=None,
                  testing=False):
        try:
            #print "PRIMEWIRE get_movie %s" % title
            result = None
            query = urlparse.urljoin(self.base_link, self.key_link)
            #print "key ------------ %s" % key

            #key = proxies.request(key, 'searchform', proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
            query = proxies.request(query,
                                    proxy_options=proxy_options,
                                    use_web_proxy=self.proxyrequired)
            self.log('SUCCESS',
                     'get_movie-1',
                     'query',
                     dolog=testing,
                     disp=False)

            query = client.parseDOM(query,
                                    'input',
                                    ret='value',
                                    attrs={'name': 'key'})[0]
            self.log('SUCCESS',
                     'get_movie-1b',
                     'query',
                     dolog=testing,
                     disp=False)
            #print "key ------------ %s" % key

            query = self.moviesearch_link % (urllib.quote_plus(
                cleantitle.query(title)), query)
            self.log('SUCCESS',
                     'get_movie-1c',
                     'query',
                     dolog=testing,
                     disp=False)

            query = urlparse.urljoin(self.base_link, query)
            self.log('SUCCESS',
                     'get_movie-1d',
                     'query',
                     dolog=testing,
                     disp=False)

            #result = str(proxies.request(query, 'index_item', proxy_options=proxy_options, use_web_proxy=self.proxyrequired))
            result = proxies.request(query,
                                     proxy_options=proxy_options,
                                     use_web_proxy=self.proxyrequired)
            self.log('SUCCESS',
                     'get_movie-2',
                     'result',
                     dolog=testing,
                     disp=False)

            #if 'page=2' in result or 'page%3D2' in result: result += str(proxies.request(query + '&page=2', 'index_item', proxy_options=proxy_options, use_web_proxy=self.proxyrequired))
            if 'page=2' in result or 'page%3D2' in result:
                result += str(
                    proxies.request(query + '&page=2',
                                    proxy_options=proxy_options,
                                    use_web_proxy=self.proxyrequired))
                self.log('SUCCESS',
                         'get_movie-3',
                         '',
                         dolog=testing,
                         disp=False)

            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'index_item.+?'})

            title = 'watch' + cleantitle.get(title)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]

            result = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]

            r = []
            for i in result:
                u = i[0]
                try:
                    u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
                except:
                    pass
                try:
                    u = urlparse.parse_qs(urlparse.urlparse(u).query)['q'][0]
                except:
                    pass
                r += [(u, i[1])]

            #print result
            match = [
                i[0] for i in r if title == cleantitle.get(i[1])
                and self.lose_match_year(year, i[1])
            ]

            #print "match %s" % match

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            #print "match2 %s" % match2

            url = ''
            if len(match2) == 1:
                url = self.base_link + match2[0]
            else:
                for i in match2[:5]:
                    try:
                        if len(match) > 0:
                            url = match[0]
                            break
                        #r = proxies.request(urlparse.urljoin(self.base_link, i), 'choose_tabs', proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
                        r = proxies.request(urlparse.urljoin(
                            self.base_link, i),
                                            proxy_options=proxy_options,
                                            use_web_proxy=self.proxyrequired)
                        self.log('SUCCESS',
                                 'get_movie-4',
                                 'r',
                                 dolog=testing,
                                 disp=False)
                        if imdb != None and imdb in str(r):
                            url = i
                            break
                        r = client.parseDOM(r,
                                            'div',
                                            attrs={'class': 'movie_info'})
                        #print "tag -- %s" % r
                        if year != None and year in str(r):
                            url = i
                            break
                    except:
                        pass

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            #print "PRIMEWIRE get_movie %s" % url

            self.log('SUCCESS', 'get_movie', '%s' % url, dolog=testing)
            return url
        except Exception as e:
            self.log('ERROR', 'get_movie', '%s' % e, dolog=testing)
            self.log('ERROR', 'get_movie', '%s' % '-- query --', dolog=testing)
            self.log('ERROR', 'get_movie', '%s' % query, dolog=testing)
            self.log('ERROR',
                     'get_movie',
                     '%s' % '-- result --',
                     dolog=testing)
            self.log('ERROR', 'get_movie', '%s' % result, dolog=testing)
            return
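
get_movie above makes an extra round trip just to lift a hidden key input from the search form, then interpolates it into moviesearch_link; presumably the search endpoint rejects queries without that token. A regex-only sketch of lifting such a token (form markup and URL format invented):

import re, urllib

form = '<form id="searchform"><input type="hidden" name="key" value="8kF3q"></form>'

key = re.search('name="key" value="([^"]+)"', form).group(1)
print '/index.php?search_keywords=%s&key=%s' % (
    urllib.quote_plus('blade runner'), key)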
Esempio n. 57
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url)
            #print result

            result = result.replace('\n', '')

            quality = re.compile('>Links - Quality(.+?)<').findall(result)[0]
            quality = quality.strip()
            print("Q", quality)

            if quality == 'CAM' or quality == 'TS': quality = 'CAM'
            elif quality == 'SCREENER': quality = 'SCR'
            else: quality = 'SD'

            links = client.parseDOM(result, 'div', attrs={'id': 'links'})[0]
            links = links.split('link_name')

            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')[0]
                    try:
                        url = urlparse.parse_qs(
                            urlparse.urlparse(url).query)['u'][0]
                    except:
                        pass
                    try:
                        url = urlparse.parse_qs(
                            urlparse.urlparse(url).query)['q'][0]
                    except:
                        pass
                    url = urlparse.urlparse(url).query
                    url = base64.b64decode(url)
                    url = re.findall('((?:http|https)://.+?/.+?)(?:&|$)',
                                     url)[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    print("URL1", url)

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    try:
                        host = host.split('.')[0]
                    except:
                        pass

                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    print("URL4", host)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'Movie25',
                        'url': url
                    })

                except:
                    pass

            return sources
        except:
            return sources
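
The host whitelisting above keys on the registrable domain, pulled from the URL's netloc with a two-label regex and then reduced to its first label for display. A sketch of that normalization; host_of and the sample URL are made up:

import re, urlparse

def host_of(url):
    # keep the last two dot-separated labels of the netloc, e.g. 'openload.co'
    netloc = urlparse.urlparse(url.strip().lower()).netloc
    return re.findall('([\w]+[.][\w]+)$', netloc)[0]

host = host_of('http://www.openload.co/embed/abc')
print host, '->', host.split('.')[0]  # openload.co -> openload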
Esempio n. 58
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title'].replace(':', '').lower()
            year = data['year']

            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.post_link)

            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(
                query)

            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [(dom_parser2.parse_dom(i,
                                        'div',
                                        attrs={'class': 'news-title'}))
                 for i in r if data['imdb'] in i]
            r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r
                 if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]

            hostDict = hostprDict + hostDict

            for item in r:
                try:
                    name = item[1]
                    y = re.findall('\((\d{4})\)', name)[0]
                    if not y == year: raise Exception()

                    s = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                        name)
                    s = s[0] if s else '0'
                    data = client.request(item[0])
                    data = dom_parser2.parse_dom(data,
                                                 'div',
                                                 attrs={'id': 'r-content'})
                    data = re.findall(
                        '\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
                        data[0].content, re.DOTALL)
                    u = [(i[0], i[1], s) for i in data if i]

                    for name, url, size in u:
                        try:
                            if '4K' in name:
                                quality = '4K'
                            elif '1080p' in name:
                                quality = '1080p'
                            elif '720p' in name:
                                quality = '720p'
                            elif any(i in name
                                     for i in ['dvdscr', 'r5', 'r6']):
                                quality = 'SCR'
                            elif any(i in name for i in [
                                    'camrip', 'tsrip', 'hdcam', 'hdts',
                                    'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'
                            ]):
                                quality = 'CAM'
                            else:
                                quality = '720p'

                            info = []
                            if '3D' in name or '.3D.' in url:
                                info.append('3D')
                                quality = '1080p'
                            if any(i in name
                                   for i in ['hevc', 'h265', 'x265']):
                                info.append('HEVC')
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                                    size)[-1]
                                # Normalize the reported size to gigabytes;
                                # comma decimals become dots before float().
                                div = 1 if size.endswith(
                                    ('Gb', 'GiB', 'GB')) else 1024
                                size = float(re.sub(
                                    '[^0-9.]', '',
                                    size.replace(',', '.'))) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass

                            info = ' | '.join(info)

                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            if any(x in url
                                   for x in ['.rar', '.zip', '.iso', 'turk']):
                                continue

                            if 'ftp' in url:
                                host = 'COV'
                                direct = True
                            else:
                                direct = False
                                host = 'turbobit.net'
                            # if not host in hostDict: continue

                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')

                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': direct,
                                'debridonly': False
                            })

                        except:
                            pass
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('UltraHD - Exception: \n' + str(failure))
            return sources
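The size handling in this example is easy to get wrong (comma decimals, MB vs. GB units), so here is the normalization step on its own. A small sketch; normalize_size is a hypothetical helper and the sample labels are made up:

import re

def normalize_size(label):
    # Pull the last 'number + unit' token and convert it to gigabytes.
    m = re.findall(
        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', label)
    if not m:
        return None
    size = m[-1]
    div = 1 if size.endswith(('Gb', 'GiB', 'GB')) else 1024
    value = float(re.sub('[^0-9.]', '', size.replace(',', '.'))) / div
    return '%.2f GB' % value

print normalize_size('Movie.2017.1080p 1,4 GB')   # 1.40 GB
print normalize_size('Movie.2017.720p 700 MB')    # 0.68 GB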
Example n. 59
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []
            agent = cache.get(client.randomagent, 180)

            if url == None: return sources

            if '?episode=' in url:
                # TV episode: the episode number rides on the URL as a
                # '?episode=' suffix.
                try:
                    url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
                except:
                    return sources
                episode_num = 'episode %02d:' % int(episode)
                url = urlparse.urljoin(self.base_link, url)
                headers = {'Referer': url, 'User-Agent': agent}
                r, headers, content, cookie = client.request(url,
                                                             limit='0',
                                                             output='extended',
                                                             headers=headers)
                u = client.parseDOM(r,
                                    'a',
                                    ret='href',
                                    attrs={'class':
                                           'mod-btn mod-btn-watch'})[0]
                headers['Referer'] = u
                mid, episode, server = re.findall(
                    '-(\d+)/(\d+)-(\d+)/watching\.html$', u)[0]
                u = urlparse.urljoin(self.base_link,
                                     self.series_link % (mid, server, episode))
                headers['X-Requested-With'] = 'XMLHttpRequest'
                r = client.request(u, headers=headers, cookie=cookie)
                # Pair each episode item's onclick handler with its title.
                r = zip(
                    client.parseDOM(r,
                                    'li',
                                    ret='onclick',
                                    attrs={'class': 'episode-item '}),
                    client.parseDOM(r, 'li', attrs={'class': 'episode-item '}))
                r = [(i[0], client.parseDOM(i[1], 'a', ret='title')[0])
                     for i in r]
                # Keep the load_episode(id, server) arguments and the
                # leading 'episode NN:' label from each title.
                r = [(re.findall('load_episode\((\d+),(\d+)\)',
                                 i[0])[0], re.findall('(.+?:)',
                                                      i[1].lower())[0])
                     for i in r]
                r = [i[0] for i in r if str(i[1]) == episode_num]

            else:

                url = urlparse.urljoin(self.base_link, url)
                headers = {'Referer': url, 'User-Agent': agent}
                r, headers, content, cookie = client.request(url,
                                                             limit='0',
                                                             output='extended',
                                                             headers=headers)
                u = client.parseDOM(r,
                                    'a',
                                    ret='href',
                                    attrs={'class':
                                           'mod-btn mod-btn-watch'})[0]
                headers['Referer'] = u
                mid, episode, server = re.findall(
                    '-(\d+)/(\d+)-(\d+)/watching\.html$', u)[0]
                u = urlparse.urljoin(self.base_link,
                                     self.server_link % (mid, server, episode))
                headers['X-Requested-With'] = 'XMLHttpRequest'
                r = client.request(u, headers=headers, cookie=cookie)
                r = re.findall('onclick=\"load_episode\((\d+),(\d+)\)\"', r)
            links = []

            for i in r:
                try:
                    key_gen = self.random_generator()
                    episode_id = i[0]
                    hash_id = self.uncensored(episode_id + self.di8j1v[56:80],
                                              key_gen)
                    cookie = '%s%s%s=%s' % (self.di8j1v[12:24], episode_id,
                                            self.di8j1v[34:46], key_gen)
                    request_url2 = self.base_link + '/ajax/v2_get_sources/' + episode_id + '.html?hash=' + urllib.quote(
                        hash_id)
                    headers = {
                        'Cookie': cookie,
                        'Referer': headers['Referer'] + '\+' + cookie,
                        'x-requested-with': 'XMLHttpRequest',
                        'User-Agent': agent
                    }
                    result = client.request(request_url2, headers=headers)
                    #print "RESULT", result, request_url2
                    q = json.loads(result)['playlist'][0]['sources']
                    for j in q:
                        links.append(client.googletag(j['file'])[0])
                except:
                    pass

            for i in links:
                print "IIIIIIIIIIIIIIIIIIIIIIIII", i
                sources.append({
                    'source': 'gvideo',
                    'quality': i['quality'],
                    'provider': 'Yesmovies',
                    'url': i['url']
                })

            return sources
        except Exception as e:
            control.log('ERROR Yesmo %s' % e)
            return sources
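The episode selection above hinges on matching a zero-padded 'episode NN:' label against each list item's title while harvesting the load_episode(id, server) arguments from its onclick handler. A stripped-down sketch of that filter; pick_episode and the sample item tuples are illustrative, not from the scraper:

import re

def pick_episode(items, episode):
    # items: (onclick, title) pairs scraped from the episode list.
    wanted = 'episode %02d:' % int(episode)
    out = []
    for onclick, title in items:
        ids = re.findall('load_episode\((\d+),(\d+)\)', onclick)
        label = re.findall('(.+?:)', title.lower())
        if ids and label and label[0] == wanted:
            out.append(ids[0])
    return out

items = [('load_episode(101,3)', 'Episode 01: Pilot'),
         ('load_episode(102,3)', 'Episode 02: Next')]
print pick_episode(items, '2')   # [('102', '3')]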
    def get_movie(self, imdb, title, year):
        try:

            query = self.search_link % urllib.quote_plus(
                cleantitle.query(title))
            query = urlparse.urljoin(self.base_link, query)

            result = str(self.request(query, 'movie_table'))
            if 'page=2' in result or 'page%3D2' in result:
                result += str(self.request(query + '&page=2', 'movie_table'))

            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'movie_table'})

            title = cleantitle.get(title)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]

            result = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i, 'img', ret='alt')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]

            try:
                result = [
                    (urlparse.parse_qs(urlparse.urlparse(i[0]).query)['q'][0],
                     i[1]) for i in result
                ]
            except:
                pass
            try:
                result = [
                    (urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0],
                     i[1]) for i in result
                ]
            except:
                pass
            try:
                result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
            except:
                pass

            match = [
                i[0] for i in result
                if title == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]
            ]

            match2 = [i[0] for i in result]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    result = self.request(urlparse.urljoin(self.base_link, i),
                                          'link_name')
                    if imdb in str(result):
                        url = i
                        break
                except:
                    pass

            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
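The matching strategy here generalizes to most index scrapers: accept a result whose alt text contains the wanted title plus a year within one of the requested year, then fall back to probing candidate pages for the IMDB id. A minimal sketch of the year-window filter, assuming (href, alt_text) pairs; normalize stands in for cleantitle.get and both helper names are hypothetical:

import re

def normalize(title):
    # Stand-in for cleantitle.get: lowercase, alphanumerics only.
    return re.sub('[^a-z0-9]', '', title.lower())

def match_results(results, title, year):
    # Keep hits whose alt text carries a year within one of the request,
    # then require the normalized titles to match exactly.
    years = ['(%s)' % y for y in (year, int(year) + 1, int(year) - 1)]
    hits = [r for r in results if any(y in r[1] for y in years)]
    return [r[0] for r in hits
            if normalize(title) == normalize(re.sub('\(\d{4}\)', '', r[1]))]

results = [('/m/123', 'The Movie (2015)'), ('/m/456', 'Other Film (2015)')]
print match_results(results, 'The Movie', 2015)   # ['/m/123']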