Example #1
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         html = client.request(url)
         links = re.compile('<iframe width=".+?src="(.+?)"',
                            re.DOTALL).findall(html)
         for link in links:
             quality, info = source_utils.get_release_quality(link, url)
             host = link.split('//')[1].replace('www.', '')
             host = host.split('/')[0].split('.')[0].title()
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': 'en',
                 'url': link,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('MyBobMovies - sources - Exception: \n' +
                       str(failure))
         return sources
Example #2
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            url = '%s/serie/%s' % (self.base_link, url)

            r = proxy.request(url, 'tv shows')
            r = client.parseDOM(r, 'li', attrs = {'itemprop': 'episode'})

            t = cleantitle.get(title)

            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'itemprop': 'name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

            url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if not url: url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url: url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url: raise Exception() 

            url = url[0][0]
            url = proxy.parse(url)

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return
Example #3
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = proxy.request(url, 'tv shows')

            links = client.parseDOM(r, 'a', ret='href', attrs = {'target': '.+?'})
            links = [x for y,x in enumerate(links) if x not in links[:y]]

            for i in links:
                try:
                    url = i
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['r'][0]
                    url = url.decode('base64')
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return sources
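The links above are proxy redirects whose real target is base64-encoded in the `r` query parameter; `url.decode('base64')` only exists on Python 2. A minimal standalone sketch of the same decode chain using the `base64` module (the sample URL and encoded value below are made up for illustration):

import base64
import urlparse  # Python 2 module name, as used in the examples

def decode_redirect(redirect_url):
    # Pull the base64-encoded 'r' parameter out of the redirect URL
    # and decode it back to the real host link.
    encoded = urlparse.parse_qs(urlparse.urlparse(redirect_url).query)['r'][0]
    return base64.b64decode(encoded)

# 'aHR0cDovL2V4YW1wbGUuY29tL3gx' decodes to 'http://example.com/x1'
print(decode_redirect('http://proxy.example/out?r=aHR0cDovL2V4YW1wbGUuY29tL3gx'))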
Example #4
    def search(self, query_bases, options):
        i = 0
        for option in options:

            for query_base in query_bases:
                q = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query_base + option)
                q = q.replace("  ", " ").replace(" ", "+")

                log_utils.log("RLSSCN query : " + q)

                url = self.search_link % (q)
                html = requests.get(url)

                log_utils.log("RLSSCN try test " + str(i) + " - html : " + str(html))

                if html.status_code == 200:
                    log_utils.log("RLSSCN test " + str(i) + " Ok")
                    url = client.parseDOM(html.content, "h2", attrs={"class": "title"})
                    url = client.parseDOM(url, "a", ret='href')
                    log_utils.log("RLSSCN test " + str(i) + " : " + str(url))
                    if len(url) > 0:
                        html = requests.get(url[0])
                        if html.status_code == 200:
                            return html.content
                else:
                    log_utils.log("RLSSCN test " + str(i) + " return code : " + str(html.status_code) + " - next test " + str(i + 1))
                    i += 1

        return None
Example #5
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None: return sources
         urldata = urlparse.parse_qs(url)
         urldata = dict((i, urldata[i][0]) for i in urldata)
         title = urldata['title'].replace(':', ' ').lower()
         year = urldata['year']
         search_id = title.lower()
         start_url = urlparse.urljoin(self.base_link, self.search_link % (search_id.replace(' ','+') + '+' + year))
         headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
         html = client.request(start_url,headers=headers)
         Links = re.compile('a href="(.+?)" title="(.+?)"',re.DOTALL).findall(html)
         for link,name in Links:
             if title.lower() in name.lower(): 
                 if year in name:
                     holder = client.request(link,headers=headers)
                     Alternates = re.compile('<button class="text-capitalize dropdown-item" value="(.+?)"', re.DOTALL).findall(holder)
                     for alt_link in Alternates:
                         alt_url = alt_link.split("e=")[1]
                         valid, host = source_utils.is_host_valid(alt_url, hostDict)
                         sources.append({'source':host,'quality':'1080p','language': 'en','url':alt_url,'info':[],'direct':False,'debridonly':False})
                     
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('1080PMovies - Exception: \n' + str(failure))
         return sources
Example #6
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            t = cleantitle.get(tvshowtitle)

            q = urllib.quote_plus(cleantitle.query(tvshowtitle))
            p = urllib.urlencode({'term': q})

            r = client.request(self.search_link, post=p, XHR=True)
            try: r = json.loads(r)
            except: r = None

            if r:
                r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
            else:
                r = proxy.request(self.search_link_2 % q, 'tv shows')
                r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
                r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]

            r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            url = r[0][0]
            url = proxy.parse(url)

            url = url.strip('/').split('/')[-1]
            url = url.encode('utf-8')
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return
Example #7
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = {'imdb': imdb, 'title': title, 'year': year}
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('RLSSCN - Exception: \n' + str(failure))
         return
Example #8
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = {'imdb': imdb, 'title': title, 'year': year}
         url = urllib.urlencode(url)
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('UltraHD - Exception: \n' + str(failure))
         return
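Examples #7 and #8 are the two halves of a handshake used throughout these scrapers: movie() packs the metadata into an opaque url string (urlencoded in #8) and sources() later unpacks it with urlparse.parse_qs, as in Examples #5, #9 and #15. A minimal round-trip sketch with placeholder values:

import urllib
import urlparse  # Python 2 module names, matching the examples

# movie() side: pack the metadata into the opaque url string
payload = urllib.urlencode({'imdb': 'tt0000000', 'title': 'Some Title', 'year': '2018'})

# sources() side: unpack it again
data = urlparse.parse_qs(payload)
data = dict((i, data[i][0]) for i in data)
assert data['title'] == 'Some Title' and data['year'] == '2018'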
Example #9
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                ep = data['episode']
                url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
                url = client.request(url, headers=headers, timeout='10', output='geturl')

                if url is None:
                    url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)

            else:
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            if url is None: raise Exception()

            r = client.request(url, headers=headers, timeout='10')
            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
            if 'tvshowtitle' in data:
                ep = data['episode']
                links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
            else:
                links = client.parseDOM(r, 'a', ret='player-data')

            for link in links:
                if '123movieshd' in link or 'seriesonline' in link:
                    r = client.request(link, headers=headers, timeout='10')
                    r = re.findall('(https:.*?redirector.*?)[\'\"]', r)

                    for i in r:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
                else:
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                        if host not in hostDict: raise Exception()
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
                    except:
                        pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Series9 - Exception: \n' + str(failure))
            return sources
Example #10
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         url['episode'] = episode
         url['season'] = season
         url['premiered'] = premiered
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('RLSSCN - Exception: \n' + str(failure))
         return
Example #11
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         aliases.append({'country': 'us', 'title': tvshowtitle})
         url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
         url = urllib.urlencode(url)
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('Series9 - Exception: \n' + str(failure))
         return  
Example #12
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
         url = urllib.urlencode(url)
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('Series9 - Exception: \n' + str(failure))
         return  
Example #13
def resolver(url, debrid):
    try:
        debrid_resolver = [resolver for resolver in debrid_resolvers if resolver.name == debrid][0]

        debrid_resolver.login()
        _host, _media_id = debrid_resolver.get_host_and_id(url)
        stream_url = debrid_resolver.get_media_url(_host, _media_id)

        return stream_url
    except Exception as e:
        log_utils.log('%s Resolve Failure: %s' % (debrid, e), log_utils.LOGWARNING)
        return None
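A hedged usage sketch for the resolver above: debrid_resolvers comes from the surrounding module (not shown here), and the host link and service name below are placeholders:

# Ask the named debrid service to turn a host link into a direct stream URL;
# resolver() returns None on any failure, so the result must be checked.
stream_url = resolver('http://somehost.example/abc123', 'Real-Debrid')
if stream_url is not None:
    pass  # hand stream_url to the player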
Example #14
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         return urllib.urlencode({
             'imdb': imdb,
             'title': title,
             'localtitle': localtitle,
             'year': year
         })
     except:
         failure = traceback.format_exc()
         log_utils.log('Library - Exception: \n' + str(failure))
         return
Example #15
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(
                data['season']), int(data['episode']))
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r)
            r = client.parseDOM(r, 'item')
            title = client.parseDOM(r, 'title')[0]
            if hdlr in title:
                r = re.findall(
                    '<h3.+?>(.+?)</h3>\s*<h5.+?<strong>(.+?)</strong.+?h3.+?adze.+?href="(.+?)">.+?<h3',
                    r[0], re.DOTALL)
                for name, size, url in r:
                    quality, info = source_utils.get_release_quality(name, url)
                    try:
                        size = re.sub('i', '', size)
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SeriesCR - Exception: \n' + str(failure))
            return sources
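The size-normalization block above (strip the 'i', divide megabyte values by 1024, format as gigabytes) recurs across these scrapers. The same logic as a standalone sketch; the comma-to-dot handling is an added assumption for European-style sizes:

import re

def to_gb(size_text):
    # '700 MB' -> '0.68 GB'; '1.4 GiB' -> '1.40 GB'; None if unparseable
    try:
        div = 1 if size_text.endswith(('GB', 'GiB')) else 1024
        value = float(re.sub('[^0-9.,]', '', size_text).replace(',', '.')) / div
        return '%.2f GB' % value
    except Exception:
        return None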
Example #16
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         url = {
             'imdb': imdb,
             'tvdb': tvdb,
             'tvshowtitle': tvshowtitle,
             'year': year
         }
         url = urllib.urlencode(url)
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('RLSBB - Exception: \n' + str(failure))
         return
Example #17
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % (title.replace(':', ' ').replace(' ', '+'))

            search_results = client.request(url)
            match = re.compile('<h1><a href="(.+?)" rel="bookmark">(.+?)</a></h1>',re.DOTALL).findall(search_results)
            for item_url,item_title in match:
                if cleantitle.get(title) in cleantitle.get(item_title):
                    if year in str(item_title):
                        return item_url
            return
        except:
            failure = traceback.format_exc()
            log_utils.log('CoolMovieZone - Exception: \n' + str(failure))
            return
Example #18
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources

            html = client.request(url)
            Links = re.compile('<td align="center"><strong><a href="(.+?)"',re.DOTALL).findall(html)
            for link in Links:
                host = link.split('//')[1].replace('www.','')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('CoolMovieZone - Exception: \n' + str(failure))
            return sources
Example #19
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None:
                return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url.update({
                'premiered': premiered,
                'season': season,
                'episode': episode
            })
            return urllib.urlencode(url)
        except:
            failure = traceback.format_exc()
            log_utils.log('Library - Exception: \n' + str(failure))
            return
Example #20
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(search))
         r = client.request(url, headers=headers, timeout='15')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]  # \d+ so double-digit seasons match
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
         url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('Series9 - Exception: \n' + str(failure))
         return  
Example #21
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            search_id = title.replace(':', ' ').replace(' ', '+').lower()
            start_url = urlparse.urljoin(self.base_link,
                                         self.search_link % (search_id))

            search_results = client.request(start_url)
            match = re.compile('<header>.+?href="(.+?)" title="(.+?)"',
                               re.DOTALL).findall(search_results)
            for item_url, item_title in match:
                movie_name, movie_year = re.findall("(.*?)(\d+)",
                                                    item_title)[0]
                if not cleantitle.get(title) == cleantitle.get(movie_name):
                    continue
                if year not in movie_year:
                    continue
                return item_url
        except:
            failure = traceback.format_exc()
            log_utils.log('HDPopcorn - Exception: \n' + str(failure))
            return
Example #22
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         search_id = title.lower()
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(':', '%3A').replace(
             '&', '%26').replace("'", '%27').replace(' ', '+').replace(
                 '...', ' '))
         search_results = client.request(url)
         match = re.compile('<div id="post.+?href="(.+?)".+?title="(.+?)"',
                            re.DOTALL).findall(search_results)
         for movie_url, movie_title in match:
             clean_title = cleantitle.get(title)
             movie_title = movie_title.replace('&#8230', ' ').replace(
                 '&#038', ' ').replace('&#8217', ' ').replace('...', ' ')
             clean_movie_title = cleantitle.get(movie_title)
             if clean_movie_title in clean_title:
                 return movie_url
         return
     except:
         failure = traceback.format_exc()
         log_utils.log('MyBobMovies - movie - Exception: \n' + str(failure))
         return
Example #23
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(
             self.base_link, self.search_link %
             urllib.quote_plus(cleantitle.getsearch(search)))
         log_utils.log('searchShow URL: %s' % str(url), log_utils.LOGNOTICE)
         r = client.request(url, headers=headers, timeout='15')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'),
                 client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1]))
              for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [
             i[0] for i in r
             if self.matchAlias(i[2][0], aliases) and i[2][1] == season
         ][0]
         return url
     except:
         return
Example #24
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources

            html = client.request(url)

            Links = re.compile('<source.+?src="(.+?)"',
                               re.DOTALL).findall(html)
            for link in Links:
                sources.append({
                    'source': 'BNW',
                    'quality': 'SD',
                    'language': 'en',
                    'url': link,
                    'direct': True,
                    'debridonly': False
                })
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('BNWMovies - Exception: \n' + str(failure))
            return sources
Example #25
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
            r = client.request(url, headers=headers, timeout='15')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            except:
                url = None

            if url is None:
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]

            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('Series9 - Exception: \n' + str(failure))
            return  
Example #26
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            scrape = title.lower().replace(' ', '+').replace(':', '')

            start_url = self.search_link % (self.goog, scrape, year)

            html = client.request(start_url)
            results = re.compile('href="(.+?)"', re.DOTALL).findall(html)
            for url in results:
                if self.base_link in url:
                    if 'webcache' in url:
                        continue
                    if cleantitle.get(title) in cleantitle.get(url):
                        chkhtml = client.request(url)
                        chktitle = re.compile('<title.+?>(.+?)</title>',
                                              re.DOTALL).findall(chkhtml)[0]
                        if cleantitle.get(title) in cleantitle.get(chktitle):
                            if year in chktitle:
                                return url
            return
        except:
            failure = traceback.format_exc()
            log_utils.log('BNWMovies - Exception: \n' + str(failure))
            return
Example #27
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            year = data['year']

            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.post_link)

            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(query)

            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [(dom_parser2.parse_dom(i, 'div', attrs={'class':'news-title'})) for i in r if data['imdb'] in i]
            r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]

            hostDict = hostprDict + hostDict

            for item in r:
                try:
                    name = item[1]
                    y = re.findall('\((\d{4})\)', name)[0]
                    if y != year: raise Exception()

                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', name)
                    s = s[0] if s else '0'
                    data = client.request(item[0])
                    data = dom_parser2.parse_dom(data, 'div', attrs={'id': 'r-content'})
                    data = re.findall('\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
                                      data[0].content, re.DOTALL)
                    u = [(i[0], i[1], s) for i in data if i]

                    for name, url, size in u:
                        try:
                            if '4K' in name:
                                quality = '4K'
                            elif '1080p' in name:
                                quality = '1080p'
                            elif '720p' in name:
                                quality = '720p'
                            elif any(i in name for i in ['dvdscr', 'r5', 'r6']):
                                quality = 'SCR'
                            elif any(i in name for i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts']):
                                quality = 'CAM'
                            else: quality = '720p'

                            info = []
                            if '3D' in name or '.3D.' in url: info.append('3D'); quality = '1080p'
                            if any(i in name for i in ['hevc', 'h265', 'x265']): info.append('HEVC')
                            try:
                                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', size)[-1]
                                div = 1 if size.endswith(('Gb', 'GiB', 'GB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass

                            info = ' | '.join(info)

                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            if any(x in url for x in ['.rar', '.zip', '.iso', 'turk']):continue

                            if 'ftp' in url: host = 'COV'; direct = True
                            else: direct = False; host = 'turbobit.net'
                            #if not host in hostDict: continue

                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')

                            sources.append({'source': host, 'quality': quality, 'language': 'en',
                                            'url': url, 'info': info, 'direct': direct, 'debridonly': False})

                        except:
                            pass
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('UltraHD - Exception: \n' + str(failure))
            return sources
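The quality ladder above keys off substrings of the release name (with the membership tests fixed so each token is searched for in the name). The same mapping as a compact standalone sketch:

def quality_from_name(name):
    # Mirrors the ladder above; note the bare 'ts' token can over-match.
    name = name.lower()
    if '4k' in name: return '4K'
    if '1080p' in name: return '1080p'
    if '720p' in name: return '720p'
    if any(t in name for t in ('dvdscr', 'r5', 'r6')): return 'SCR'
    if any(t in name for t in ('camrip', 'tsrip', 'hdcam', 'hdts',
                               'dvdcam', 'dvdts', 'cam', 'telesync', 'ts')):
        return 'CAM'
    return '720p'  # the snippet's default when nothing matches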
Example #28
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            premDate = ''

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            query = query.replace("&", "and")
            query = query.replace("  ", " ")
            query = query.replace(" ", "-")

            url = "http://rlsbb.to/" + query  # NB: hardcoded site root; search_link/base_link are not used here
            if 'tvshowtitle' not in data:
                url = url + "-1080p"  # NB: I don't think this works anymore! 2b-checked.

            r = client.request(url)  # curl as DOM object

            if r is None and 'tvshowtitle' in data:
                season = re.search('S(.*?)E', hdlr)
                season = season.group(1)
                query = title
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
                query = query + "-S" + season
                query = query.replace("&", "and")
                query = query.replace("  ", " ")
                query = query.replace(" ", "-")
                url = "http://rlsbb.to/" + query
                r = client.request(url)

            # looks like some shows have had episodes from the current season released in s00e00 format before switching to YYYY-MM-DD
            # this causes the second fallback search above for just s00 to return results and stops it from searching by date (ex. http://rlsbb.to/vice-news-tonight-s02)
            # so loop here if no items found on first pass and force date search second time around
            for loopCount in range(0, 2):
                if loopCount == 1 or (r is None and 'tvshowtitle' in data):  # s00e00 search failed: try again with YYYY-MM-DD
                    # http://rlsbb.to/the-daily-show-2018-07-24                                 ... example landing urls
                    # http://rlsbb.to/stephen-colbert-2018-07-24                                ... case and "date dots" get fixed by rlsbb
                    #query= re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)','',data['tvshowtitle'])   # this RE copied from above is just trash

                    premDate = re.sub('[ \.]', '-', data['premiered'])  # date is usually YYYY-MM-DD, but not guaranteed
                    query = re.sub('[\\\\:;*?"<>|/\-\']', '', data['tvshowtitle'])  # quadruple backslash = one literal backslash
                    query = query.replace("&", " and ").replace("  ", " ").replace(" ", "-")  # extra spaces around 'and' just in case
                    query = query + "-" + premDate

                    url = "http://rlsbb.to/" + query
                    url = url.replace('The-Late-Show-with-Stephen-Colbert', 'Stephen-Colbert')
                    #url = url.replace('Some-Specific-Show-Title-No2','Scene-Title2')           # shows I want...
                    #url = url.replace('Some-Specific-Show-Title-No3','Scene-Title3')           #         ...but theTVDB title != Scene release

                    r = client.request(url)

                posts = client.parseDOM(r, "div", attrs={
                    "class": "content"
                })  # get all <div class=content>...</div>
                hostDict = hostprDict + hostDict  # ?
                items = []
                for post in posts:
                    try:
                        u = client.parseDOM(post, 'a', ret='href')  # all <a href=...> targets in the post
                        for i in u:
                            try:
                                name = str(i)
                                if hdlr in name.upper():
                                    items.append(name)
                                elif len(premDate) > 0 and premDate in name.replace(".", "-"):
                                    items.append(name)  # s00e00 failed: matched on the YYYY-MM-DD form instead
                                # NOTE: the vast majority of rlsbb urls are just hashes! Future careful link grabbing would yield 2x or 3x results
                            except:
                                pass
                    except:
                        pass

                if len(items) > 0: break

            seen_urls = set()

            for item in items:
                try:
                    info = []

                    url = str(item)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if url in seen_urls: continue
                    seen_urls.add(url)

                    host = url.replace("\\", "")
                    host2 = host.strip('"')
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(host2.strip().lower()).netloc)[0]

                    if host not in hostDict: raise Exception()
                    if any(x in host2 for x in ['.rar', '.zip', '.iso']):
                        continue

                    if '720p' in host2:
                        quality = 'HD'
                    elif '1080p' in host2:
                        quality = '1080p'
                    else:
                        quality = 'SD'

                    info = ' | '.join(info)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': host2,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                    # why is this hardcoded to debridonly=True? seems like overkill but maybe there's a resource-management reason?
                except:
                    pass
            check = [i for i in sources if i['quality'] != 'CAM']
            if check: sources = check
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('RLSBB - Exception: \n' + str(failure))
            return sources
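The fallback above tries an s00e00 slug first and a premiere-date slug second. Here is the slug construction pulled out as a sketch; the cleanup rules are inferred from the code and comments above, not from any documented site API:

import re

def rlsbb_slug(tvshowtitle, hdlr=None, premiered=None):
    # Drop punctuation the site rejects, then dash-join the words.
    q = re.sub('[\\\\:;*?"<>|/\-\']', '', tvshowtitle)
    q = q.replace('&', ' and ').replace('  ', ' ').replace(' ', '-')
    if hdlr:         # e.g. 'S02E05' -> 'some-show-S02E05'
        return q + '-' + hdlr
    if premiered:    # e.g. '2018-07-24' -> 'some-show-2018-07-24'
        return q + '-' + re.sub('[ \.]', '-', premiered)
    return q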
Example #29
def request(url,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=True,
            output='',
            timeout='30'):
    try:
        if not url:
            return

        handlers = []

        if proxy is not None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % (proxy)}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if (2, 7, 8) < sys.version_info < (2, 7, 12):
            try:
                import ssl
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                opener = urllib2.install_opener(opener)
            except:
                pass

        if url.startswith('//'): url = 'http:' + url

        _headers = {}
        try:
            _headers.update(headers)
        except:
            pass
        if 'User-Agent' in _headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            _headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            _headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in _headers:
            pass
        elif referer is not None:
            _headers['Referer'] = referer
        if 'Accept-Language' not in _headers:
            _headers['Accept-Language'] = 'en-US'
        if 'X-Requested-With' in _headers:
            pass
        elif XHR == True:
            _headers['X-Requested-With'] = 'XMLHttpRequest'
        if 'Cookie' in _headers:
            pass
        elif cookie is not None:
            _headers['Cookie'] = cookie
        if 'Accept-Encoding' in _headers:
            pass
        elif compression and limit is None:
            _headers['Accept-Encoding'] = 'gzip'

        if redirect == False:

            #old implementation
            #class NoRedirection(urllib2.HTTPErrorProcessor):
            #    def http_response(self, request, response): return response

            #opener = urllib2.build_opener(NoRedirection)
            #opener = urllib2.install_opener(opener)

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):
                def http_error_302(self, req, fp, code, msg, headers):
                    infourl = urllib.addinfourl(fp, headers,
                                                req.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)

            try:
                del _headers['Referer']
            except:
                pass

        if isinstance(post, dict):
            post = utils.byteify(post)
            post = urllib.urlencode(post)

        url = utils.byteify(url)

        request = urllib2.Request(url, data=post)
        _add_request_header(request, _headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:

            if response.code == 503:
                cf_result = response.read(5242880)
                try:
                    encoding = response.info().getheader('Content-Encoding')
                except:
                    encoding = None
                if encoding == 'gzip':
                    cf_result = gzip.GzipFile(
                        fileobj=StringIO.StringIO(cf_result)).read()

                if 'cf-browser-verification' in cf_result:

                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme,
                                          urlparse.urlparse(url).netloc)

                    if not netloc.endswith('/'): netloc += '/'

                    ua = _headers['User-Agent']

                    cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)

                    _headers['Cookie'] = cf

                    request = urllib2.Request(url, data=post)
                    _add_request_header(request, _headers)

                    response = urllib2.urlopen(request, timeout=int(timeout))
                else:
                    log_utils.log(
                        'Request-Error (%s): %s' % (str(response.code), url),
                        log_utils.LOGDEBUG)
                    if error == False: return
            else:
                log_utils.log(
                    'Request-Error (%s): %s' % (str(response.code), url),
                    log_utils.LOGDEBUG)
                if error == False: return

        if output == 'cookie':
            result = None
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass
            if close == True: response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close == True: response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close == True: response.close()
            return result

        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close == True: response.close()
            return result

        elif output == 'file_size':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = 0
            response.close()
            return content

        if limit == '0':
            result = response.read(224 * 1024)
        elif limit is not None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)

        try:
            encoding = response.info().getheader('Content-Encoding')
        except:
            encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()

        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)

            _headers['Cookie'] = su

            request = urllib2.Request(url, data=post)
            _add_request_header(request, _headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == '0':
                result = response.read(224 * 1024)
            elif limit is not None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

            try:
                encoding = response.info().getheader('Content-Encoding')
            except:
                encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(
                    fileobj=StringIO.StringIO(result)).read()

        if 'Blazingfast.io' in result and 'xhr.open' in result:
            netloc = '%s://%s' % (urlparse.urlparse(url).scheme,
                                  urlparse.urlparse(url).netloc)
            ua = _headers['User-Agent']
            _headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua,
                                           timeout)

            result = _basic_request(url,
                                    headers=_headers,
                                    post=post,
                                    timeout=timeout,
                                    limit=limit)

        if output == 'extended':
            try:
                response_headers = dict([(item[0].title(), item[1])
                                         for item in response.info().items()])
            except:
                response_headers = response.headers
            response_code = str(response.code)
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                cookie = cf
            except:
                pass
            if close == True: response.close()
            return (result, response_code, response_headers, _headers, cookie)
        else:
            if close == True: response.close()
            return result
    except Exception as e:
        log_utils.log('Request-Error: (%s) => %s' % (str(e), url),
                      log_utils.LOGDEBUG)
        return
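A few hedged usage sketches for the request() helper above; the URLs are placeholders:

# Plain GET; returns the (gzip-decoded, size-capped) body, or None on failure
html = request('http://example.com/page')

# Form POST with an XHR header and a 10 second timeout
data = request('http://example.com/search', post={'q': 'title'}, XHR=True, timeout='10')

# output='extended' returns (body, status code, response headers, request headers, cookie)
result, code, resp_headers, req_headers, cookie = request('http://example.com/page', output='extended')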
Example #30
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            items = []

            try:
                feed = True

                url = self.search_link % urllib.quote_plus(query)
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url)
                if r is None: feed = False

                posts = client.parseDOM(r, 'item')
                if not posts: feed = False

                items = []

                for post in posts:
                    try:
                        u = client.parseDOM(post, 'enclosure', ret='url')
                        u = [(i.strip('/').split('/')[-1], i) for i in u]
                        items += u
                    except:
                        pass
            except:
                pass

            try:
                if feed == True: raise Exception()

                url = self.search_link_2 % urllib.quote_plus(query)
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url)

                posts = client.parseDOM(r, 'div', attrs={'class': 'post'})

                items = []
                dupes = []

                for post in posts:
                    try:
                        t = client.parseDOM(post, 'a')[0]
                        t = re.sub('<.+?>|</.+?>', '', t)

                        x = re.sub(
                            '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                            '', t)
                        if not cleantitle.get(title) in cleantitle.get(x):
                            raise Exception()
                        y = re.findall(
                            '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                            t)[-1].upper()
                        if not y == hdlr: raise Exception()

                        fmt = re.sub(
                            '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                            '', t.upper())
                        fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                        fmt = [i.lower() for i in fmt]
                        if not any(i in ['1080p', '720p'] for i in fmt):
                            raise Exception()

                        if len(dupes) > 2: raise Exception()
                        dupes += [x]

                        u = client.parseDOM(post, 'a', ret='href')[0]

                        r = client.request(u)
                        u = client.parseDOM(r, 'a', ret='href')
                        u = [(i.strip('/').split('/')[-1], i) for i in u]
                        items += u
                    except:
                        pass
            except:
                pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SceneRls - Exception: \n' + str(failure))
            return sources