def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = proxy.request(url, 'tv shows')

            links = client.parseDOM(r, 'a', ret='href', attrs = {'target': '.+?'})
            links = [x for y,x in enumerate(links) if x not in links[:y]]

            for i in links:
                try:
                    url = i
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['r'][0]
                    url = url.decode('base64')
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return sources
Example No. 2
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources

            html = client.request(url)
            try:
                iframe = client.parseDOM(html, 'iframe', attrs = {'class': 'embed-responsive-item'}, ret='src')[0]
                host = iframe.split('//')[1].replace('www.','')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({'source':host,'quality':'SD','language': 'en','url':iframe,'direct':False,'debridonly':False})
            except:
                flashvar = client.parseDOM(html, 'param', attrs = {'name': 'flashvars'}, ret='value')[0]
                link = flashvar.split('file=')[1]
                host = link.split('//')[1].replace('www.','')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({'source':host,'quality':'SD','language': 'en','url':link,'direct':False,'debridonly':False})

            containers = client.parseDOM(html, 'div', attrs={'class':'dwn-box'})

            for container in containers:
                link = client.parseDOM(container, 'a', attrs={'rel':'nofollow'}, ret='href')[0]
                redirect = client.request(link, output='geturl')
                quality,info = source_utils.get_release_quality(redirect)
                sources.append({'source':'DirectLink','quality':quality,'language': 'en','url':redirect,'info':info,'direct':True,'debridonly':False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('CoolTV - Exception: \n' + str(failure))
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            t = cleantitle.get(tvshowtitle)

            q = urllib.quote_plus(cleantitle.query(tvshowtitle))
            p = urllib.urlencode({'term': q})

            r = client.request(self.search_link, post=p, XHR=True)
            try: r = json.loads(r)
            except: r = None

            if r:
                r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
            else:
                r = proxy.request(self.search_link_2 % q, 'tv shows')
                r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
                r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]

            r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            url = r[0][0]
            url = proxy.parse(url)

            url = url.strip('/').split('/')[-1]
            url = url.encode('utf-8')
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            url = '%s/serie/%s' % (self.base_link, url)

            r = proxy.request(url, 'tv shows')
            r = client.parseDOM(r, 'li', attrs = {'itemprop': 'episode'})

            t = cleantitle.get(title)

            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'itemprop': 'name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split(' ')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

            url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if not url: url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url: url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url: raise Exception() 

            url = url[0][0]
            url = proxy.parse(url)

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return
Example No. 5
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return sources
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            title = urldata['title'].replace(':', ' ').lower()
            year = urldata['year']

            search_id = title.lower()
            start_url = self.search_link % (self.base_link, search_id.replace(' ','%20'))

            headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            html = client.request(start_url,headers=headers)
            Links = re.compile('"post","link":"(.+?)","title".+?"rendered":"(.+?)"',re.DOTALL).findall(html)
            for link,name in Links:
                link = link.replace('\\','')
                if title.lower() in name.lower(): 
                    if year in name:
                        holder = client.request(link,headers=headers)
                        new = re.compile('<iframe src="(.+?)"',re.DOTALL).findall(holder)[0]
                        end = client.request(new,headers=headers)
                        final_url = re.compile('<iframe src="(.+?)"',re.DOTALL).findall(end)[0]
                        valid, host = source_utils.is_host_valid(final_url, hostDict)
                        sources.append({'source':host,'quality':'1080p','language': 'en','url':final_url,'info':[],'direct':False,'debridonly':False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('1080PMovies - Exception: \n' + str(failure))
            return sources
Example No. 6
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            url = self.search_link % (cleantitle.geturl(title), year)

            q = urlparse.urljoin(self.base_link, url)

            r = proxy.geturl(q)
            if r is not None: return url

            t = cleantitle.get(title)

            q = self.search_link_2 % urllib.quote_plus(cleantitle.query(title))
            q = urlparse.urljoin(self.base_link, q)

            r = client.request(q)

            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            r = [(i[0], re.findall('(?:\'|\")(.+?)(?:\'|\")', i[1])) for i in r]
            r = [(i[0], [re.findall('(.+?)\((\d{4})', x) for x in i[1]]) for i in r]
            r = [(i[0], [x[0] for x in i[1] if x]) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            url = re.findall('(?://.+?|)(/.+)', r[0])[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('ProjectFree - Exception: \n' + str(failure))
            return
Example No. 7
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = client.request(url)
         try:
             match = re.compile('iframe id="odbIframe" src="(.+?)"').findall(r)
             for url in match:
                 host = url.split('//')[1].replace('www.', '')
                 host = host.split('/')[0].lower()
                 sources.append({
                     'source': host,
                     'quality': 'HD',
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         except Exception:
             failure = traceback.format_exc()
             log_utils.log('ODB - Exception: \n' + str(failure))
             return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ODB - Exception: \n' + str(failure))
         return sources
     return sources
Example No. 8
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None: return sources
         urldata = urlparse.parse_qs(url)
         urldata = dict((i, urldata[i][0]) for i in urldata)
         title = urldata['title'].replace(':', ' ').lower()
         year = urldata['year']
         search_id = title.lower()
         start_url = urlparse.urljoin(self.base_link, self.search_link % (search_id.replace(' ','+') + '+' + year))
         headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
         html = client.request(start_url,headers=headers)
         Links = re.compile('a href="(.+?)" title="(.+?)"',re.DOTALL).findall(html)
         for link,name in Links:
             if title.lower() in name.lower(): 
                 if year in name:
                     holder = client.request(link,headers=headers)
                     Alternates = re.compile('<button class="text-capitalize dropdown-item" value="(.+?)"',re.DOTALL).findall(holder)
                     for alt_link in Alternates:
                         alt_url = alt_link.split("e=")[1]
                         valid, host = source_utils.is_host_valid(alt_url, hostDict)
                         sources.append({'source':host,'quality':'1080p','language': 'en','url':alt_url,'info':[],'direct':False,'debridonly':False})
                     
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('1080PMovies - Exception: \n' + str(failure))
         return sources
Example No. 9
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         return urllib.urlencode({'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'localtvshowtitle': localtvshowtitle, 'year': year})
     except:
         failure = traceback.format_exc()
         log_utils.log('Library - Exception: \n' + str(failure))
         return
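
Note: the string returned above is only a URL-encoded bundle of metadata, not a web address; the episode() methods later in this listing unpack it again with urlparse.parse_qs (see Examples 10 and 12). A minimal sketch of that round-trip, with illustrative values:

import urllib, urlparse

packed = urllib.urlencode({'imdb': 'tt1234567', 'tvshowtitle': 'Some Show', 'year': '2010'})
data = urlparse.parse_qs(packed)
data = dict((i, data[i][0]) for i in data)  # flatten parse_qs's single-item lists
assert data['tvshowtitle'] == 'Some Show'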
Example No. 10
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                search_results = client.request(search_url)
                parsed = client.parseDOM(search_results, 'div', {'id': 'movie-featured'})
                parsed = [(client.parseDOM(i, 'a', ret='href'), re.findall('<b><i>(.+?)</i>', i)) for i in parsed]
                parsed = [(i[0][0], i[1][0]) for i in parsed if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = parsed[0][0]
            except:
                pass
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][0]
        except:
            failure = traceback.format_exc()
            log_utils.log('CinemaMega - Exception: \n' + str(failure))
            return
Example No. 11
    def episode(self, data, imdb, tvdb, title, premiered, season, episode):
        try:
            seasonsPageURL = data['pageURL']

            # An extra step needed before sources() can be called. Get the episode page.
            # This code will crash if they change the website structure in the future.

            session = self._createSession(data['UA'], data['cookies'], data['referer'])
            xbmc.sleep(1000)
            r = self._sessionGET(seasonsPageURL, session)
            if r.ok:
                soup = BeautifulSoup(r.content, 'html.parser')
                mainDIV = soup.find('div', {'class': 'tv_container'})
                firstEpisodeDIV = mainDIV.find('div', {'class': 'show_season', 'data-id': season})
                # Filter the episode HTML entries to find the one that represents the episode we're after.
                episodeDIV = next((element for element in firstEpisodeDIV.next_siblings if not isinstance(
                    element, NavigableString) and next(element.a.strings, '').strip('E ') == episode), None)
                if episodeDIV:
                    return {
                        'pageURL': self.BASE_URL + episodeDIV.a['href'],
                        'UA': session.headers['User-Agent'],
                        'referer': seasonsPageURL,
                        'cookies': session.cookies.get_dict()
                    }
            return None
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('PrimewireGR - Exception: \n' + str(failure))
            return
Example No. 12
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])

            html = client.request(str(url['url']))
            results = client.parseDOM(html, 'strong')

            for content in results:
                try:
                    show_url, url_text = re.compile('href="(.+?)">(.+?)</a>',re.DOTALL).findall(content)[0]
                    # Older links put rel="nofollow" after the href; in testing those entries exposed no hosts anyway, so it is fine if they break for scraping.
                except:
                    continue
                chkstr = 'Season %s Episode %s' % (season, episode)
                chkstr2 = 'S%s Episode %s' % (season, episode)
                if (chkstr.lower() in url_text.lower()) or (chkstr2.lower() in url_text.lower()):
                    return show_url
            return
        except:
            failure = traceback.format_exc()
            log_utils.log('ICouchTuner - Exception: \n' + str(failure))
            return
Example No. 13
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources

            headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            html = client.request(url,headers=headers)

            vidpage = re.compile('id="tab-movie".+?data-file="(.+?)"',re.DOTALL).findall(html)
        
            for link in vidpage:
                if 'trailer' not in link.lower():
                    link = urlparse.urljoin(self.base_link, link)
                    sources.append({'source':'DirectLink','quality':'SD','language': 'en','url':link,'info':[],'direct':True,'debridonly':False})
            other_links = re.findall('data-url="(.+?)"',html)
            for link in other_links:
                if link.startswith('//'):
                    link = 'http:' + link
                sources.append({'source':'DirectLink','quality':'SD','language': 'en','url':link,'info':[],'direct':False,'debridonly':False})

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('BobMovies - Exception: \n' + str(failure))
            return
Example No. 14
    def resolve(self, data):
        try:
            hostURL = None
            DELAY_PER_REQUEST = 1000  # In milliseconds.

            startTime = datetime.now()
            session = self._createSession(data['UA'], data['cookies'], data['referer'])
            r = self._sessionGET(data['pageURL'], session, allowRedirects=False)
            if r.ok:
                if 'Location' in r.headers:
                    hostURL = r.headers['Location']  # For most hosts they redirect.
                else:
                    # On rare cases they JS-pack the host link in the page source.
                    try:
                        hostURL = re.search(r'''go\(\\['"](.*?)\\['"]\);''', jsunpack.unpack(r.text)).group(1)
                    except Exception:
                        pass  # Or sometimes their page is just broken.

            # Do a little delay, if necessary, between resolve() calls.
            elapsed = int((datetime.now() - startTime).total_seconds() * 1000)
            if elapsed < DELAY_PER_REQUEST:
                xbmc.sleep(max(DELAY_PER_REQUEST - elapsed, 100))

            return hostURL
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('PrimewireGR - Exception: \n' + str(failure))
            return
Example No. 15
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            pages = []
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            data.update({'season': season, 'episode': episode, 'title': title, 'premiered': premiered})

            season_base = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', season_base)
            tvshowtitle = data['tvshowtitle']
            tvshowtitle = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', tvshowtitle)

            query = query.replace("&", "and")
            query = query.replace("  ", " ")
            query = query.replace(" ", "+")
            tvshowtitle = tvshowtitle.replace("&", "and")
            tvshowtitle = tvshowtitle.replace("  ", " ")
            tvshowtitle = tvshowtitle.replace(" ", "+")

            start_url = urlparse.urljoin(self.base_link, self.search_link % (tvshowtitle, query))

            html = client.request(start_url)
            results = client.parseDOM(html, 'h2', attrs={'class':'entry-title'})
            for content in results:
                found_link = client.parseDOM(content, 'a', ret='href')[0]
                if self.base_link in found_link:
                    if cleantitle.get(data['tvshowtitle']) in cleantitle.get(found_link):
                        if cleantitle.get(season_base) in cleantitle.get(found_link):
                            pages.append(found_link)
            return pages
        except:
            failure = traceback.format_exc()
            log_utils.log('ALLRLS - Exception: \n' + str(failure))
            return pages
Example No. 16
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         html = client.request(url)
         html = html.split("type='video/mp4'")[1]
         match = re.compile('href="(.+?)"',re.DOTALL).findall(html)
         for link in match:
             if '1080' in link:
                 quality = '1080p'
             elif '720' in link:
                 quality = '720p'
             elif '480' in link:
                 quality = '480p'
             else:
                 quality = 'SD'
             if '.mkv' in link or '.mp4' in link:
                 sources.append({'source': 'DirectLink', 'quality': quality, 'language': 'en', 'url': link, 'direct': True, 'debridonly': False})
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('FMovieD6 - Exception: \n' + str(failure))
         return
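
The 1080/720/480 quality-detection chain above recurs almost verbatim in Examples 22 and 25 (those fall back to 'DVD' rather than 'SD'). Were these modules refactored, a shared helper could capture it; a hypothetical sketch, not part of any of the original modules:

def quality_from_link(link):
    # Same substring mapping the scrapers use inline; 'SD' is the fallback.
    for marker, label in (('1080', '1080p'), ('720', '720p'), ('480', '480p')):
        if marker in link:
            return label
    return 'SD'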
Example No. 17
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            posts = client.parseDOM(r, 'item')

            for post in posts:
                Links = client.parseDOM(post, 'enclosure', ret='url')
                if Links:
                    for vid_url in Links:
                        quality,info = source_utils.get_release_quality(url, vid_url)
                        host = vid_url.split('//')[1].replace('www.','')
                        host = host.split('/')[0].lower()
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info, 'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Best-Moviez - Exception: \n' + str(failure))
            return sources
Example No. 18
    def search(self, query_bases, options):
        i = 0
        for option in options:
            
            for query_base in query_bases:
                q = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query_base+option)
                q = q.replace("  ", " ").replace(" ", "+")

                log_utils.log("RLSSCN query : " + q)
                
                url = self.search_link % (q)
                html = requests.get(url)

                log_utils.log("RLSSCN try test " + str(i) + " - html : " + str(html))

                if html.status_code == 200:
                    log_utils.log("RLSSCN test " + str(i) + " Ok")
                    url = client.parseDOM(html.content, "h2", attrs={"class": "title"})
                    url = client.parseDOM(url, "a", ret='href')
                    log_utils.log("RLSSCN test " + str(i) + " : " + str(url))
                    if len(url) > 0:
                        html = requests.get(url[0])
                        if html.status_code == 200:
                            return html.content
                else:
                    log_utils.log("RLSSCN test " + str(i) + " return code : " + str(html.status_code) + " - next test " + str(i + 1))
                    i += 1
                    
        return None
Example No. 19
 def sources(self, url, hostDict, hostprDict):
     sources = []
     if url is None: return sources
     try:
         OPEN = client.request(url)
         headers = {'Origin':'http://hdpopcorns.co', 'Referer':url,
                    'X-Requested-With':'XMLHttpRequest', 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
         try:
             params = re.compile('FileName1080p.+?value="(.+?)".+?FileSize1080p.+?value="(.+?)".+?value="(.+?)"',re.DOTALL).findall(OPEN)
             for param1, param2,param3 in params:
                 request_url = '%s/select-movie-quality.php' %(self.base_link)
                 form_data = {'FileName1080p':param1,'FileSize1080p':param2,'FSID1080p':param3}
             link = requests.post(request_url, data=form_data, headers=headers,timeout=3).content
             final_url = re.compile('<strong>1080p</strong>.+?href="(.+?)"',re.DOTALL).findall(link)[0]
             sources.append({'source': 'DirectLink', 'quality': '1080p', 'language': 'en', 'url': final_url, 'direct': True, 'debridonly': False})
         except:pass
         try:
             params = re.compile('FileName720p.+?value="(.+?)".+?FileSize720p".+?value="(.+?)".+?value="(.+?)"',re.DOTALL).findall(OPEN)
             for param1, param2,param3 in params:
                 request_url = '%s/select-movie-quality.php' %(self.base_link)
                 form_data = {'FileName720p':param1,'FileSize720p':param2,'FSID720p':param3}
             link = requests.post(request_url, data=form_data, headers=headers,timeout=3).content
             final_url = re.compile('<strong>720p</strong>.+?href="(.+?)"',re.DOTALL).findall(link)[0]
             sources.append({'source': 'DirectLink', 'quality': '720p', 'language': 'en', 'url': final_url, 'direct': True, 'debridonly': False})
         except:pass
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('Popcorn - Exception: \n' + str(failure))
         return sources
Example No. 20
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if not url:
                return

            sep = '%dx%02d' % (int(season), int(episode))
            r = client.request(url, headers=self.headers)
            r = dom_parser2.parse_dom(r, 'span', attrs={'class': 'list'})
            r1 = dom_parser2.parse_dom(r, 'br')
            r1 = [dom_parser2.parse_dom(i, 'a', req='href') for i in r1]
            try:
                if int(season) == 1 and int(episode) == 1:
                    url = dom_parser2.parse_dom(r, 'a', req='href')[1].attrs['href']
                else:
                    for i in r1:
                        if sep in i[0].content:
                            url = urlparse.urljoin(self.base_url, i[0].attrs['href'])
            except:
                pass
            url = url[:-1]
            url = url.split('?v=')[1]
            url = self.list_url % url
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('IceFilms - Exception: \n' + str(failure))
            return
Example No. 21
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources
            url = urlparse.urljoin(self.base_link, url)
            for i in range(3):
                result = client.request(url, timeout=10)
                if result is not None: break
            
            dom = dom_parser.parse_dom(result, 'div', attrs={'class':'links', 'id': 'noSubs'})
            result = dom[0].content
            
            links = re.compile('<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',re.DOTALL).findall(result)         
            for link in links[:5]:
                try:
                    url2 = urlparse.urljoin(self.base_link, link[1])
                    for i in range(2):
                        result2 = client.request(url2, timeout=3)
                        if result2 is not None: break
                    r = re.compile('href="([^"]+)"\s+class="action-btn').findall(result2)[0]
                    valid, hoster = source_utils.is_host_valid(r, hostDict)
                    if not valid: continue
                    urls, host, direct = source_utils.check_directstreams(r, hoster)
                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass           
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SeriesFree - Exception: \n' + str(failure))
            return sources
Example No. 22
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
            html = client.request(url)

            source = re.compile('<iframe src="(.+?)"',re.DOTALL).findall(html)[0]
            if 'consistent.stream' in source:
                html = client.request(source)
                page = re.compile(""":title=["'](.+?)["']\>""").findall(html)[0]
                decode = client.replaceEscapeCodes(page)
                links = re.compile('"sources.+?"(http.+?)"',re.DOTALL).findall(decode)
                for link in links:
                    link = link.replace('\\','')
                    if '1080' in link:
                        quality='1080p'
                    elif '720' in link:
                        quality = '720p'
                    else:
                        quality = 'DVD'
                    host = link.split('//')[1].replace('www.','')
                    host = host.split('/')[0].split('.')[0].title()
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('VexMovies - Exception: \n' + str(failure))
            return sources
Example No. 23
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            clean_title = cleantitle.geturl(title)
            search_url = self.search_link % (clean_title.replace('-','+'), year)
            headers = {'Host': 'icefilms1.unblocked.sh',
                       'Cache-Control': 'max-age=0',
                        'Connection': 'keep-alive',
                        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
                        'Upgrade-Insecure-Requests': '1',
                        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                        'Accept-Encoding': 'gzip, deflate, br',
                        'Accept-Language': 'en-US,en;q=0.8'}

            r = client.request(search_url, headers=headers)
            r = dom_parser2.parse_dom(r, 'td')
            r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r if "<div class='number'" in i.content]
            r = [(urlparse.urljoin(self.base_url, i[0].attrs['href'])) for i in r if title.lower() in i[0].content.lower() and year in i[0].content]
            url = r[0]
            url = url[:-1]
            url = url.split('?v=')[1]
            url = self.list_url % url
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('IceFilms - Exception: \n' + str(failure))
            return
Example No. 24
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
#           search_id = title.lower().replace(':', ' ').replace('-', ' ') # see __init__

#           start_url = urlparse.urljoin(self.base_link, (self.search_link % (search_id.replace(' ','%20'))))         
            start_url = urlparse.urljoin(self.base_link, (self.search_link % (title.replace(' ','%20'))))  
            
            headers={'User-Agent':client.randomagent()}
            html = client.request(start_url,headers=headers)    		
            
            match = re.compile('<span class="name"><a title="(.+?)" href="(.+?)".+?title="(.+?)"',re.DOTALL).findall(html)
            for name,item_url, link_year in match:
                if year in link_year:                                                        
                    if cleantitle.get(title) in cleantitle.get(name):
                        return item_url
            return
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMovie - Exception: \n' + str(failure))
            return

        # Provider attributes from the class __init__, included with this example:
        self.domains = ['solarmoviez.to', 'solarmoviez.ru']
        self.base_link = 'https://solarmoviez.ru'
        self.search_link = '/movie/search/%s.html'
        self.info_link = '/ajax/movie_info/%s.html?is_login=false'
        self.server_link = '/ajax/v4_movie_episodes/%s'
        self.embed_link = '/ajax/movie_embed/%s'
        self.token_link = '/ajax/movie_token?eid=%s&mid=%s'
        self.source_link = '/ajax/movie_sources/%s?x=%s&y=%s'
Example No. 25
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title']
            year = data['year']

            url = urlparse.urljoin(self.base_link, self.search_link) 
            url = url % (title.replace(':', '').replace(' ','_'),year)

            search_results = client.request(url)

            varid = re.compile('var frame_url = "(.+?)"',re.DOTALL).findall(search_results)[0].replace('/embed/','/streamdrive/info/')
            res_chk = re.compile('class="title"><h1>(.+?)</h1>',re.DOTALL).findall(search_results)[0]
            varid = 'http:'+varid
            holder = client.request(varid)
            links = re.compile('"src":"(.+?)"',re.DOTALL).findall(holder)
            for link in links:
                vid_url = link.replace('\\','')
                if '1080' in res_chk:
                    quality = '1080p'
                elif '720' in res_chk:
                    quality = '720p'
                else:
                    quality = 'DVD'
                sources.append({'source': 'Googlelink', 'quality': quality, 'language': 'en', 'url': vid_url, 'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Watch32 - Exception: \n' + str(failure))
            return sources
Example No. 26
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None:
                return sources

            r = client.request(url)
            quality = re.findall(">(\w+)<\/p", r)
            if quality[0] == "HD":
                quality = "720p"
            else:
                quality = "SD"
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            for i in r[0]:
                url = {
                    'url': i.attrs['href'],
                    'data-film': i.attrs['data-film'],
                    'data-server': i.attrs['data-server'],
                    'data-name': i.attrs['data-name']}
                url = urllib.urlencode(url)
                sources.append({'source': i.content, 'quality': quality, 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': False})
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('FreePutlockers - Exception: \n' + str(failure))
            return sources
Example No. 27
 def resolve(self, url):
     try:
         urldata = urlparse.parse_qs(url)
         urldata = dict((i, urldata[i][0]) for i in urldata)
         post = {
             'ipplugins': 1, 'ip_film': urldata['data-film'],
             'ip_server': urldata['data-server'],
             'ip_name': urldata['data-name'],
             'fix': "0"}
         p1 = client.request('http://freeputlockers.org/ip.file/swf/plugins/ipplugins.php',
                             post=post, referer=urldata['url'], XHR=True)
         p1 = json.loads(p1)
         p2 = client.request(
             'http://freeputlockers.org/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0' %
             (p1['s'],
              urldata['data-server']))
         p2 = json.loads(p2)
         p3 = client.request('http://freeputlockers.org/ip.file/swf/ipplayer/api.php?hash=%s' % (p2['hash']))
         p3 = json.loads(p3)
         n = p3['status']
         if n is False:
             p2 = client.request(
                 'http://freeputlockers.org/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1' %
                 (p1['s'],
                  urldata['data-server']))
             p2 = json.loads(p2)
         url = "https:%s" % p2["data"].replace("\/", "/")
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('FreePutlockers - Exception: \n' + str(failure))
         return
Example No. 28
 def resolve(self, url):
     try:
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('ShowBox - Exception: \n' + str(failure))
         return
Example No. 29
    def _getSearchData(self, query, possibleTitles, year, session, isMovie):
        try:
            searchURL = self.BASE_URL + ('/?' if isMovie else '/?tv=&') + urlencode({'search_keywords': query})
            r = self._sessionGET(searchURL, session)
            if not r.ok:
                return None

            bestGuessesURLs = []

            soup = BeautifulSoup(r.content, 'html.parser')
            mainDIV = soup.find('div', role='main')
            for resultDIV in mainDIV.findAll('div', {'class': 'index_item'}, recursive=False):
                # Search result titles in Primewire.gr are usually "[Name of Movie/TVShow] (yyyy)".
                # Example: 'Star Wars Legends: Legacy of the Force (2015)'
                match = re.search(r'(.*?)(?:\s\((\d{4})\))?$', resultDIV.a['title'].lower().strip())
                resultTitle, resultYear = match.groups()
                if resultTitle in possibleTitles:
                    if resultYear == year:  # 'resultYear' is the bare four-digit year captured by the regex group.
                        bestGuessesURLs.insert(0, resultDIV.a['href'])  # Use year to make better guesses.
                    else:
                        bestGuessesURLs.append(resultDIV.a['href'])

            if bestGuessesURLs:
                return {
                    'pageURL': self.BASE_URL + bestGuessesURLs[0],
                    'UA': session.headers['User-Agent'],
                    'referer': searchURL,
                    'cookies': session.cookies.get_dict(),
                }
            else:
                return None
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('PrimewireGR - Exception: \n' + str(failure))
            return
Example No. 30
    def __get_movie_url(self, data):
        try:
            query = data['title'].lower().replace(' ', '+')
            path = self.movie_search % query
            url = urlparse.urljoin(self.base_link, path)

            response = client.request(url, headers=self.headers)

            movie_id = json.loads(response)[0]['id']

            path = self.movie_details % movie_id
            url = urlparse.urljoin(self.base_link, path)

            response = client.request(url, headers=self.headers)
            token_encrypted = json.loads(response)['langs'][0]['sources'][0]['hash']

            token = self.__decrypt(token_encrypted)

            path = self.fetcher % token
            url = urlparse.urljoin(self.base_link, path)

            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('ShowBox - Exception: \n' + str(failure))
            return
Example No. 31
	def resolve_magnet(self, magnet_url, info_hash, season, episode, title):
		from resources.lib.modules.source_utils import seas_ep_filter, extras_filter
		from resources.lib.cloud_scrapers.cloud_utils import cloud_check_title
		try:
			torrent_id = None
			rd_url = None
			match = False
			reason = ''
			extensions = supported_video_extensions()
			extras_filtering_list = extras_filter()
			info_hash = info_hash.lower()
			if not season: compare_title = re.sub(r'[^A-Za-z0-9]+', '.', title.replace('\'', '').replace('&', 'and').replace('%', '.percent')).lower()
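			# compare_title collapses punctuation to '.' so the show title can be
			# stripped from candidate filenames, leaving only release tags for the
			# extras filter below.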
			torrent_files = self._get(check_cache_url + '/' + info_hash)
			if not info_hash in torrent_files: return None
			torrent_id = self.add_magnet(magnet_url) # add_magnet() returns the torrent id
			torrent_files = torrent_files[info_hash]['rd']
			torrent_files = [item for item in torrent_files if self.video_only(item, extensions)]
			if not season:
				m2ts_check = self.m2ts_check(torrent_files)
				if m2ts_check: m2ts_key, torrent_files = self.m2ts_key_value(torrent_files) 
			for item in torrent_files:
				try:
					correct_file_check = False
					item_values = [i['filename'] for i in item.values()]
					if season:
						for value in item_values:
							if '.m2ts' in value:
								log_utils.log('Real-Debrid: Can not resolve .m2ts season disk episode', level=log_utils.LOGDEBUG)
								continue
							correct_file_check = seas_ep_filter(season, episode, value)
							if correct_file_check: break
						if not correct_file_check:
							reason = value + '  :no matching video filename'
							continue
					elif not m2ts_check:
						for value in item_values:
							filename = re.sub(r'[^A-Za-z0-9]+', '.', value.replace('\'', '').replace('&', 'and').replace('%', '.percent')).lower()
							filename_info = filename.replace(compare_title, '') 
							if any(x in filename_info for x in extras_filtering_list): continue
							aliases = self.get_aliases(title)
							correct_file_check = cloud_check_title(title, aliases, filename)
							if correct_file_check: break
						if not correct_file_check:
							reason = filename + '  :no matching video filename'
							continue
					torrent_keys = item.keys()
					if len(torrent_keys) == 0: continue
					torrent_keys = ','.join(torrent_keys)
					self.add_torrent_select(torrent_id, torrent_keys)
					torrent_info = self.torrent_info(torrent_id)
					if 'error' in torrent_info: continue
					selected_files = [(idx, i) for idx, i in enumerate([i for i in torrent_info['files'] if i['selected'] == 1])]
					if season:
						correct_files = []
						append = correct_files.append
						correct_file_check = False
						for value in selected_files:
							correct_file_check = seas_ep_filter(season, episode, value[1]['path'])
							if correct_file_check:
								append(value[1])
								break
						if len(correct_files) == 0: continue
						episode_title = re.sub(r'[^A-Za-z0-9]+', '.', title.replace("\'", '').replace('&', 'and').replace('%', '.percent')).lower()
						for i in correct_files:
							compare_link = seas_ep_filter(season, episode, i['path'], split=True)
							compare_link = re.sub(episode_title, '', compare_link)
							if any(x in compare_link for x in extras_filtering_list): continue
							else:
								match = True
								break
						if match:
							index = [i[0] for i in selected_files if i[1]['path'] == correct_files[0]['path']][0]
							break
					elif m2ts_check:
						match, index = True, [i[0] for i in selected_files if i[1]['id'] == m2ts_key][0]
					else:
						match = False
						for value in selected_files:
							filename = re.sub(r'[^A-Za-z0-9]+', '.', value[1]['path'].rsplit('/', 1)[1].replace('\'', '').replace('&', 'and').replace('%', '.percent')).lower()
							filename_info = filename.replace(compare_title, '') 
							if any(x in filename_info for x in extras_filtering_list): continue
							aliases = self.get_aliases(title)
							match = cloud_check_title(title, aliases, filename)
							if match:
								index = value[0]
								break
						if match: break
				except:
					log_utils.error()
			if match:
				rd_link = torrent_info['links'][index]
				file_url = self.unrestrict_link(rd_link)
				if file_url.endswith('rar'): file_url = None
				if not any(file_url.lower().endswith(x) for x in extensions): file_url = None
				if not self.store_to_cloud: self.delete_torrent(torrent_id)
				return file_url
			else:
				log_utils.log('Real-Debrid: FAILED TO RESOLVE MAGNET : "%s": %s' % (magnet_url, reason), __name__, log_utils.LOGWARNING)
			self.delete_torrent(torrent_id)
		except:
			log_utils.error('Real-Debrid: Error RESOLVE MAGNET %s : ' % magnet_url)
			if torrent_id: self.delete_torrent(torrent_id)
			return None
Example No. 32
        control.execute(
            'RunPlugin(plugin://%s)' %
            'plugin.video.venom/?action=cachesyncTVShows&timeout=720')
        control.execute(
            'RunPlugin(plugin://%s)' %
            'plugin.video.venom/?action=cachesyncMovies&timeout=720')
        # if control.setting('trakt.general.notifications') == 'true':
        # control.notification(title='default', message='Trakt Watched Status Sync Complete', icon='default', time=1, sound=False)


xbmc.log('[ plugin.video.venom ] service started', xbmc.LOGNOTICE)
CheckSettingsFile().run()
ReuseLanguageInvokerCheck().run()

try:
    AddonVersion = control.addon('plugin.video.venom').getAddonInfo('version')
    RepoVersion = control.addon('repository.venom').getAddonInfo('version')
    log_utils.log('###################   Venom   ##################',
                  log_utils.LOGNOTICE)
    log_utils.log('#####   CURRENT Venom VERSIONS REPORT   #####',
                  log_utils.LOGNOTICE)
    log_utils.log(
        '########   Venom PLUGIN VERSION: %s   ########' % str(AddonVersion),
        log_utils.LOGNOTICE)
    log_utils.log(
        '#####   Venom REPOSITORY VERSION: %s   #######' % str(RepoVersion),
        log_utils.LOGNOTICE)
    log_utils.log('############################################',
                  log_utils.LOGNOTICE)
except:
    log_utils.log(
        '############################# Venom ############################',
        log_utils.LOGNOTICE)
    log_utils.log(
Example No. 33
def request(url,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=True,
            output='',
            timeout='30',
            verifySsl=True,
            flare=True,
            ignoreErrors=None,
            as_bytes=False):
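    # General-purpose HTTP helper shared by the scrapers above. 'output' picks the
    # return value ('' = page body; 'cookie', 'geturl', 'headers', 'chunk' and
    # 'file_size' return metadata instead), 'limit' caps the body read in KB, and
    # 'flare' enables the cfscrape fallback for Cloudflare-protected responses.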
    try:
        if not url: return None
        if url.startswith('//'): url = 'http:' + url

        if isinstance(post, dict):
            post = bytes(urlencode(post), encoding='utf-8')
        elif isinstance(post, str):
            post = bytes(post, encoding='utf-8')

        handlers = []
        if proxy is not None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % (proxy)}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or close is not True:
            cookies = cookiejar.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if not verifySsl and version_info >= (2, 7, 12):
            try:
                import ssl
                ssl_context = ssl._create_unverified_context()
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except:
                from resources.lib.modules import log_utils
                log_utils.error()

        if verifySsl and ((2, 7, 8) < version_info < (2, 7, 12)):
            try:
                import ssl
                try:
                    import _ssl
                    CERT_NONE = _ssl.CERT_NONE
                except:
                    CERT_NONE = ssl.CERT_NONE
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except:
                from resources.lib.modules import log_utils
                log_utils.error()

        if headers is None:
            headers = {}
        else:
            headers = dict(headers)  # work on a copy; the original no-op try/except only guarded against headers=None
        if 'User-Agent' in headers: pass
        elif mobile is not True:
            headers['User-Agent'] = cache.get(randomagent, 12)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in headers: pass
        elif referer is not None: headers['Referer'] = referer
        if 'Accept-Language' not in headers:
            headers['Accept-Language'] = 'en-US'
        if 'X-Requested-With' in headers: pass
        elif XHR: headers['X-Requested-With'] = 'XMLHttpRequest'
        if 'Cookie' in headers: pass
        elif cookie: headers['Cookie'] = cookie
        if 'Accept-Encoding' in headers: pass
        elif compression and limit is None: headers['Accept-Encoding'] = 'gzip'


        if redirect is False:

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):
                def http_error_302(self, reqst, fp, code, msg, head):
                    infourl = addinfourl(fp, head, reqst.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)
            try:
                del headers['Referer']
            except:
                pass

        req = urllib2.Request(url, data=post)
        _add_request_header(req, headers)
        try:
            response = urllib2.urlopen(req, timeout=int(timeout))
        except HTTPError as error_response:
            # Bind under a separate name: "except HTTPError as response" would be
            # unset when the except block exits, later raising "local variable
            # 'response' referenced before assignment".
            response = error_response
            try:
                ignore = ignoreErrors and (int(response.code) == ignoreErrors or int(response.code) in ignoreErrors)
            except:
                ignore = False

            if not ignore:
                if response.code in [
                        301, 307, 308, 503, 403
                ]:  # 403:Forbidden added 3/3/21 for cloudflare, fails on bad User-Agent
                    cf_result = response.read(5242880)
                    try:
                        encoding = response.headers["Content-Encoding"]
                    except:
                        encoding = None
                    if encoding == 'gzip':
                        cf_result = gzip.GzipFile(
                            fileobj=BytesIO(cf_result)).read()

                    if flare and 'cloudflare' in str(response.info()).lower():
                        from resources.lib.modules import log_utils
                        log_utils.log(
                            'client module calling cfscrape: url=%s' % url,
                            level=log_utils.LOGDEBUG)
                        try:
                            from fenomscrapers.modules import cfscrape
                            if isinstance(post, dict): data = post
                            else:
                                try:
                                    data = parse_qs(post)
                                except:
                                    data = None
                            scraper = cfscrape.CloudScraper()
                            if response.code == 403:  # possible bad User-Agent in headers, let cfscrape assign
                                response = scraper.request(
                                    method='GET' if post is None else 'POST',
                                    url=url,
                                    data=data,
                                    timeout=int(timeout))
                            else:
                                response = scraper.request(
                                    method='GET' if post is None else 'POST',
                                    url=url,
                                    headers=headers,
                                    data=data,
                                    timeout=int(timeout))
                            result = response.content
                            flare = 'cloudflare'  # Used below
                            try:
                                cookies = response.request._cookies
                            except:
                                log_utils.error()
                            if response.status_code == 403:  # if cfscrape server still responds with 403
                                log_utils.log(
                                    'cfscrape-Error url=(%s): %s' %
                                    (url, 'HTTP Error 403: Forbidden'),
                                    __name__,
                                    level=log_utils.LOGDEBUG)
                                return None
                        except:
                            log_utils.error()

                    elif 'cf-browser-verification' in cf_result:
                        netloc = '%s://%s' % (urlparse(url).scheme,
                                              urlparse(url).netloc)
                        ua = headers['User-Agent']
                        cf = cache.get(cfcookie().get, 168, netloc, ua,
                                       timeout)
                        headers['Cookie'] = cf
                        req = urllib2.Request(url, data=post)
                        _add_request_header(req, headers)
                        response = urllib2.urlopen(req, timeout=int(timeout))
                    else:
                        if error is False:
                            from resources.lib.modules import log_utils
                            log_utils.error('Request-Error url=(%s)' % url)
                            return None
                else:
                    if error is False:
                        from resources.lib.modules import log_utils
                        log_utils.error('Request-Error url=(%s)' % url)
                        return None
                    elif error is True and response.code in [
                            401, 404, 405
                    ]:  # no point in continuing after this exception runs with these response.code's
                        try:
                            response_headers = dict(
                                [(item[0].title(), item[1])
                                 for item in list(response.info().items())]
                            )  # Kodi 18 vs 19 differ here: 18 merged repeated "Set-Cookie" headers into one key; 19 kept only the last value.
                        except:
                            from resources.lib.modules import log_utils
                            log_utils.error()
                            response_headers = response.headers
                        return (str(response), str(response.code),
                                response_headers)

        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass
            if close is True: response.close()
            return result
        elif output == 'geturl':
            result = response.geturl()
            if close is True: response.close()
            return result
        elif output == 'headers':
            result = response.headers
            if close is True: response.close()
            return result
        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024): return
            try:
                result = response.read(16 * 1024)
            except:
                result = response  # testing
            if close is True: response.close()
            return result
        elif output == 'file_size':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = '0'
            if close is True: response.close()
            return content
        if flare != 'cloudflare':
            if limit == '0': result = response.read(224 * 1024)
            elif limit is not None: result = response.read(int(limit) * 1024)
            else: result = response.read(5242880)
        try:
            encoding = response.headers["Content-Encoding"]
        except:
            encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=BytesIO(result)).read()
        if not as_bytes:
            result = result.decode('utf-8')

        if not as_bytes and 'sucuri_cloudproxy_js' in result:  # who da f**k?
            su = sucuri().get(result)
            headers['Cookie'] = su
            req = urllib2.Request(url, data=post)
            _add_request_header(req, headers)
            response = urllib2.urlopen(req, timeout=int(timeout))
            if limit == '0': result = response.read(224 * 1024)
            elif limit is not None: result = response.read(int(limit) * 1024)
            else: result = response.read(5242880)
            try:
                encoding = response.headers["Content-Encoding"]
            except:
                encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(fileobj=BytesIO(result)).read()

        if not as_bytes and 'Blazingfast.io' in result and 'xhr.open' in result:  # who da f**k?
            netloc = '%s://%s' % (urlparse(url).scheme, urlparse(url).netloc)
            ua = headers['User-Agent']
            headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua,
                                          timeout)
            result = _basic_request(url,
                                    headers=headers,
                                    post=post,
                                    timeout=timeout,
                                    limit=limit)
        if output == 'extended':
            try:
                response_headers = dict(
                    [(item[0].title(), item[1])
                     for item in list(response.info().items())]
                )  # Kodi 18 vs 19 differ here: 18 merged repeated "Set-Cookie" headers into one key; 19 kept only the last value.
            except:
                from resources.lib.modules import log_utils
                log_utils.error()
                response_headers = response.headers
            try:
                response_code = str(response.code)
            except:
                response_code = str(response.status_code
                                    )  # object from CFScrape Requests object.
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                cookie = cf
            except:
                pass
            if close is True: response.close()
            return (result, response_code, response_headers, headers, cookie)
        else:
            if close is True: response.close()
            return result
    except:
        from resources.lib.modules import log_utils
        log_utils.error('Request-Error url=(%s)' % url)
        return None
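
For context, a minimal usage sketch of the output modes handled above; the import path is an assumption mirroring the log_utils import used inside this function, and the URL is illustrative only:

from resources.lib.modules import client  # assumed import path

html = client.request('https://example.com/')                        # decoded body (default)
final_url = client.request('https://example.com/', output='geturl')  # URL after redirects
cookie = client.request('https://example.com/', output='cookie')     # joined cookie string
size = client.request('https://example.com/', output='file_size')    # Content-Length, or '0'
body, code, resp_headers, req_headers, cookie = client.request(
    'https://example.com/', output='extended')                       # full tuple
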
Example #34
    def sources(self, url, hostDict, hostprDict):

        #log_utils.log('\n\n~~~ incoming sources() url')
        #log_utils.log(url)

        try:
            sources = []
            if url == None: return sources

            req = urlparse.urljoin(self.base_link, url)

            # up to four attempts (one try plus three retries) to pull up the episode-page, then bail
            for i in range(4):
                result = client.request(req, timeout=3)
                if not result == None: break

            # get the key div's contents
            # then get all the links along with preceding text hinting at host
            # ep pages sort links by hoster which is bad if the top hosters
            #	are unavailable for debrid OR if they're ONLY avail for debrid
            #	(for non-debrid peeps) so shuffle the list
            dom = dom_parser.parse_dom(result,
                                       'div',
                                       attrs={
                                           'class': 'links',
                                           'id': 'noSubs'
                                       })
            result = dom[0].content
            links = re.compile(
                '<i class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',
                re.DOTALL).findall(result)
            random.shuffle(links)

            # Here we stack the deck for debrid users by copying
            #  all debrid hosts to the top of the list
            # This is ugly but it works; a cleaner sketch follows below.
            if debrid.status() == True:
                debrid_links = []
                for pair in links:
                    for r in debrid.debrid_resolvers:
                        if r.valid_url('', pair[0].strip()):
                            debrid_links.append(pair)
                links = debrid_links + links
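                # A hedged alternative (a sketch, not the original code):
                # list.sort is stable, so keying on "not debrid-resolvable"
                # floats debrid hosts to the front of the shuffled list without
                # duplicating entries the way the concatenation above does:
                #   links.sort(key=lambda pair: not any(
                #       r.valid_url('', pair[0].strip())
                #       for r in debrid.debrid_resolvers))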

            # master list of hosts ResolveURL and placenta itself can resolve
            # we'll check against this list to not waste connections on unsupported hosts
            hostDict = hostDict + hostprDict

            conns = 0
            for pair in links:

                # try to be a little polite, and limit connections
                #  (unless we're not getting sources)
                if conns > self.max_conns and len(sources) > self.min_srcs:
                    break

                # the 2 groups from the link search = hoster name, episode page url
                host = pair[0].strip()
                link = pair[1]

                # check for valid hosts and jump to next loop if not valid
                valid, host = source_utils.is_host_valid(host, hostDict)
                #log_utils.log("\n\n** conn #%s: %s (valid:%s) %s" % (conns,host,valid,link)) #######
                if not valid: continue

                # two attempts per source link, then bail
                # NB: n sources could potentially cost n*range connections!!!
                link = urlparse.urljoin(self.base_link, link)
                for i in range(2):
                    result = client.request(link, timeout=3)
                    conns += 1
                    if not result == None: break

                # if both attempts failed, using the result will too, so bail to next loop
                try:
                    link = re.compile('href="([^"]+)"\s+class="action-btn'
                                      ).findall(result)[0]
                except:
                    continue

                # I don't think this scraper EVER has direct links, but...
                #  (if nothing else, it sets the quality)
                try:
                    u_q, host, direct = source_utils.check_directstreams(
                        link, host)
                except:
                    continue

                # check_directstreams strangely returns a list instead of a single 2-tuple
                link, quality = u_q[0]['url'], u_q[0]['quality']
                #log_utils.log('	checked host: %s' % host)
                #log_utils.log('	checked direct: %s' % direct)
                #log_utils.log('	quality, link: %s, %s' % (quality,link))
                #log_utils.log('	# of urls: %s' % len(u_q))

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'direct': direct,
                    'debridonly': False
                })

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('WATCHSERIES - Exception: \n' + str(failure))
            return sources
Example #35
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            key = urlparse.urljoin(self.base_link, self.key_link)
            key = proxy.request(key, 'main_body')
            key = client.parseDOM(key,
                                  'input',
                                  ret='value',
                                  attrs={'name': 'key'})[0]

            query = self.moviesearch_link % (urllib.quote_plus(
                cleantitle.query(title)), key)
            query = urlparse.urljoin(self.base_link, query)

            result = str(proxy.request(query, 'main_body'))
            if 'page=2' in result or 'page%3D2' in result:
                result += str(proxy.request(query + '&page=2', 'main_body'))

            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'index_item.+?'})

            title = 'watch' + cleantitle.get(title)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]

            result = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]

            r = [(proxy.parse(i[0]), i[1]) for i in result]

            match = [
                i[0] for i in r
                if title == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]
            ]

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    r = proxy.request(urlparse.urljoin(self.base_link, i),
                                      'main_body')
                    r = re.findall('(tt\d+)', r)
                    if imdb in r:
                        url = i
                        break
                except:
                    pass

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('Primewire - Exception: \n' + str(failure))
            return
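
The de-duplication of match2 above is the order-preserving list-comprehension idiom these scrapers use throughout; a standalone illustration:

match2 = ['a', 'b', 'a', 'c', 'b']
deduped = [x for y, x in enumerate(match2) if x not in match2[:y]]
print(deduped)  # ['a', 'b', 'c'] -- first occurrence wins, order preserved
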
Example #36
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'],
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'],
                data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            html = client.request(url)
            html = html.replace('&nbsp;', ' ')
            try:
                results = client.parseDOM(html, 'table', attrs={'id': 'searchResult'})[0]
            except Exception:
                return sources
            rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
            if not rows:  # re.findall returns a list, never None
                return sources

            for entry in rows:
                try:
                    try:
                        name = re.findall('class="detLink" title=".+?">(.+?)</a>', entry, re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)
                        if not cleantitle.get(t) == cleantitle.get(title):
                            continue
                    except Exception:
                        continue
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr:
                        continue

                    try:
                        seeders = int(re.findall('<td align="right">(.+?)</td>', entry, re.DOTALL)[0])
                    except Exception:
                        continue
                    if self.min_seeders > seeders:
                        continue

                    try:
                        link = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', entry, re.DOTALL)[0])
                        link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                    except Exception:
                        continue

                    quality, info = source_utils.get_release_quality(name, name)

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', entry)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except Exception:
                        pass

                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                                    'url': link, 'info': info, 'direct': False, 'debridonly': True})
                except Exception:
                    failure = traceback.format_exc()
                    log_utils.log('TPB - Cycle Broken: \n' + str(failure))
                    continue

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('TPB - Exception: \n' + str(failure))
            return sources
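
The size normalisation in the loop above recurs in several scrapers below; a hedged sketch of it as a reusable helper (the function name is invented here):

import re

def parse_size_gb(text):
    # Pull the last "<number> GB/GiB/MB/MiB" token out of text and
    # normalise it to a "%.2f GB" string; return None if nothing matches.
    sizes = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', text)
    if not sizes:
        return None
    size = sizes[-1]
    div = 1 if size.endswith(('GB', 'GiB')) else 1024
    value = float(re.sub(r'[^0-9.,]', '', size).replace(',', '.')) / div
    return '%.2f GB' % value

print(parse_size_gb('Some.Movie.2019.1080p.BluRay [1.37 GiB]'))  # 1.37 GB
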
Example #37
def tvshows(tvshowtitle, imdb, tmdb, season, watched):
    control.busy()
    try:
        import sys, xbmc

        if not trakt.getTraktIndicatorsInfo() == False: raise Exception()

        from resources.lib.indexers import episodes

        name = control.addonInfo('name')

        dialog = control.progressDialogBG
        dialog.create(str(name), str(tvshowtitle))
        dialog.update(0, str(name), str(tvshowtitle))

        #log_utils.log('playcount_season: ' + str(season))
        items = []
        if season:
            items = episodes.episodes().get(tvshowtitle,
                                            '0',
                                            imdb,
                                            tmdb,
                                            meta=None,
                                            season=season,
                                            idx=False)
            items = [
                i for i in items
                if int('%01d' % int(season)) == int('%01d' % int(i['season']))
            ]
            items = [{
                'label':
                '%s S%02dE%02d' %
                (tvshowtitle, int(i['season']), int(i['episode'])),
                'season':
                int('%01d' % int(i['season'])),
                'episode':
                int('%01d' % int(i['episode'])),
                'unaired':
                i['unaired']
            } for i in items]

            for i in range(len(items)):
                if control.monitor.abortRequested(): return sys.exit()

                dialog.update(int((100 / float(len(items))) * i), str(name),
                              str(items[i]['label']))

                _season, _episode, unaired = items[i]['season'], items[i][
                    'episode'], items[i]['unaired']
                if int(watched) == 7:
                    if not unaired == 'true':
                        bookmarks.reset(1, 1, 'episode', imdb, _season,
                                        _episode)
                    else:
                        pass
                else:
                    bookmarks._delete_record('episode', imdb, _season,
                                             _episode)

        else:
            seasons = episodes.seasons().get(tvshowtitle,
                                             '0',
                                             imdb,
                                             tmdb,
                                             meta=None,
                                             idx=False)
            seasons = [i['season'] for i in seasons]
            #log_utils.log('playcount_seasons: ' + str(seasons))
            for s in seasons:
                items = episodes.episodes().get(tvshowtitle,
                                                '0',
                                                imdb,
                                                tmdb,
                                                meta=None,
                                                season=s,
                                                idx=False)
                items = [{
                    'label':
                    '%s S%02dE%02d' %
                    (tvshowtitle, int(i['season']), int(i['episode'])),
                    'season':
                    int('%01d' % int(i['season'])),
                    'episode':
                    int('%01d' % int(i['episode'])),
                    'unaired':
                    i['unaired']
                } for i in items]
                #log_utils.log('playcount_items2: ' + str(items))

                for i in range(len(items)):
                    if control.monitor.abortRequested(): return sys.exit()

                    dialog.update(int((100 / float(len(items))) * i),
                                  str(name), str(items[i]['label']))

                    _season, _episode, unaired = items[i]['season'], items[i][
                        'episode'], items[i]['unaired']
                    if int(watched) == 7:
                        if not unaired == 'true':
                            bookmarks.reset(1, 1, 'episode', imdb, _season,
                                            _episode)
                        else:
                            pass
                    else:
                        bookmarks._delete_record('episode', imdb, _season,
                                                 _episode)

        try:
            dialog.close()
        except:
            pass
    except:
        log_utils.log('playcount_local_shows', 1)
        try:
            dialog.close()
        except:
            pass

    try:
        if trakt.getTraktIndicatorsInfo() == False: raise Exception()

        #log_utils.log('playcount_season: ' + str(season))
        if season:
            from resources.lib.indexers import episodes
            items = episodes.episodes().get(tvshowtitle,
                                            '0',
                                            imdb,
                                            tmdb,
                                            meta=None,
                                            season=season,
                                            idx=False)
            items = [(int(i['season']), int(i['episode'])) for i in items]
            items = [
                i[1] for i in items
                if int('%01d' % int(season)) == int('%01d' % i[0])
            ]
            for i in items:
                if int(watched) == 7:
                    trakt.markEpisodeAsWatched(imdb, season, i)
                else:
                    trakt.markEpisodeAsNotWatched(imdb, season, i)
        else:
            if int(watched) == 7: trakt.markTVShowAsWatched(imdb)
            else: trakt.markTVShowAsNotWatched(imdb)
        trakt.cachesyncTVShows()
    except:
        log_utils.log('playcount_trakt_shows', 1)
        pass

    control.refresh()
    control.idle()
Example #38
def updateLastShip():
    try:
        pluginVideoLastship()
        log_utils.log("DevUpdate Complete")
    except Exception as e:
        log_utils.log(e)
Example #39
    def sources(self, url, hostDict, hostprDict):
        sources = []
        if url == None: return sources
        try:
            OPEN = client.request(url)
            headers = {
                'Origin':
                'http://hdpopcorns.co',
                'Referer':
                url,
                'X-Requested-With':
                'XMLHttpRequest',
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
            }
            # 1080p form: scrape the hidden form fields, post them back, then pull the final link
            try:
                params = re.compile(
                    'FileName1080p.+?value="(.+?)".+?FileSize1080p.+?value="(.+?)".+?value="(.+?)"',
                    re.DOTALL).findall(OPEN)
                for param1, param2, param3 in params:
                    request_url = '%s/select-movie-quality.php' % (
                        self.base_link)
                    form_data = {
                        'FileName1080p': param1,
                        'FileSize1080p': param2,
                        'FSID1080p': param3
                    }
                link = requests.post(request_url,
                                     data=form_data,
                                     headers=headers,
                                     timeout=3).content
                final_url = re.compile('<strong>1080p</strong>.+?href="(.+?)"',
                                       re.DOTALL).findall(link)[0]
                sources.append({
                    'source': 'DirectLink',
                    'quality': '1080p',
                    'language': 'en',
                    'url': final_url,
                    'direct': True,
                    'debridonly': False
                })
            except:
                pass
            # 720p form: same flow with the 720p field names
            try:
                params = re.compile(
                    'FileName720p.+?value="(.+?)".+?FileSize720p".+?value="(.+?)".+?value="(.+?)"',
                    re.DOTALL).findall(OPEN)
                for param1, param2, param3 in params:
                    request_url = '%s/select-movie-quality.php' % (
                        self.base_link)
                    form_data = {
                        'FileName720p': param1,
                        'FileSize720p': param2,
                        'FSID720p': param3
                    }
                link = requests.post(request_url,
                                     data=form_data,
                                     headers=headers,
                                     timeout=3).content
                final_url = re.compile('<strong>720p</strong>.+?href="(.+?)"',
                                       re.DOTALL).findall(link)[0]
                sources.append({
                    'source': 'DirectLink',
                    'quality': '720p',
                    'language': 'en',
                    'url': final_url,
                    'direct': True,
                    'debridonly': False
                })
            except:
                pass
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Popcorn - Exception: \n' + str(failure))
            return sources
Example #40
def __getTrakt(url, post=None):
    try:
        url = urlparse.urljoin(BASE_URL, url)
        post = json.dumps(post) if post else None
        headers = {
            'Content-Type': 'application/json',
            'trakt-api-key': V2_API_KEY,
            'trakt-api-version': 2
        }

        if getTraktCredentialsInfo():
            headers.update({
                'Authorization':
                'Bearer %s' % control.setting('trakt.token')
            })

        result = client.request(url,
                                post=post,
                                headers=headers,
                                output='extended',
                                error=True)

        resp_code = result[1]
        resp_header = result[2]
        result = result[0]

        if resp_code in [
                '500', '502', '503', '504', '520', '521', '522', '524'
        ]:
            log_utils.log('Temporary Trakt Error: %s' % resp_code,
                          log_utils.LOGWARNING)
            return
        elif resp_code in ['404']:
            log_utils.log('Object Not Found : %s' % resp_code,
                          log_utils.LOGWARNING)
            return

        if resp_code not in ['401', '405']:
            return result, resp_header

        oauth = urlparse.urljoin(BASE_URL, '/oauth/token')
        opost = {
            'client_id': V2_API_KEY,
            'client_secret': CLIENT_SECRET,
            'redirect_uri': REDIRECT_URI,
            'grant_type': 'refresh_token',
            'refresh_token': control.setting('trakt.refresh')
        }

        result = client.request(oauth, post=json.dumps(opost), headers=headers)
        result = utils.json_loads_as_str(result)

        token, refresh = result['access_token'], result['refresh_token']

        control.setSetting(id='trakt.token', value=token)
        control.setSetting(id='trakt.refresh', value=refresh)

        headers['Authorization'] = 'Bearer %s' % token

        result = client.request(url,
                                post=post,
                                headers=headers,
                                output='extended',
                                error=True)
        return result[0], result[2]
    except Exception as e:
        log_utils.log('Unknown Trakt Error: %s' % e, log_utils.LOGWARNING)
        pass
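
A hedged sketch of how a caller might consume __getTrakt's two return shapes (the endpoint path is illustrative only):

resp = __getTrakt('/shows/trending?page=1&limit=20')
if resp:
    body, resp_header = resp                 # (result, response headers) on success
    items = utils.json_loads_as_str(body)
else:
    items = []                               # None on the error paths logged above
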
Example #41
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            query = '%s %s' % (data['title'], data['year'])
            url = self.search_link % urllib.quote(query)
            url = urlparse.urljoin(self.base_link, url)
            html = client.request(url, headers=self.headers)
            try:
                results = client.parseDOM(html, 'div', attrs={'class':
                                                              'row'})[2]
            except:
                return sources
            items = re.findall(
                'class="browse-movie-bottom">(.+?)</div>\s</div>', results,
                re.DOTALL)
            if not items:  # re.findall returns a list, never None
                return sources
            for entry in items:
                try:
                    try:
                        link, name = re.findall(
                            '<a href="(.+?)" class="browse-movie-title">(.+?)</a>',
                            entry, re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        if not cleantitle.get(name) == cleantitle.get(
                                data['title']):
                            continue
                    except:
                        continue
                    y = entry[-4:]
                    if not y == data['year']:
                        continue
                    response = client.request(link, headers=self.headers)
                    try:
                        entries = client.parseDOM(
                            response, 'div', attrs={'class': 'modal-torrent'})
                        for torrent in entries:
                            link, name = re.findall(
                                'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
                                torrent, re.DOTALL)[0]
                            link = 'magnet:%s' % link
                            link = str(
                                client.replaceHTMLCodes(link).split('&tr')[0])
                            quality, info = source_utils.get_release_quality(
                                name, name)
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                                    torrent)[-1]
                                div = 1 if size.endswith(
                                    ('GB', 'GiB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '',
                                                    size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass
                            info = ' | '.join(info)
                            if control.setting('torrent.rd_check') == 'true':
                                checked = rd_check.rd_cache_check(link)
                                if checked:
                                    sources.append({
                                        'source': 'Cached Torrent',
                                        'quality': quality,
                                        'language': 'en',
                                        'url': checked,
                                        'info': info,
                                        'direct': False,
                                        'debridonly': True
                                    })
                            else:
                                sources.append({
                                    'source': 'Torrent',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'info': info,
                                    'direct': False,
                                    'debridonly': True
                                })
                    except:
                        continue
                except:
                    continue
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('---Ytsam Testing - Exception: \n' + str(failure))
            return sources
Example #42
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title'].replace(':', '').lower()
            year = data['year']

            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.post_link)

            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(
                query)

            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [(dom_parser2.parse_dom(i,
                                        'div',
                                        attrs={'class': 'news-title'}))
                 for i in r if data['imdb'] in i]
            r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r
                 if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]

            hostDict = hostprDict + hostDict

            for item in r:
                try:
                    name = item[1]
                    y = re.findall('\((\d{4})\)', name)[0]
                    if not y == year: raise Exception()

                    s = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                        name)
                    s = s[0] if s else '0'
                    data = client.request(item[0])
                    data = dom_parser2.parse_dom(data,
                                                 'div',
                                                 attrs={'id': 'r-content'})
                    data = re.findall(
                        '\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
                        data[0].content, re.DOTALL)
                    u = [(i[0], i[1], s) for i in data if i]

                    for name, url, size in u:
                        try:
                            if '4K' in name:
                                quality = '4K'
                            elif '1080p' in name:
                                quality = '1080p'
                            elif '720p' in name:
                                quality = '720p'
                            elif any(i in name
                                     for i in ['dvdscr', 'r5', 'r6']):
                                quality = 'SCR'
                            elif any(i in name for i in [
                                    'camrip', 'tsrip', 'hdcam', 'hdts',
                                    'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'
                            ]):
                                quality = 'CAM'
                            else:
                                quality = '720p'

                            info = []
                            if '3D' in name or '.3D.' in url:
                                info.append('3D')
                                quality = '1080p'
                            if any(i in name
                                   for i in ['hevc', 'h265', 'x265']):
                                info.append('HEVC')
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                                    size)[-1]
                                div = 1 if size.endswith(
                                    ('Gb', 'GiB', 'GB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '',
                                                    size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass

                            info = ' | '.join(info)

                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            if any(x in url
                                   for x in ['.rar', '.zip', '.iso', 'turk']):
                                continue

                            if 'ftp' in url:
                                host = 'COV'
                                direct = True
                            else:
                                direct = False
                                host = 'turbobit.net'

                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')

                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': direct,
                                'debridonly': True
                            })

                        except:
                            pass
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('UltraHD - Exception: \n' + str(failure))
            return sources
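
A note on the any() membership tests in the quality ladder above: the direction of the generator matters. A standalone illustration:

name = 'Movie.2019.HDCAM.x264'.lower()
# Iterating the *characters* of name: a single character never equals 'hdcam'.
print(any(i in ['hdcam', 'ts'] for i in name))   # False
# Iterating the tokens and testing substring membership, as intended:
print(any(i in name for i in ['hdcam', 'ts']))   # True
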
Example #43
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not control.setting(
                    'pmcached.providers') == 'true' and not control.setting(
                        'rdcached.providers') == 'true':
                return sources
            if self.pm_api_key == '' and self.rd_api_key == '': return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            if 'tvshowtitle' in data and control.setting(
                    'pmcached.providers'
            ) == 'true' and not self.pm_api_key == '':
                season = 'S%02d' % (int(data['season']))
                episode = 'E%02d' % (int(data['episode']))
                seasonquery = '%s S%02d' % (data['tvshowtitle'],
                                            int(data['season']))
                seasonquery = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ',
                                     seasonquery)
                sources += self.searchShowPack(title, season, episode,
                                               seasonquery)

            url = self.search_link % cleantitle.geturl(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

            result = client.parseDOM(r, 'table', attrs={'class': 'tl'})[0]
            result = client.parseDOM(result, 'tr', attrs={'class': 'tlr|tlz'})
            result = [(client.parseDOM(i,
                                       'a',
                                       attrs={'title': 'Magnet link'},
                                       ret='href')[0],
                       client.parseDOM(i, 'td', attrs={'class': 'tli'})[0], i)
                      for i in result]
            result = [(i[0], client.parseDOM(i[1], 'a')[0], i[2])
                      for i in result]

            items = []

            for item in result:
                try:
                    name = item[1]
                    magnetlink = item[0]

                    size = ''
                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            item[2])[0]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                    except:
                        pass

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)
                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()
                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|(?:S|s)\d*(?:E|e)\d*|(?:S|s)\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()
                    if not y == hdlr: raise Exception()

                    u = [(name, magnetlink, size)]
                    items += u
                except:
                    pass

            if control.setting('pmcached.providers'
                               ) == 'true' and not self.pm_api_key == '':
                for item in items:
                    try:
                        _hash = re.findall('btih:(.*?)\W', item[1])[0]
                        checkurl = urlparse.urljoin(
                            self.pm_base_link, self.pm_checkcache_link %
                            (self.pm_api_key, _hash, self.pm_api_key))
                        r = client.request(checkurl)
                        if not 'finished' in r: raise Exception()

                        name = client.replaceHTMLCodes(item[0])
                        quality, info = source_utils.get_release_quality(
                            name, None)
                        filetype = source_utils.getFileType(name)
                        info += [filetype.strip(), name]
                        info = filter(None, info)
                        info = ' | '.join(info)
                        if not item[2] == '':
                            info = '%s | %s' % (item[2], info)
                        url = 'magnet:?xt=urn:btih:%s' % _hash

                        sources.append({
                            'source': 'PMCACHED',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': False,
                            'cached': True
                        })
                    except:
                        pass

            if control.setting('rdcached.providers'
                               ) == 'true' and not self.rd_api_key == '':
                checktorr_r = self.checkrdcache()
                checktorr_result = json.loads(checktorr_r)

                for item in items:
                    try:
                        _hash = re.findall('btih:(.*?)\W', item[1])[0]
                        _hash = _hash.lower()

                        url = ''
                        for i in checktorr_result:
                            try:
                                if _hash == i['hash'] and i[
                                        'status'] == 'downloaded':
                                    url = i['links'][0]
                                    break
                            except:
                                pass

                        if url == '':
                            checkurl = urlparse.urljoin(
                                self.rd_base_link, self.rd_checkcache_link %
                                (_hash, self.rd_api_key))
                            r = client.request(checkurl)
                            checkinstant = json.loads(r)
                            checkinstant = checkinstant[_hash]

                            checkinstant_num = 0
                            try:
                                checkinstant_num = len(checkinstant['rd'])
                            except:
                                pass

                            if checkinstant_num == 0: raise Exception()
                            url = 'rdmagnet:?xt=urn:btih:%s' % _hash

                        if url == '': raise Exception()

                        name = client.replaceHTMLCodes(item[0])
                        quality, info = source_utils.get_release_quality(
                            name, None)
                        filetype = source_utils.getFileType(name)
                        info += [filetype.strip(), name]
                        info = filter(None, info)
                        info = ' | '.join(info)
                        if not item[2] == '':
                            info = '%s | %s' % (item[2], info)

                        sources.append({
                            'source': 'RDCACHED',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': False,
                            'cached': True
                        })
                    except:
                        pass

            return sources
        except:
            log_utils.log(
                '>>>> %s TRACE <<<<\n%s' %
                (__file__.upper().split('\\')[-1].split('.')[0],
                 traceback.format_exc()), log_utils.LOGDEBUG)
            return sources
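
Both cache checks above key on the btih infohash parsed out of the magnet link; a hedged sketch of that extraction as a helper (the function name is invented here):

import re

def infohash_from_magnet(magnet):
    # Lower-cased to match the Real-Debrid cache lookup above.
    match = re.search(r'btih:([a-zA-Z0-9]+)', magnet)
    return match.group(1).lower() if match else None

print(infohash_from_magnet('magnet:?xt=urn:btih:0123ABCD&dn=example'))  # 0123abcd
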
Example #45
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])  # eval of addon-supplied data; ast.literal_eval would be a safer parse
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            mid = re.findall('-(\d+)', url)[-1]

            try:
                headers = {'Referer': url}
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = client.request(u, headers=headers, XHR=True)
                r = json.loads(r)['html']
                r = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})

                ids = client.parseDOM(r, 'li', ret='data-id')
                servers = client.parseDOM(r, 'li', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)

                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                            script = client.request(url)
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()

                            u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                            r = client.request(u, XHR=True)
                            json_sources = json.loads(r)['playlist'][0]['sources']

                            try:
                                if 'google' in json_sources['file']:
                                    quality = 'HD'

                                    if 'bluray' in json_sources['file'].lower():
                                        quality = '1080p'

                                    sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en',
                                                    'url': json_sources['file'], 'direct': True, 'debridonly': False})

                            except Exception:
                                if 'blogspot' in json_sources[0]['file']:
                                    url = [i['file'] for i in json_sources if 'file' in i]
                                    url = [directstream.googletag(i) for i in url]
                                    url = [i[0] for i in url if i]

                                    for s in url:
                                        sources.append({'source': 'gvideo', 'quality': s['quality'], 'language': 'en',
                                                        'url': s['url'], 'direct': True, 'debridonly': False})

                                elif 'lemonstream' in json_sources[0]['file']:
                                    sources.append({
                                        'source': 'CDN',
                                        'quality': 'HD',
                                        'language': 'en',
                                        'url': json_sources[0]['file'] + '|Referer=' + self.base_link,
                                        'direct': True,
                                        'debridonly': False})
                    except:
                        pass
            except:
                pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMoviez - Exception: \n' + str(failure))
            return sources
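
The third token branch above lifts _x/_y straight out of the returned script with regexes; a standalone illustration (the script text is invented here):

import re

script = "var _x='abc123';var _y='def456';"
x = re.search('''_x=['"]([^"']+)''', script).group(1)
y = re.search('''_y=['"]([^"']+)''', script).group(1)
print({'x': x, 'y': y})  # {'x': 'abc123', 'y': 'def456'}
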
Example #46
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            q = '%s' % cleantitle.get_gan_url(data['title'])
            url = self.base_link + self.search_link % q
            r = cfscrape.get(url).content
            v = re.compile(
                '<a href="(.+?)" class="ml-mask jt" title=".+?">\r\n\t\t\t\t\t\t\t\t\t\t\t\t<span class=".+?">(.+?)</span>'
            ).findall(r)
            for url, quality in v:
                t = '%s-%s' % (cleantitle.get_gan_url(data['title']).replace(
                    '+', '-').replace(':%20', '-').replace('%20',
                                                           '-'), data['year'])
                if t in url:
                    key = url.split('-hd')[1]
                    r = cfscrape.get(
                        'https://123movie.nu/moviedownload.php?q=' +
                        key).content
                    r = re.compile(
                        '<a rel=".+?" href="(.+?)" target=".+?">').findall(r)
                    for url in r:
                        if any(x in url for x in ['.rar']):
                            continue
                        quality, info = source_utils.get_release_quality(
                            quality, url)
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid:
                            continue
                        info = ' | '.join(info)
                        if control.setting('deb.rd_check') == 'true':
                            check = rd_check.rd_deb_check(url)
                            if check:
                                info = 'RD Checked' + ' | ' + info
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': check,
                                    'info': info,
                                    'direct': False,
                                    'debridonly': True
                                })
                        else:
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('---Ganool Testing - Exception: \n' + str(failure))
            return sources
Example #47
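        # Builds one Kodi tvshow metadata dict from a TVMaze record,
        # back-fills missing imdb/tmdb/tvdb ids via TMDb and Trakt lookups,
        # merges TMDb showSeasons meta, then appends the item to self.list
        # and writes it to metacache.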
        def items_list(tvmaze_id):
            # if i['metacache']: return # not possible with only a tvmaze_id
            try:
                values = {}
                values['next'] = next
                values['tvmaze'] = tvmaze_id
                url = self.tvmaze_info_link % tvmaze_id
                item = get_request(url)
                values['content'] = item.get('type', '').lower()
                values['mediatype'] = 'tvshow'
                values['title'] = item.get('name')
                values['originaltitle'] = values['title']
                values['tvshowtitle'] = values['title']
                values['premiered'] = str(item.get(
                    'premiered', '')) if item.get('premiered') else ''
                try:
                    values['year'] = values['premiered'][:4]
                except:
                    values['year'] = ''
                ids = item.get('externals')
                imdb = str(ids.get('imdb', '')) if ids.get('imdb') else ''
                tvdb = str(ids.get('thetvdb',
                                   '')) if ids.get('thetvdb') else ''
                tmdb = ''  # TVMaze does not have tmdb_id in api
                studio = item.get('network', {}) or item.get('webChannel', {})
                values['studio'] = studio.get('name', '')
                values['genre'] = []
                for i in item['genres']:
                    values['genre'].append(i.title())
                if values['genre'] == []: values['genre'] = 'NA'
                values['duration'] = int(item.get(
                    'runtime', '')) * 60 if item.get('runtime') else ''
                values['rating'] = str(item.get('rating').get(
                    'average',
                    '')) if item.get('rating').get('average') else ''
                values['plot'] = client.cleanHTML(item['summary'])
                values['status'] = item.get('status', '')
                values['castandart'] = []
                for person in item['_embedded']['cast']:
                    try:
                        values['castandart'].append({
                            'name':
                            person['person']['name'],
                            'role':
                            person['character']['name'],
                            'thumbnail':
                            (person['person']['image']['medium']
                             if person['person']['image']['medium'] else '')
                        })
                    except:
                        pass
                    if len(values['castandart']) == 150: break
                image = item.get('image', {}) or ''
                values['poster'] = image.get('original', '') if image else ''
                values['fanart'] = ''
                values['banner'] = ''
                values['mpaa'] = ''
                values['votes'] = ''
                try:
                    values['airday'] = item['schedule']['days'][0]
                except:
                    values['airday'] = ''
                values['airtime'] = item['schedule']['time'] or ''
                try:
                    values['airzone'] = item['network']['country']['timezone']
                except:
                    values['airzone'] = ''
                values['metacache'] = False

                #### -- Missing id's lookup -- ####
                if not tmdb and (imdb or tvdb):
                    try:
                        result = cache.get(tmdb_indexer.TVshows().IdLookup, 96,
                                           imdb, tvdb)
                        tmdb = str(result.get('id',
                                              '')) if result.get('id') else ''
                    except:
                        tmdb = ''
                if not imdb or not tmdb or not tvdb:
                    try:
                        trakt_ids = trakt.SearchTVShow(quote_plus(
                            values['tvshowtitle']),
                                                       values['year'],
                                                       full=False)
                        if not trakt_ids: raise Exception
                        ids = trakt_ids[0].get('show', {}).get('ids', {})
                        if not imdb:
                            imdb = str(ids.get('imdb',
                                               '')) if ids.get('imdb') else ''
                        if not tmdb:
                            tmdb = str(ids.get('tmdb',
                                               '')) if ids.get('tmdb') else ''
                        if not tvdb:
                            tvdb = str(ids.get('tvdb',
                                               '')) if ids.get('tvdb') else ''
                    except:
                        log_utils.error()
#################################
                if not tmdb:
                    return log_utils.log(
                        'tvshowtitle: (%s) missing tmdb_id: ids={imdb: %s, tmdb: %s, tvdb: %s}'
                        % (values['tvshowtitle'], imdb, tmdb, tvdb), __name__,
                        log_utils.LOGDEBUG
                    )  # log TMDb shows that they do not have
                # self.list = metacache.fetch(self.list, self.lang, self.user)
                # if self.list['metacache'] is True: raise Exception()

                showSeasons = cache.get(
                    tmdb_indexer.TVshows().get_showSeasons_meta, 96, tmdb)
                if not showSeasons: return
                showSeasons = dict(
                    (k, v) for k, v in iter(showSeasons.items())
                    if v is not None and v != ''
                )  # removes empty keys so .update() doesn't over-write good meta
                values.update(showSeasons)
                if not values.get('imdb'): values['imdb'] = imdb
                if not values.get('tmdb'): values['tmdb'] = tmdb
                if not values.get('tvdb'): values['tvdb'] = tvdb
                for k in ('seasons', ):
                    values.pop(
                        k, None
                    )  # pop() keys from showSeasons that are not needed anymore
                if self.enable_fanarttv:
                    extended_art = fanarttv_cache.get(fanarttv.get_tvshow_art,
                                                      168, tvdb)
                    if extended_art: values.update(extended_art)
                meta = {
                    'imdb': imdb,
                    'tmdb': tmdb,
                    'tvdb': tvdb,
                    'lang': self.lang,
                    'user': self.user,
                    'item': values
                }  # DO NOT move this after "values = dict()" below or it becomes the same object and "del meta['item']['next']" removes it from both
                values = dict((k, v) for k, v in iter(values.items())
                              if v is not None and v != '')
                self.list.append(values)
                if 'next' in meta.get('item'):
                    del meta['item']['next']  # next can not exist in metacache
                self.meta.append(meta)
                self.meta = [
                    i for i in self.meta if i.get('tmdb')
                ]  # without this filter, items missing a tmdb id were dropped from the UI but still written to metacache
                metacache.insert(self.meta)
            except:
                log_utils.error()
Example #48
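# Chunked file downloader: buffers up to five chunks before flushing to disk,
# raises a progress notification every 20%, and retries with a fresh ranged
# response (when the server advertises Accept-Ranges) before giving up.
# getResponse() and done() are helpers defined elsewhere in this module.
# Hypothetical call, for illustration only:
#   doDownload(link, '/downloads/Movie (2020).mkv', 'Movie', icon, headers)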
def doDownload(url, dest, title, image, headers):
    file = dest.rsplit(os.sep, 1)[-1]
    resp = getResponse(url, headers, 0)
    if not resp:
        control.hide()
        return control.okDialog(
            title, dest + ' - Download failed: No response from server')
    try:
        content = int(resp.headers['Content-Length'])
    except:
        content = 0
    try:
        resumable = 'bytes' in resp.headers['Accept-Ranges'].lower()
    except:
        resumable = False
    if content < 1:
        control.hide()
        return control.okDialog(
            title, file + ' - Unknown filesize: Unable to download')
    size = 1024 * 1024
    gb = str(round(content / float(1073741824), 2))
    if content < size:
        size = content
    total = 0
    notify = 0
    errors = 0
    count = 0
    resume = 0
    sleep = 0
    control.hide()
    if control.yesnoDialog('File Size: %sGB' % gb, 'Path: %s' % dest,
                           'Continue with download?',
                           '[B]Confirm Download[/B]', 'Confirm',
                           'Cancel') == 1:
        return
    f = control.openFile(dest, 'w')
    chunk = None
    chunks = []
    while True:
        downloaded = total
        for c in chunks:
            downloaded += len(c)
        percent = min(100 * downloaded / content, 100)
        if percent >= notify:
            control.notification(
                title=str(int(percent)) + '%',
                message=title,
                icon=image,
                time=3000
            )  # xbmcgui.Dialog().notification() auto-scroll time to complete supersedes the allowed "time=" in Silvo; dest removed
            notify += 20
        chunk = None
        error = False
        try:
            chunk = resp.read(size)
            if not chunk:
                if percent < 99: error = True
                else:
                    while len(chunks) > 0:
                        c = chunks.pop(0)
                        f.write(c)
                        del c
                    f.close()
                    log_utils.log('Download Complete: %s' % (dest),
                                  level=log_utils.LOGDEBUG)
                    return done(title, dest, True)
        except Exception as e:  # bind the exception so errno can be checked below
            log_utils.error('DOWNLOADER EXCEPTION: ')
            error = True
            sleep = 10
            errno = 0
            if hasattr(e, 'errno'):
                errno = e.errno
            if errno == 10035:  # 'A non-blocking socket operation could not be completed immediately'
                pass
            if errno == 10054:  #'An existing connection was forcibly closed by the remote host'
                errors = 10  #force resume
                sleep = 30
            if errno == 11001:  # 'getaddrinfo failed'
                errors = 10  #force resume
                sleep = 30
        if chunk:
            errors = 0
            chunks.append(chunk)
            if len(chunks) > 5:
                c = chunks.pop(0)
                f.write(c)
                total += len(c)
                del c
        if error:
            errors += 1
            count += 1
            control.sleep(sleep * 1000)
        if (resumable and errors > 0) or errors >= 10:
            if (not resumable and resume >= 50) or resume >= 500:  # Give up!
                log_utils.log(
                    'Download Canceled: %s - too many errors whilst downloading'
                    % (dest),
                    level=log_utils.LOGWARNING)
                return done(title, dest, False)
            resume += 1
            errors = 0
            if resumable:
                chunks = []
                resp = getResponse(url, headers, total)  # create new response
Example #49
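    # Torrent scraper: queries a JSON search API, checks each release name
    # against the cleaned title and the SxxExx/year handle, and optionally
    # swaps the magnet for a Real-Debrid cached link via rd_check.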
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            imdb_id = data['imdb']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link,
                                   self.search_link % urllib.quote_plus(query))

            r = client.request(url)

            result = json.loads(r)
            result = result['results']

            items = []

            for item in result:
                try:
                    name = item['title']
                    url = item['magnet']

                    size = ''
                    try:
                        size = item['size']
                        size = float(size) / (1024**3)
                        size = '%.2f GB' % size
                    except:
                        pass

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)
                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()
                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|(?:S|s)\d*(?:E|e)\d*|(?:S|s)\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()
                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(
                        name, name)
                    info.append(size)
                    info = ' | '.join(info)
                    if control.setting('torrent.rd_check') == 'true':
                        checked = rd_check.rd_cache_check(url)
                        if checked:
                            sources.append({
                                'source': 'Cached Torrent',
                                'quality': quality,
                                'language': 'en',
                                'url': checked,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                    else:
                        sources.append({
                            'source': 'Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })

                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('---SolidTorrents Testing - Exception: \n' +
                          str(failure))
            return sources
Example #50
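# Legacy Python 2 HTTP wrapper built on urllib2: optional proxy and cookie
# handlers, header spoofing, and inline challenge handling for Cloudflare
# (503 verification pages), Sucuri and Blazingfast protected hosts. The
# 'output' flag selects the return value (body, cookie, headers, geturl,
# chunk, file_size, or an extended tuple).
# Hypothetical calls, for illustration only:
#   html = request('http://example.com/page', timeout='15')
#   final_url = request('http://example.com/page', output='geturl')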
def request(url, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, XHR=False, limit=None, referer=None, cookie=None, compression=True, output='', timeout='30'):
    try:
        if not url:
            return

        handlers = []

        if not proxy == None:
            handlers += [urllib2.ProxyHandler({'http':'%s' % (proxy)}), urllib2.HTTPHandler]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)


        if output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if (2, 7, 8) < sys.version_info < (2, 7, 12):
            try:
                import ssl; ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                opener = urllib2.install_opener(opener)
            except:
                pass

        if url.startswith('//'): url = 'http:' + url

        _headers = {}
        try: _headers.update(headers)
        except: pass
        if 'User-Agent' in _headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            _headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            _headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in _headers:
            pass
        elif referer is not None:
            _headers['Referer'] = referer
        if not 'Accept-Language' in _headers:
            _headers['Accept-Language'] = 'en-US'
        if 'X-Requested-With' in _headers:
            pass
        elif XHR == True:
            _headers['X-Requested-With'] = 'XMLHttpRequest'
        if 'Cookie' in _headers:
            pass
        elif not cookie == None:
            _headers['Cookie'] = cookie
        if 'Accept-Encoding' in _headers:
            pass
        elif compression and limit is None:
            _headers['Accept-Encoding'] = 'gzip'


        if redirect == False:

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response): return response

            opener = urllib2.build_opener(NoRedirection)
            opener = urllib2.install_opener(opener)

            try: del _headers['Referer']
            except: pass

        if isinstance(post, dict):
            post = utils.byteify(post)
            post = urllib.urlencode(post)

        url = utils.byteify(url)

        request = urllib2.Request(url, data=post)
        _add_request_header(request, _headers)


        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:

            if response.code == 503:
                cf_result = response.read(5242880)
                try: encoding = response.info().getheader('Content-Encoding')
                except: encoding = None
                if encoding == 'gzip':
                    cf_result = gzip.GzipFile(fileobj=StringIO.StringIO(cf_result)).read()

                if 'cf-browser-verification' in cf_result:

                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
                    
                    if not netloc.endswith('/'): netloc += '/'

                    ua = _headers['User-Agent']

                    cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)

                    _headers['Cookie'] = cf

                    request = urllib2.Request(url, data=post)
                    _add_request_header(request, _headers)

                    response = urllib2.urlopen(request, timeout=int(timeout))
                else:
                    log_utils.log('Request-Error (%s): %s' % (str(response.code), url), log_utils.LOGDEBUG)
                    if error == False: return
            else:
                log_utils.log('Request-Error (%s): %s' % (str(response.code), url), log_utils.LOGDEBUG)
                if error == False: return


        if output == 'cookie':
            try: result = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: result = cf
            except: pass
            if close == True: response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close == True: response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close == True: response.close()
            return result

        elif output == 'chunk':
            try: content = int(response.headers['Content-Length'])
            except: content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close == True: response.close()
            return result

        elif output == 'file_size':
            try: content = int(response.headers['Content-Length'])
            except: content = 0
            response.close()
            return content
        
        if limit == '0':
            result = response.read(224 * 1024)
        elif not limit == None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)

        try: encoding = response.info().getheader('Content-Encoding')
        except: encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()


        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)

            _headers['Cookie'] = su

            request = urllib2.Request(url, data=post)
            _add_request_header(request, _headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

            try: encoding = response.info().getheader('Content-Encoding')
            except: encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()

        if 'Blazingfast.io' in result and 'xhr.open' in result:
            netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
            ua = _headers['User-Agent']
            _headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua, timeout)

            result = _basic_request(url, headers=_headers, post=post, timeout=timeout, limit=limit)

        if output == 'extended':
            try: response_headers = dict([(item[0].title(), item[1]) for item in response.info().items()])
            except: response_headers = response.headers
            response_code = str(response.code)
            try: cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: cookie = cf
            except: pass
            if close == True: response.close()
            return (result, response_code, response_headers, _headers, cookie)
        else:
            if close == True: response.close()
            return result
    except Exception as e:
        log_utils.log('Request-Error: (%s) => %s' % (str(e), url), log_utils.LOGDEBUG)
        return
Example #51
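    # Collects embed links from the page; Openload/Streamango qualities are
    # read from each host page's meta description, anything else is accepted
    # only if resolveurl recognises it as a hosted media file.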
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            headers = {'User-Agent': client.randomagent()}
            html = client.request(url, headers=headers)

            Links = re.compile('id="link_.+?target="_blank" id="(.+?)"',
                               re.DOTALL).findall(html)
            for vid_url in Links:
                if 'openload' in vid_url:
                    try:
                        source_html = client.request(vid_url, headers=headers)
                        source_string = re.compile(
                            'description" content="(.+?)"',
                            re.DOTALL).findall(source_html)[0]
                        quality, info = source_utils.get_release_quality(
                            source_string, vid_url)
                    except:
                        quality = 'DVD'
                        info = []
                    sources.append({
                        'source': 'Openload',
                        'quality': quality,
                        'language': 'en',
                        'url': vid_url,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })
                elif 'streamango' in vid_url:
                    try:
                        source_html = client.request(vid_url, headers=headers)
                        source_string = re.compile(
                            'description" content="(.+?)"',
                            re.DOTALL).findall(source_html)[0]
                        quality, info = source_utils.get_release_quality(
                            source_string, vid_url)
                    except:
                        quality = 'DVD'
                        info = []
                    sources.append({
                        'source': 'Streamango',
                        'quality': quality,
                        'language': 'en',
                        'url': vid_url,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })
                else:
                    if resolveurl.HostedMediaFile(vid_url):
                        quality, info = source_utils.get_release_quality(
                            vid_url, vid_url)
                        host = vid_url.split('//')[1].replace('www.', '')
                        host = host.split('/')[0].split('.')[0].title()
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': vid_url,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMovie - Exception: \n' + str(failure))
            return sources
Example #52
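    # Search scraper: gathers candidate posts (title, link, size), grades
    # them with source_utils, then resolves each entry on a worker thread
    # via self._get_sources, which appends into self._sources.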
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []

            if url is None:
                return self._sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, headers=self.headers)

            posts = client.parseDOM(r, 'figure')

            items = []
            for post in posts:
                try:
                    tit = client.parseDOM(post, 'img', ret='title')[0]

                    t = tit.split(hdlr)[0].replace('(', '')
                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    if hdlr not in tit:
                        raise Exception()

                    url = client.parseDOM(post, 'a', ret='href')[0]

                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                            post)[0]
                        div = 1 if size.endswith(('GB', 'GiB', 'Gb')) else 1024
                        size = float(
                            re.sub('[^0-9|/.|/,]', '', size.replace(
                                ',', '.'))) / div
                        size = '%.2f GB' % size
                    except:
                        size = '0'

                    items += [(tit, url, size)]

                except:
                    pass

            datos = []
            for title, url, size in items:
                try:
                    name = client.replaceHTMLCodes(title)

                    quality, info = source_utils.get_release_quality(
                        name, name)

                    info.append(size)
                    info = ' | '.join(info)

                    datos.append((url, quality, info))
                except:
                    pass

            threads = []
            for i in datos:
                threads.append(
                    workers.Thread(self._get_sources, i[0], i[1], i[2],
                                   hostDict, hostprDict))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('---Mkvhub Testing - Exception: \n' + str(failure))
            return self._sources
Example #53
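    # DDL scraper: walks matching post titles, pulls hoster links from each
    # post body, skips archives and SD releases, and marks everything
    # debrid-only (optionally pre-checked against Real-Debrid).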
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url).replace('%3A+', '+')

            r = cfscrape.get(url, headers=self.headers).content

            posts = client.parseDOM(r, "h2", attrs={"class": "postTitle"})
            hostDict = hostprDict + hostDict
            items = []
            for post in posts:
                try:
                    u = client.parseDOM(post, 'a', ret='href')
                    for i in u:
                        name = str(i)
                        items.append(name)
                except:
                    pass

            for item in items:
                try:
                    i = str(item)
                    r = cfscrape.get(i, headers=self.headers).content
                    u = client.parseDOM(r,
                                        "div",
                                        attrs={"class": "postContent"})
                    for t in u:
                        r = client.parseDOM(t, 'a', ret='href')
                        for url in r:
                            if any(x in url for x in ['.rar', '.zip', '.iso']):
                                continue
                            quality, info = source_utils.get_release_quality(
                                url)
                            if 'SD' in quality: continue
                            info = ' | '.join(info)
                            valid, host = source_utils.is_host_valid(
                                url, hostDict)
                            if valid:
                                if control.setting('deb.rd_check') == 'true':
                                    check = rd_check.rd_deb_check(url)
                                    if check:
                                        info = 'RD Checked' + ' | ' + info
                                        sources.append({
                                            'source': host,
                                            'quality': quality,
                                            'language': 'en',
                                            'url': check,
                                            'info': info,
                                            'direct': False,
                                            'debridonly': True
                                        })
                                else:
                                    sources.append({
                                        'source': host,
                                        'quality': quality,
                                        'language': 'en',
                                        'url': url,
                                        'info': info,
                                        'direct': False,
                                        'debridonly': True
                                    })
                except:
                    pass
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('---Maxrls Testing - Exception: \n' + str(failure))
            return sources
Example #54
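    # Real-Debrid magnet resolver: confirms the info_hash is instantly
    # available, adds the magnet, selects the video files (filtered to the
    # requested episode for shows), and unrestricts the matching link.
    # check_cache_url and store_to_cloud are module-level values defined
    # elsewhere in this file.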
    def resolve_magnet(self, magnet_url, info_hash, season, episode, ep_title):
        from resources.lib.modules.source_utils import seas_ep_filter, episode_extras_filter
        try:
            torrent_id = None
            rd_url = None
            match = False
            extensions = supported_video_extensions()
            extras_filtering_list = episode_extras_filter()
            info_hash = info_hash.lower()
            torrent_files = self._get(check_cache_url + '/' + info_hash)
            if info_hash not in torrent_files: return None
            torrent_id = self.add_magnet(magnet_url)
            torrent_info = self.torrent_info(torrent_id)
            torrent_files = torrent_files[info_hash]['rd']
            for item in torrent_files:
                try:
                    video_only = self.video_only(item, extensions)
                    if not video_only: continue
                    if season:
                        correct_file_check = False
                        item_values = [i['filename'] for i in item.values()]
                        for value in item_values:
                            correct_file_check = seas_ep_filter(season, episode, value)
                            if correct_file_check: break
                        if not correct_file_check: continue
                    torrent_keys = item.keys()
                    if len(torrent_keys) == 0: continue
                    torrent_keys = ','.join(torrent_keys)
                    self.add_torrent_select(torrent_id, torrent_keys)
                    torrent_info = self.torrent_info(torrent_id)
                    status = torrent_info.get('status')
                    if 'error' in torrent_info: continue
                    selected_files = [(idx, i) for idx, i in enumerate(
                        [i for i in torrent_info['files'] if i['selected'] == 1])]
                    if season:
                        correct_files = []
                        correct_file_check = False
                        for value in selected_files:
                            correct_file_check = seas_ep_filter(season, episode, value[1]['path'])
                            if correct_file_check:
                                correct_files.append(value[1])
                                break
                        if len(correct_files) == 0: continue
                        episode_title = re.sub(r'[^A-Za-z0-9-]+', '.', ep_title.replace("\'", '')).lower()
                        for i in correct_files:
                            compare_link = seas_ep_filter(season, episode, i['path'], split=True)
                            compare_link = re.sub(episode_title, '', compare_link)
                            if any(x in compare_link for x in extras_filtering_list):
                                continue
                            else:
                                match = True
                                break
                        if match:
                            index = [i[0] for i in selected_files
                                     if i[1]['path'] == correct_files[0]['path']][0]
                            break
                    else:
                        match, index = True, 0
                except:
                    log_utils.error()
            if match:
                rd_link = torrent_info['links'][index]
                rd_url = self.unrestrict_link(rd_link)
                if rd_url.endswith('rar'): rd_url = None
                if not store_to_cloud: self.delete_torrent(torrent_id)
                return rd_url
            self.delete_torrent(torrent_id)
        except Exception as e:
            if torrent_id: self.delete_torrent(torrent_id)
            log_utils.log(
                'Real-Debrid Error: RESOLVE MAGNET %s | %s' % (magnet_url, e),
                __name__, log_utils.LOGDEBUG)
            return None
Example #55
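    # Posts each player's data-id back to the site's video endpoint, takes
    # the returned iframe as a hoster source, and additionally mines gvideo
    # file/label pairs out of any '.asp' player page it encounters.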
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)
            c = client.request(url, output='cookie')
            result = client.request(url)

            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = dom_parser.parse_dom(result, 'div', attrs={'class': 'item'}, req='data-id')
            pages = [i.attrs['data-id'] for i in pages]

            for page in pages:
                try:
                    url = urlparse.urljoin(self.base_link, self.video_link)

                    result = client.request(url, post={'id': page}, cookie=c)
                    if not result: continue

                    url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                    if url.startswith('//'): url = 'http:' + url
                    if url.startswith('/'): url = urlparse.urljoin(self.base_link, url)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid: sources.append({'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

                    if '.asp' not in url: continue

                    result = client.request(url, cookie=c)

                    try:
                        url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                        url = url.replace('https://href.li/?', '')
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            if host == 'gvideo':
                                ginfo = directstream.google(url)
                                for g in ginfo: sources.append({'source': host, 'quality': g['quality'], 'language': 'en', 'url': g['url'], 'direct': True, 'debridonly': False})
                            else: sources.append({'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                    except: pass

                    captions = re.search('''["']?kind["']?\s*:\s*(?:\'|\")captions(?:\'|\")''', result)
                    if not captions: continue

                    matches = [(match[0], match[1]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''', result, re.DOTALL | re.I)]
                    matches += [(match[1], match[0]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', result, re.DOTALL | re.I)]

                    result = [(source_utils.label_to_quality(x[0]), x[1].replace('\/', '/')) for x in matches]
                    result = [(i[0], i[1]) for i in result if not i[1].endswith('.vtt')]

                    for quality, url in result: sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SezonlukDizi - Exception: \n' + str(failure))
            return sources
Example #56
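# Trakt API helper: sends an authenticated JSON request, maps common error
# codes to notifications, and on a 401/403/405 refreshes the OAuth token
# once before replaying the original call.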
def __getTrakt(url, post=None):
    try:
        url = urllib_parse.urljoin(
            BASE_URL, url) if not url.startswith(BASE_URL) else url
        post = json.dumps(post) if post else None
        headers = {
            'Content-Type': 'application/json',
            'trakt-api-key': V2_API_KEY,
            'trakt-api-version': '2'
        }

        if getTraktCredentialsInfo():
            headers.update({
                'Authorization':
                'Bearer %s' % control.setting('trakt.token')
            })

        # need to fix client.request post
        # result = client.request(url, post=post, headers=headers, output='extended', error=True)
        # result = utils.byteify(result)
        # resp_code = result[1]
        # resp_header = result[2]
        # result = result[0]

        if not post:
            r = requests.get(url, headers=headers, timeout=30)
        else:
            r = requests.post(url, data=post, headers=headers, timeout=30)
        r.encoding = 'utf-8'

        resp_code = str(r.status_code)
        resp_header = r.headers
        result = r.text

        if resp_code in [
                '423', '500', '502', '503', '504', '520', '521', '522', '524'
        ]:
            log_utils.log('Trakt Error: %s' % str(resp_code))
            control.infoDialog('Trakt Error: ' + str(resp_code), sound=True)
            return
        elif resp_code in ['429']:
            log_utils.log('Trakt Rate Limit Reached: %s' % str(resp_code))
            control.infoDialog('Trakt Rate Limit Reached: ' + str(resp_code),
                               sound=True)
            return
        elif resp_code in ['404']:
            log_utils.log('Object Not Found : %s' % str(resp_code))
            return

        if resp_code not in ['401', '405', '403']:
            return result, resp_header

        oauth = urllib_parse.urljoin(BASE_URL, '/oauth/token')
        opost = {
            'client_id': V2_API_KEY,
            'client_secret': CLIENT_SECRET,
            'redirect_uri': REDIRECT_URI,
            'grant_type': 'refresh_token',
            'refresh_token': control.setting('trakt.refresh')
        }

        # result = client.request(oauth, post=json.dumps(opost), headers=headers)
        # result = utils.json_loads_as_str(result)

        result = requests.post(oauth,
                               data=json.dumps(opost),
                               headers=headers,
                               timeout=30).json()
        log_utils.log('Trakt token refresh: ' + repr(result))

        token, refresh = result['access_token'], result['refresh_token']
        control.setSetting(id='trakt.token', value=token)
        control.setSetting(id='trakt.refresh', value=refresh)

        headers['Authorization'] = 'Bearer %s' % token

        # result = client.request(url, post=post, headers=headers, output='extended', error=True)
        # result = utils.byteify(result)
        # return result[0], result[2]

        if not post:
            r = requests.get(url, headers=headers, timeout=30)
        else:
            r = requests.post(url, data=post, headers=headers, timeout=30)
        r.encoding = 'utf-8'
        return r.text, r.headers

    except:
        log_utils.log('getTrakt Error', 1)
Example #57
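    # Simple DDL scraper: matches the cleaned title against result urls,
    # then collects rel="nofollow" hoster links, skipping shorteners,
    # archives and subtitle files.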
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            html = client.request(url)
            url_list = re.compile('<h2><a href="([^"]+)"',
                                  re.DOTALL).findall(html)

            hostDict = hostprDict + hostDict

            for url in url_list:
                if cleantitle.get(title) in cleantitle.get(url):
                    html = client.request(url)
                    links = re.compile('href="([^"]+)" rel="nofollow"',
                                       re.DOTALL).findall(html)
                    for vid_url in links:
                        if 'ouo.io' in vid_url:
                            continue
                        if 'sh.st' in vid_url:
                            continue
                        if 'linx' in vid_url:
                            continue
                        if '.rar' not in vid_url:
                            if '.srt' not in vid_url:
                                quality, info = source_utils.get_release_quality(
                                    url, vid_url)
                                host = vid_url.split('//')[1].replace(
                                    'www.', '')
                                host = host.split('/')[0].lower()
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': vid_url,
                                    'info': info,
                                    'direct': False,
                                    'debridonly': False
                                })
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('2DDL - Exception: \n' + str(failure))
            return sources
Example #58
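    # Feed-based variant of the scraper above: requires an active debrid
    # account, parses <item> posts for title/link/size, validates the
    # release name against the title and SxxExx/year handle, and prefers
    # non-CAM qualities when any exist.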
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            html = client.request(url)
            posts = client.parseDOM(html, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'a', ret='href')
                    s = re.search('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', post)
                    s = s.groups()[0] if s else '0'
                    items += [(t, i, s) for i in u]
                except:
                    pass

            for item in items:
                try:

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                    'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('2DDL - Exception: \n' + str(failure))
            return sources
Example #59
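    # Builds context-menu entries for a listitem: one Add/Remove Bookmark
    # toggle per bookmark type, with the payload pipe-joined and
    # base64-encoded (Python 2 str.encode('base64')) into the RunPlugin url.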
    def build_cm(self, bmtype, **kwargs):
        try:
            cm = []
            name = kwargs.get('name')
            action = kwargs.get('action')
            icon = kwargs.get('icon')
            url = kwargs.get('url')
            if bmtype == 'Channels':
                chan_id = kwargs.get('id')
                dbase = name + '|' + chan_id + '|' + action + '|' + icon + '|' + url
                if action in self.bookmarks[bmtype]:
                    if chan_id in self.bookmarks[bmtype][action]:
                        cm.append(('Remove Bookmark',
                                   'RunPlugin(%s?action=%s&url=%s)' %
                                   (sys.argv[0], 'remove_channel',
                                    dbase.encode('base64'))))
                    else:
                        cm.append(
                            ('Add Bookmark', 'RunPlugin(%s?action=%s&url=%s)' %
                             (sys.argv[0], 'add_channel',
                              dbase.encode('base64'))))
                else:
                    cm.append(
                        ('Add Bookmark', 'RunPlugin(%s?action=%s&url=%s)' %
                         (sys.argv[0], 'add_channel', dbase.encode('base64'))))
            elif bmtype == 'Podcasts':
                show_id = kwargs.get('id')
                dbase = name + '|' + show_id + '|' + action + '|' + icon + '|' + url
                if action in self.bookmarks[bmtype]:
                    if show_id in self.bookmarks[bmtype][action]:
                        cm.append(('Remove Bookmark',
                                   'RunPlugin(%s?action=%s&url=%s)' %
                                   (sys.argv[0], 'remove_podcast',
                                    dbase.encode('base64'))))
                    else:
                        cm.append(
                            ('Add Bookmark', 'RunPlugin(%s?action=%s&url=%s)' %
                             (sys.argv[0], 'add_podcast',
                              dbase.encode('base64'))))
                else:
                    cm.append(
                        ('Add Bookmark', 'RunPlugin(%s?action=%s&url=%s)' %
                         (sys.argv[0], 'add_podcast', dbase.encode('base64'))))
            elif bmtype == 'Radio':
                station_id = kwargs.get('id')
                dbase = name + '|' + station_id + '|' + action + '|' + icon + '|' + url
                if action in self.bookmarks[bmtype]:
                    if station_id in self.bookmarks[bmtype][action]:
                        cm.append(('Remove Bookmark',
                                   'RunPlugin(%s?action=%s&url=%s)' %
                                   (sys.argv[0], 'remove_radio',
                                    dbase.encode('base64'))))
                    else:
                        cm.append(
                            ('Add Bookmark', 'RunPlugin(%s?action=%s&url=%s)' %
                             (sys.argv[0], 'add_radio',
                              dbase.encode('base64'))))
                else:
                    cm.append(
                        ('Add Bookmark', 'RunPlugin(%s?action=%s&url=%s)' %
                         (sys.argv[0], 'add_radio', dbase.encode('base64'))))
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('Bookmarks - Context Exception: \n' + str(failure))

        return cm
Example #60
    )
    control.execute(
        'RunPlugin(plugin://%s)' %
        'plugin.video.incursion/?action=moviesToLibrarySilent&url=traktcollection'
    )


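# Startup version report: logs the installed Incursion addon and script
# module versions to the Kodi log.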
try:
    ModuleVersion = control.addon('script.module.incursion').getAddonInfo(
        'version')
    AddonVersion = control.addon('plugin.video.incursion').getAddonInfo(
        'version')
    #RepoVersion = control.addon('repository.colossus').getAddonInfo('version')

    log_utils.log(
        '######################### INCURSION ############################',
        log_utils.LOGNOTICE)
    log_utils.log(
        '####### CURRENT INCURSION VERSIONS REPORT ######################',
        log_utils.LOGNOTICE)
    log_utils.log('### INCURSION PLUGIN VERSION: %s ###' % str(AddonVersion),
                  log_utils.LOGNOTICE)
    log_utils.log('### INCURSION SCRIPT VERSION: %s ###' % str(ModuleVersion),
                  log_utils.LOGNOTICE)
    #log_utils.log('### INCURSION REPOSITORY VERSION: %s ###' % str(RepoVersion), log_utils.LOGNOTICE)
    log_utils.log(
        '###############################################################',
        log_utils.LOGNOTICE)
except:
    log_utils.log(
        '######################### INCURSION ############################',