def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Scrape the Alltube page at `url` and return a list of source dicts
    # ({'source', 'quality', 'provider', 'url', 'vtype'}). Returns [] on any failure.
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url)
        #c = client.request(u output='cookie', error=True)
        # Each table row carries: the iframe fragment, the hoster name (img alt),
        # and the language/version label (centered td).
        links = client.parseDOM(result, 'tr')
        links = [(client.parseDOM(i, 'a', attrs = {'class': 'watch'}, ret='data-iframe')[0],
                  client.parseDOM(i, 'img', ret='alt')[0],
                  client.parseDOM(i, 'td', attrs={'class':'text-center'})[0]) for i in links]
        for i in links:
            try:
                # The iframe fragment is appended as a query string to the page URL.
                url1 = '%s?%s' % (url, i[0])
                url1 = url1.encode('utf-8')
                #print ("Q",videoquality.solvequality(url),url)
                sources.append({'source': i[1].encode('utf-8'), 'quality': 'SD', 'provider': 'Alltube', 'url': url1, 'vtype':i[2].encode('utf-8')})
            except Exception as e:
                control.log('Alltube sources Exception: %s' % e)
                pass
        #control.log('Alltube sources : %s' % sources)
        return sources
    except:
        return sources
def resolve(self, url):
    # Resolve a playable stream URL: follow the embedded iframe, then either
    # decode a base64 (window.atob) wrapped <source>, or fall back to a plain
    # src= link handed to the generic resolvers.
    link = client.source(url)
    url = re.compile('src="(.+?)" style').findall(link)[0]
    link = client.source(url)
    try:
        url = re.compile("window.atob\('(.+?)'\)\)").findall(link)[0]
        # The payload may be base64-encoded multiple times; decode once per
        # window.atob occurrence found in the page.
        func_count = len(re.findall('window\.atob', link))
        print(">>>>>>>> ILE",func_count)
        for _i in xrange(func_count):
            url = base64.decodestring(url)
        url = re.compile("<source src='(.+?)'").findall(url)[0]
        # NOTE(review): control.log is given two positional args here instead of
        # a %-formatted string — likely only the format string gets logged; confirm.
        control.log(">> u2 %s |ENcoded %s",url, resolvers.request(url))
        url = resolvers.request(url)
    except:
        try:
            # Fallback: take the first src= link and resolve by hoster.
            url = re.compile('src="(.+?)"').findall(link)[0]
            host = urlparse.urlparse(url).netloc
            host = host.replace('www.', '').replace('embed.', '')
            host = host.rsplit('.', 1)[0]
            host = host.lower()
            host = client.replaceHTMLCodes(host)
            host = host.encode('utf-8')
            url = resolvers.request(url)
        except: pass
    #print("--------------->>>>> URL",url)
    return url
def range(self, url):
    # Batch-add a whole movie list to the library after user confirmation,
    # showing a progress info dialog and triggering a library scan at the end.
    control.idle()
    yes = control.yesnoDialog(control.lang(30425).encode('utf-8'), '', '')
    if not yes: return
    if not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo'):
        # Very long timeout: dialog stays up until we replace it below.
        control.infoDialog(control.lang(30421).encode('utf-8'), time=10000000)
        self.infoDialog = True
    from resources.lib.indexers import movies
    items = movies.movies().get(url, idx=False)
    if items == None: items = []
    for i in items:
        control.log('## ITEMS %s' % i['title'])
    for i in items:
        try:
            # Bail out cleanly if Kodi is shutting down.
            if xbmc.abortRequested == True: return sys.exit()
            self.add('%s (%s)' % (i['title'], i['year']), i['title'], i['year'], i['imdb'], i['tmdb'], range=True)
        except:
            pass
    if self.infoDialog == True:
        # Replace the long-lived dialog with a short "done" notice.
        control.infoDialog(control.lang(30423).encode('utf-8'), time=1)
    if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo'):
        control.execute('UpdateLibrary(video)')
def episodes(imdb, tvdb, season, episode, watched):
    """Mark a single episode watched/unwatched.

    Uses metahandlers for local bookkeeping when no trakt credentials are
    configured, otherwise syncs the state to trakt. `watched == 7` is the
    "watched" sentinel. Errors in either backend are swallowed; the view is
    refreshed regardless.
    """
    traktMode = False if trakt.getTraktCredentials() == False else True
    watched = int(watched)
    control.log('>>> Watched %s' % watched)
    if not traktMode:
        # Local mode: persist the flag through metahandlers.
        try:
            from metahandler import metahandlers
            meta = metahandlers.MetaData(preparezip=False)
            meta.get_meta('tvshow', '', imdb_id=imdb)
            meta.get_episode_meta('', imdb, season, episode)
            meta.change_watched('episode', '', imdb, season=season, episode=episode, year='', watched=watched)
        except:
            pass
    else:
        # Trakt mode: push the state remotely and resync.
        try:
            if watched == 7:
                trakt.markEpisodeAsWatched(tvdb, season, episode)
            else:
                trakt.markEpisodeAsNotWatched(tvdb, season, episode)
            trakt.syncTVShows()
        except:
            pass
    control.refresh()
def get_movie(self, imdb, title, year):
    # Search wrzcraft for a movie and return a list of candidate post
    # dicts ({'url', 'title'}), skipping posts tagged as "Extras".
    try:
        urls = []
        url = self.moviesearch_link % (cleantitle.geturl(title), year)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r)
        posts = client.parseDOM(r, 'div', attrs = {'class': 'post'})
        for post in posts:
            extra = False
            tags = client.parseDOM(post, 'a', attrs = {'rel' : 'category tag'})
            for tag in tags:
                #Make sure it isnt an extra
                if tag == 'Extras':
                    extra = True
                    break
            if extra == False:
                # Title markup varies between two container classes.
                containerDiv = client.parseDOM(post, 'div', attrs = {'class' : 'posttitle'})
                if not containerDiv:
                    containerDiv = client.parseDOM(post, 'div', attrs = {'class' : 'expandposttitle'})
                href = client.parseDOM(containerDiv, 'a', ret='href')[0]
                title = client.parseDOM(containerDiv,'a', ret='title')[0]
                href = href.encode('utf-8')
                title = title.encode('utf-8')
                urls.append({'url' : href, 'title' : title})
        return urls
    except Exception as e:
        control.log('wrzcraft error')
        control.log(e)
        return
def get_cached_url(self, url, data='', cache_limit=8):
    # Look up a previously cached HTTP response for (url, data).
    #
    # Returns a (created_timestamp, res_header, html) tuple; `html` is ''
    # on a cache miss or when the entry is older than `cache_limit` hours.
    # Returns None if the lookup itself fails.
    try:
        dbcon = database.connect(control.sourcescachedUrl)
        dbcur = dbcon.cursor()
        dbcur.execute(
            "CREATE TABLE IF NOT EXISTS url_cache (url VARCHAR(255) NOT NULL, data VARCHAR(255), response, res_header, timestamp, PRIMARY KEY(url, data))")
    except:
        pass
    try:
        if data is None: data = ''
        html = ''
        res_header = []
        created = 0
        now = time.time()
        age = now - created
        limit = 60 * 60 * cache_limit
        # FIX: the original interpolated url/data straight into the SQL text
        # without quoting ("WHERE url = %s" % url), which is both a runtime
        # SQL syntax error and an injection vector. Use bound parameters.
        dbcur.execute('SELECT timestamp, response, res_header FROM url_cache WHERE url = ? AND data = ?', (url, data))
        rows = dbcur.fetchall()
        control.log('DB ROWS: Url: %s, ' % (rows))
        if rows:
            created = float(rows[0][0])
            res_header = json.loads(rows[0][2])
            age = now - created
            if age < limit:
                html = rows[0][1]
        control.log('DB Cache: Url: %s, Data: %s, Cache Hit: %s, created: %s, age: %.2fs (%.2fh), limit: %ss' % (
            url, data, bool(html), created, age, age / (60 * 60), limit))
        return created, res_header, html
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Collect hoster links from previously matched wrzcraft posts (`url` is
    # the list of {'url', 'title'} dicts built by get_movie). Multi-part
    # (.partN) links are skipped; quality is guessed from the link text.
    try:
        sources = []
        if url == None: return sources
        for item in url:
            newLink = client.request(item['url'])
            divArea = client.parseDOM(newLink, 'div', attrs = {"class": "postarea"})
            match = client.parseDOM(divArea, "a", ret = "href", attrs = {'rel': 'nofollow'})
            for link in match:
                # Negative lookahead: reject any ".partN" split-archive link.
                if re.match('((?!\.part[0-9]).)*$', link, flags=re.IGNORECASE) and '://' in link:
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0].split('.')[0]
                    scheme = urlparse.urlparse(link).scheme
                    if host in hostDict and scheme:
                        if '1080' in link: quality = '1080p'
                        elif '720' in link: quality = 'HD'
                        else: quality = 'SD'
                        fileLink = client.replaceHTMLCodes(link)
                        fileLink = fileLink.encode('utf-8')
                        sources.append({ 'source': host, 'quality': quality, 'provider': 'wrzcraft', 'url': fileLink })
        return sources
    except Exception as e:
        control.log('ERROR wrzcraft sources %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict): try: sources = [] if url == None: return sources url = urlparse.urljoin(self.base_link, url) result = client.request(url) #result = result.encode('windows-1254') #print result result = re.sub(r'[^\x00-\x7F]+', ' ', result) #print result pages = [] try: r = client.parseDOM(result, 'div', attrs = {'id': 'embed'}) print ("r",r[0]) pages.append(client.parseDOM(r, 'iframe', ret='src')[0]) print pages except: pass try: r = client.parseDOM(result, 'div', attrs = {'id': 'playerMenu'})[0] r = client.parseDOM(r, 'div', ret='data-id', attrs = {'class': 'item'})[0] r = client.request(urlparse.urljoin(self.base_link, self.video_link), post=urllib.urlencode( {'id': r} )) pages.append(client.parseDOM(r, 'iframe', ret='src')[0]) except: pass for page in pages: try: if not 'http' in page: page = 'http:'+page result = client.request(page) #print result captions = re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', result) if not captions: raise Exception() result = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?[^"]*"').findall(result) print result links = [(i[0], '1080p') for i in result if int(i[1]) >= 1080] links += [(i[0], 'HD') for i in result if 720 <= int(i[1]) < 1080] links += [(i[0], 'SD') for i in result if 480 <= int(i[1]) < 720] for i in links: if not 'http' in i[0]: myurl = 'http:'+i[0] else: myurl = [0] sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Sezonlukdizi', 'url': myurl}) except: pass return sources except Exception as e: control.log('ERROR sezonlukidz %s' % e) return sources
def resolve(url):
    # Resolve an allmyvideos.net link: re-submit the page's hidden form,
    # then pick the last "file" URL from the player config. Returns None on failure.
    try:
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        page = 'http://allmyvideos.net/%s' % url
        control.log('AAA Page %s' % page)
        result = client.request(page, close=False)
        # Collect all hidden form inputs and POST them back (anti-bot gate).
        post = {}
        f = client.parseDOM(result, 'form', attrs = {'action': ''})
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k:
            post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post = urllib.urlencode(post)
        result = client.request(page, post=post)
        url = re.compile('"file" *: *"(http.+?)"').findall(result)
        #control.log('AAA Page %s' % url)
        url = url[-1]
        url += '&direct=false&ua=false'
        # Brief pause before returning; the host appears to delay link activation.
        xbmc.sleep(2000)
        #return url + '|' + urllib.urlencode({ 'User-Agent': client.IE_USER_AGENT })
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Build the site-relative episode path from a show URL.

    The show slug is the last path segment of `url`; season/episode are
    formatted as plain integers. Returns None when `url` is missing or
    the numbers are malformed.
    """
    try:
        if url == None:
            return
        show_slug = url.split('/')[-1]
        path = '/tv-show/%s/season/%01d/episode/%01d' % (show_slug, int(season), int(episode))
        control.log('AAAA y >>>>>> %s' % path)
        return path
    except:
        return
def check(self, i):
    # Verify a single candidate link (i = (url, quality)) and append it to
    # self.sources if its hoster validates. Designed to run in a worker thread;
    # all failures are silently dropped.
    try:
        control.log(">>>>>>>>>>>>>>> ONE CHECK %s" % (i[0]))
        url = client.replaceHTMLCodes(i[0])
        url = url.encode('utf-8')
        # Derive a bare hoster name from the netloc (strip www./embed. and TLD).
        host = urlparse.urlparse(url).netloc
        host = host.replace('www.', '').replace('embed.', '')
        host = host.rsplit('.', 1)[0]
        host = host.lower()
        host = client.replaceHTMLCodes(host)
        host = host.encode('utf-8')
        #control.log("##OneClickWatch %s - url %s" % (host, i[0]))
        #if host in i[2]:
        check = url = resolvers.request(url)
        # Only a whitelisted set of hosters gets an explicit availability check.
        if host == 'openload': check = openload.check(url)
        elif host == 'uptobox': check = uptobox.check(url)
        elif host == 'cloudzilla': check = cloudzilla.check(url)
        elif host == 'zstream': check = zstream.check(url)
        elif host == 'videomega': check = videomega.check(url)
        else: raise Exception()
        if check == None or check == False: raise Exception()
        self.sources.append({'source': host, 'quality': i[1], 'provider': 'Oneclickwatch', 'url': url})
    except:
        pass
def get_movie(self, imdb, title, year):
    # Search xmovies for `title` and return the site-relative URL of the
    # first result whose cleaned title and year both match. None on failure.
    try:
        query = urlparse.urljoin(self.base_link, self.search_link)
        query = query % urllib.quote_plus(title)
        #for i in range(5):
        r = client.request(query)
        # if not r == None: break
        t = cleantitle.get(title)
        r = client.parseDOM(r, 'div', attrs = {'class': 'col-lg.+?'})
        print("R1",r)
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Take the last 4-digit group in the anchor title as the year.
        r = [(i[0], i[1], re.findall('(\d{4})', i[1])) for i in r]
        r = [(i[0], i[1], i[2][-1]) for i in r if len(i[2]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        print("R6", r)
        # Normalize to a path-only URL.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception as e:
        control.log('ERROR XMOVIES GET %s' % e)
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Collect Dizilab stream sources: gvideo player links plus an optional
    # openload iframe. Returns [] on failure.
    control.log('######### DIZILAB ## %s ' % url)
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        #result = client.source(url)
        result = client2.http_get(url)
        try:
            url = re.compile('"episode_player".*?src="([^"]+)"').findall(result)
            # NOTE(review): findall returns plain strings here, so i[0]/i[1]
            # below index single characters and int(i[1]) will normally raise,
            # aborting this branch via the bare except. Verify the intended
            # regex captured (url, resolution) pairs.
            links = [(i[0], '1080p') for i in url if int(i[1]) >= 1080]
            links += [(i[0], 'HD') for i in url if 720 <= int(i[1]) < 1080]
            links += [(i[0], 'SD') for i in url if 480 <= int(i[1]) < 720]
            if not 'SD' in [i[1] for i in links]:
                links += [(i[0], 'SD') for i in url if 360 <= int(i[1]) < 480]
            for i in links:
                sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Dizilab', 'url': i[0]})
        except:
            pass
        try:
            url = client.parseDOM(result, 'iframe', ret='src')
            url = [i for i in url if 'openload.' in i][0]
            sources.append({'source': 'openload.co', 'quality': client.file_quality_openload(url)['quality'], 'provider': 'Dizilab', 'url': url})
        except:
            pass
        return sources
    except:
        return sources
def get_movie(self, imdb, title, year):
    # Search filmyto for `title`, matching on Polish+English title text and
    # a year within +/-1. Returns the site-relative URL or None.
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        result = client.parseDOM(result, 'div', attrs={'class': 'movie clearfix'})
        result = [(client.parseDOM(i, 'a', ret='href'),
                   client.parseDOM(i, 'span', attrs={'class': 'title-pl'}),
                   client.parseDOM(i, 'span', attrs={'class': 'title-en'}),
                   client.parseDOM(i, 'img', ret='src'),
                   client.parseDOM(i, 'p'),
                   client.parseDOM(i, 'p', attrs={'class': 'plot'}))
                  for i in result ]
        # (href, "PL EN" combined title, years found in the first <p>)
        result = [(i[0][0], u" ".join(i[1]+i[2]), re.findall('(\d{4})', i[4][0])) for i in result]
        result = [i for i in result if cleantitle.movie(title) in cleantitle.movie(i[1])]
        # Accept a one-year tolerance to cover release-date discrepancies.
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        control.log('FILMYTO URL %s' % url)
        return url
    except Exception as e:
        control.log('FILMYTO getmovie ERROR %s' % e)
        return
def request(url, post=None, headers=None, mobile=False, safe=False, timeout='30'):
    # HTTP request wrapper that solves Cloudflare challenges: uses a cached
    # UA + clearance cookie (168h TTL), and re-solves once on a 503 response.
    # Returns the response body, or None on failure.
    try:
        control.log('[cloudflare] request %s' % url)
        # NOTE(review): headers.update(headers) is a no-op when headers is a
        # dict and only serves to raise (and default to {}) when it is None.
        try: headers.update(headers)
        except: headers = {}
        agent = cache.get(cloudflareAgent, 168)
        if not 'User-Agent' in headers: headers['User-Agent'] = agent
        # Cookie is cached per scheme+netloc.
        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        cookie = cache.get(cloudflareCookie, 168, u, post, headers, mobile, safe, timeout)
        result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='response', error=True)
        if result[0] == '503':
            # Challenge expired: force-refresh agent and cookie, then retry.
            agent = cache.get(cloudflareAgent, 0) ; headers['User-Agent'] = agent
            cookie = cache.get(cloudflareCookie, 0, u, post, headers, mobile, safe, timeout)
            result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout)
        else:
            result= result[1]
        #control.log('[cloudflare] result %s' % result)
        return result
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Collect IITV sources grouped by language tab (lecPL/subPL/org), which
    # is surfaced to the UI via the 'vtype' field. Returns [] on failure.
    try:
        sources = []
        mylinks = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url)
        # Each tab id corresponds to one language/version of the links.
        resulttype = client.parseDOM(result, 'ul',attrs={'class':'tab-content'}, ret='id')
        for j in resulttype:
            linkstype = client.parseDOM(result, 'ul', attrs={'class': 'tab-content', 'id':j})[0]
            links1 = client.parseDOM(linkstype, 'a', ret='href', attrs={'class':'video-link'})
            links2 = client.parseDOM(linkstype, 'a', attrs={'class':'video-link'})
            #links = [i for i in links if i[0][0].startswith('http')]
            for k in range(len(links1)):
                if links1[k].startswith('http'):
                    # [url, hoster-name (text before first dot), tab id]
                    mylinks.append([links1[k], links2[k].split('.')[0], j])
        for i in mylinks:
            try:
                control.log(' IITV LinkType %s' % str(i))
                vtype = 'BD'
                if i[2] == 'lecPL': vtype = 'Lektor'
                if i[2] == 'subPL': vtype = 'Napisy'
                if i[2] == 'org': vtype = 'Orginalny'
                sources.append({'source': i[1], 'quality': 'SD', 'provider': 'IITV', 'url': i[0], 'vtype':vtype})
            except:
                pass
        return sources
    except:
        return sources
def sourcesResolve(self, url, provider):
    # Resolve a source URL through its provider module (discovered via
    # pkgutil when `provider` is not already a full module name), then
    # validate the resolved URL with a HEAD-style request. Returns the
    # playable URL, or False on any failure.
    try:
        control.log('Provider:%s URL:%s' % (provider,url))
        provider = provider.lower()
        control.log('XXX Provider:%s url:%s' %(provider,url))
        if not provider.endswith(('_mv', '_tv', '_mv_tv')):
            # Map a bare provider name to its concrete module (e.g. foo -> foo_mv).
            sourceDict = []
            for package, name, is_pkg in pkgutil.walk_packages(__path__):
                sourceDict.append((name, is_pkg))
            for i in sourceDict:
                print("A",i[0], "B", i[0].startswith(provider + '_'), provider)
            #print str(provider) in str(i[0])
            #print type(provider), type(i[0])
            provider = [i[0] for i in sourceDict if i[1] == False and i[0].startswith(provider + '_')][0]
        source = __import__(provider, globals(), locals(), [], -1).source()
        url = source.resolve(url)
        if url == False or url == None: raise Exception()
        # Optional "|key=val" suffix carries request headers.
        try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
        except: headers = dict('')
        if url.startswith('http') and '.m3u8' in url:
            result = client.request(url.split('|')[0], headers=headers, output='geturl', timeout='20')
            if result == None: raise Exception()
        elif url.startswith('http'):
            result = client.request(url.split('|')[0], headers=headers, output='chunk', timeout='20')
            if result == None: raise Exception()
        self.url = url
        return url
    except:
        return False
def get_movie(self, imdb, title, year):
    # Search yesmovies for `title` (up to 3 request attempts), verify the
    # release year via cached site info, and return the matching URL path.
    try:
        t = cleantitle.get(title)
        q = '/search/%s.html' % (urllib.quote_plus(cleantitle.query(title)))
        q = urlparse.urljoin(self.base_link, q)
        # Retry the search page a few times; the site is flaky.
        for i in range(3):
            r = client.request(q)
            if not r == None: break
        r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
        r = [i[0] for i in r if t == cleantitle.get(i[1])][:2]
        # Pair each candidate URL with its numeric site id (last digit run).
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        for i in r:
            try:
                y, q = cache.get(self.ymovies_info, 9000, i[1])
                if not y == year: raise Exception()
                return urlparse.urlparse(i[0]).path
            except:
                pass
    except Exception as e:
        control.log('Error yesmovies %s' % e)
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    # Query the alluc API for "<show> sXXeYY" links and accumulate English
    # single-hoster results into self.stream_url. `url` is "<title> (<year>)".
    try:
        if control.setting('alluc_user'):
            # With a debrid account prefer download links, else stream links.
            if control.setting('realdebrid_token') or control.setting('premiumize_user'):
                self.moviesearch_link = '/api/search/download?user=%s&password=%s&query=%s'
            else:
                self.moviesearch_link = '/api/search/stream/?user=%s&password=%s&query=%s'
        tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]
        season, episode = season.zfill(2), episode.zfill(2)
        query = '%s s%se%s' % (tvshowtitle, season, episode)
        query = self.moviesearch_link % (control.setting('alluc_user'), control.setting('alluc_password'), urllib.quote_plus(query))
        r = urlparse.urljoin(self.base_link, query)
        # "#newlinks" filter, URL-encoded.
        r = r + "+%23newlinks"
        r = requests.get(r).json()
        for item in r['result']:
            if len(item['hosterurls']) == 1 and 'en' in item['lang']:
                tmp = item['hosterurls'][0]['url']
                tmp = client.replaceHTMLCodes(tmp)
                tmp = tmp.encode('utf-8')
                title = item['title'].encode('utf-8')
                self.stream_url.append({'url': tmp, 'hoster': item['hostername'], 'title': title })
        return self.stream_url
    except Exception as e:
        control.log('alluc error tv')
        control.log(e)
        return
def get_movie(self, imdb, title, year):
    # Query the alluc API for "<title> <year>" links and accumulate English
    # single-hoster results into self.stream_url. Mirrors get_episode.
    try:
        if control.setting('alluc_user'):
            # With a debrid account prefer download links, else stream links.
            if control.setting('realdebrid_token') or control.setting('premiumize_user'):
                self.moviesearch_link = '/api/search/download?user=%s&password=%s&query=%s+%s'
            else:
                self.moviesearch_link = '/api/search/stream/?user=%s&password=%s&query=%s+%s'
        url = self.moviesearch_link % (control.setting('alluc_user'), control.setting('alluc_password'),cleantitle.geturl(title), year)
        r = urlparse.urljoin(self.base_link, url)
        # "#newlinks" filter, URL-encoded.
        r = r + "+%23newlinks"
        r = client.request(r)
        r1 = json.loads(r)
        for item in r1['result']:
            if len(item['hosterurls']) == 1 and 'en' in item['lang']:
                tmp = item['hosterurls'][0]['url']
                tmp = client.replaceHTMLCodes(tmp)
                tmp = tmp.encode('utf-8')
                title = item['title'].encode('utf-8')
                self.stream_url.append({'url': tmp, 'hoster': item['hostername'], 'title': title })
        return self.stream_url
    except Exception as e:
        control.log(e)
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    # Locate the yesmovies season page for this show (matching title,
    # season number, and air year +/-1) and return (url_path, episode).
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        t = cleantitle.get(data['tvshowtitle'])
        title = data['tvshowtitle']
        season = '%01d' % int(season) ; episode = '%01d' % int(episode)
        year = re.findall('(\d{4})', date)[0]
        years = [str(year), str(int(year)+1), str(int(year)-1)]
        r = cache.get(self.ymovies_info_season, 720, title, season)
        # Parse "<title> - season <n>" out of each result name.
        r = [(i[0], re.findall('(.+?)\s+(?:-|)\s+season\s+(\d+)$', i[1].lower())) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and season == '%01d' % int(i[2])][:2]
        # Pair each candidate URL with its numeric site id (last digit run).
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        for i in r:
            try:
                y, q = cache.get(self.ymovies_info, 9000, i[1])
                mychk = False
                years = [str(year),str(int(year) + 1),str(int(year) - 1)]
                for x in years:
                    if str(y) == x: mychk = True
                if mychk == False: raise Exception()
                return urlparse.urlparse(i[0]).path, (episode)
            except:
                pass
    except Exception as e:
        control.log('Error yesmovies %s' % e)
        return
def resolve(url):
    # Resolve an allmyvideos.net link: re-submit the page's hidden form,
    # then pick the last "file" URL from the player config. Returns None
    # on failure. (Duplicate of the single-quoted variant elsewhere in the
    # project — keep the two in sync.)
    try:
        url = url.replace("/embed-", "/")
        url = re.compile("//.+?/([\w]+)").findall(url)[0]
        page = "http://allmyvideos.net/%s" % url
        control.log("AAA Page %s" % page)
        result = client.request(page, close=False)
        # Collect all hidden form inputs and POST them back (anti-bot gate).
        post = {}
        f = client.parseDOM(result, "form", attrs={"action": ""})
        k = client.parseDOM(f, "input", ret="name", attrs={"type": "hidden"})
        for i in k:
            post.update({i: client.parseDOM(f, "input", ret="value", attrs={"name": i})[0]})
        post = urllib.urlencode(post)
        result = client.request(page, post=post)
        url = re.compile('"file" *: *"(http.+?)"').findall(result)
        # control.log('AAA Page %s' % url)
        url = url[-1]
        url += "&direct=false&ua=false"
        # Brief pause before returning; the host appears to delay link activation.
        xbmc.sleep(2000)
        # return url + '|' + urllib.urlencode({ 'User-Agent': client.IE_USER_AGENT })
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    # Search the site for `title` and return the path of the first result
    # whose cleaned title matches exactly. Returns None on failure.
    try:
        query = self.search_link % urllib.quote(title)
        query = urlparse.urljoin(self.base_link, query)
        result = client2.http_get(query)
        title = cleantitle.movie(title)
        # NOTE(review): `years` is computed but never used in the match below.
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        r = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Normalize hrefs to the first two path segments and deduplicate.
        r = [(re.sub('http.+?//.+?/','', i[0]), i[1]) for i in r]
        r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
        r = [x for y,x in enumerate(r) if x not in r[:y]]
        r = [i for i in r if title == cleantitle.movie(i[1])]
        u = [i[0] for i in r][0]
        url = urlparse.urljoin(self.base_link, u)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        control.log("@@@@@@@@@@@@@@@ URL %s" % url)
        return url
    except:
        return
def check(self, i, hostDict):
    # Follow one Watchseries "cale" page (i = [path, quality]), extract the
    # outbound hoster link behind the myButton anchor, and append it to
    # self.sources when the hoster is whitelisted. Runs in a worker thread.
    try:
        url = client.replaceHTMLCodes(i[0])
        url = url.encode('utf-8')
        result = ''
        result = client.request(urlparse.urljoin(self.base_link, url), headers=self.headers)
        url = re.compile('class=[\'|\"]*myButton.+?href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(result)[0]
        print("URL2",url,i[1])
        control.log("WATCHSERIES CHECK %s | url: %s" % (url,i[0]))
        url = client.replaceHTMLCodes(url)
        host = urlparse.urlparse(url).netloc
        host = host.replace('www.', '').replace('embed.', '')
        host = host.lower()
        # Drop links to hosters the caller does not support.
        if not host in hostDict:
            control.log("WATCHSERIES HOST %s" % host)
            raise Exception()
        host = host.rsplit('.', 1)[0]
        host = client.replaceHTMLCodes(host)
        host = host.encode('utf-8')
        self.sources.append({'source': host, 'quality': i[1], 'provider': 'Watchseries', 'url': url})
    except:
        pass
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Gather Watchseries links from the English tab, then verify each
    # candidate concurrently via self.check (10s thread budget, polled in
    # 1s ticks). Returns self.sources (possibly empty).
    try:
        self.sources =[]
        mylinks = []
        # FIX: the original did `hostDict = hostDict.sort()` — list.sort()
        # sorts in place and returns None, so the debug loop below raised
        # and the whole function bailed out with []. Use sorted() instead.
        hostDict = sorted(hostDict)
        for i in hostDict:
            control.log("WA HO %s" % i)
        if url == None: return self.sources
        url = url.replace('/json/', '/')
        result = ''
        result, headers, content, cookie = client.request(urlparse.urljoin(self.base_link, url), output='extended')
        # Subsequent per-link checks must reuse this page's cookie/referer.
        self.headers['Referer'] = urlparse.urljoin(self.base_link, url)
        self.headers['Cookie'] = cookie
        result = result.replace('\n','')
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'div', attrs = {'id': 'lang_1'})[0]
        links = re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>].+?title=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(result)
        # Deduplicate while preserving order.
        links = [x for y,x in enumerate(links) if x not in links[:y]]
        for i in links:
            try:
                host = i[1]
                host = host.split('.', 1)[0]
                host = host.strip().lower()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                url = i[0]
                url = client.replaceHTMLCodes(url)
                # Outbound links are wrapped as ?u=<real-url>.
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except: pass
                if not url.startswith('http'): url = urlparse.urljoin(self.base_link, url)
                if not '/cale/' in url: raise Exception()
                url = url.encode('utf-8')
                url = url.replace('/json/', '/')
                url = urlparse.urlparse(url).path
                mylinks.append([url, 'SD'])
            except:
                pass
        threads = []
        for i in mylinks:
            threads.append(workers.Thread(self.check, i, hostDict))
        [i.start() for i in threads]
        # Wait at most ~20s for the checker threads to finish.
        for i in range(0, 10 * 2):
            is_alive = [x.is_alive() for x in threads]
            if all(x == False for x in is_alive): break
            time.sleep(1)
        return self.sources
    except:
        return self.sources
def resolve(url):
    # Resolve an uptobox.com link by re-submitting the F1 form and scraping
    # the green download button; retries up to 3 times with a 1s backoff.
    try:
        referer = url
        result = client.request(url)
        # Collect the hidden inputs of the download form.
        post = {}
        f = client.parseDOM(result, "form", attrs={"name": "F1"})[0]
        k = client.parseDOM(f, "input", ret="name", attrs={"type": "hidden"})
        for i in k:
            post.update({i: client.parseDOM(f, "input", ret="value", attrs={"name": i})[0]})
        post = urllib.urlencode(post)
        for i in range(0, 3):
            try:
                result = client.request(url, post=post, referer=referer)
                url = re.search(
                    "<a\shref\s*=['\"](.+?)['\"]\s*>\s*<span\sclass\s*=\s*['\"]button_upload green['\"]\s*>", result
                ).group(1)
                control.log("UPTOBOX URL: %s" % url)
                # Keep only the uptobox.com URL if several are concatenated.
                url = ["http" + i for i in url.split("http") if "uptobox.com" in i][0]
                return url
            except:
                time.sleep(1)
    except:
        return
def resolve(self, url):
    # Resolve a stream URL per hosting site: openload via the generic
    # resolvers, movieshd/seriesonline via their inline player configs
    # (highest-quality variant picked by label sort). Returns False for
    # unrecognized URLs and None when an exception is swallowed.
    control.log('RESSS %s' % url)
    try:
        if 'openload.co' in url:
            url = resolvers.request(url)
            return url
        if 'movieshd' in url:
            r = self.request(url)[0]
            r = re.findall("file: '([^']+)',label: '(\d+)", r)
            # Sort by label so the last entry is the highest quality.
            r1 = sorted(r, key=lambda k: k[1])
            r2 = client.replaceHTMLCodes(r1[-1][0])
            #r2 = client.googlepass(url)
            return r2
        if 'seriesonline' in url:
            r = self.request(url)[0]
            r = [client.parseDOM(r, 'source', ret='src'), client.parseDOM(r,'source', ret='label')]
            r = zip(r[0],r[1])
            r1 = sorted(r, key=lambda k: k[1])
            r2 = client.replaceHTMLCodes(r1[-2][0])
            r2 = client.googlepass(url)
            return r2
        return False
    except Exception as e:
        # FIX: was '%S' — an invalid format character that itself raised
        # ValueError inside this handler, masking the original error.
        control.log('RESSS %s' % e)
        pass
def get_movie(self, imdb, title, year):
    # Search cda-online for `title`, matching cleaned title and a year
    # within +/-1, and return the site-relative URL. None on failure.
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        control.log("cda-online URL %s" % query)
        result = client.request(query)
        result = client.parseDOM(result, "div", attrs={"class": "item"})
        # print('cda-online',result)
        # (href, title text, year span) per result card.
        result = [
            (
                client.parseDOM(i, "a", ret="href")[0],
                client.parseDOM(i, "h2")[0],
                client.parseDOM(i, "span", attrs={"class": "year"})[0],
            )
            for i in result
        ]
        # print('cda-online2',result)
        result = [i for i in result if cleantitle.movie(title) in cleantitle.movie(i[1])]
        # print('cda-online3',result)
        years = ["%s" % str(year), "%s" % str(int(year) + 1), "%s" % str(int(year) - 1)]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        # print('cda-online4',result)
        try: url = re.compile("//.+?(/.+)").findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode("utf-8")
        control.log("ALLTUBE URL %s" % url)
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    # POST a title search to iwatch and return the path of the result whose
    # cleaned title matches exactly and whose "(year)" is within +/-1.
    try:
        query = self.search_link
        post = {'searchquery': title, 'searchin': '1'}
        post = urllib.urlencode(post)
        result = ''
        headers = {"Content-Type":"application/x-www-form-urlencoded",
                   "Referer":urlparse.urljoin(self.base_link, query)}
        result = client.request(urlparse.urljoin(self.base_link, query), post=post, headers=headers)
        #if 'widget search-page' in str(result): break
        print("R",result)
        result = client.parseDOM(result, 'div', attrs = {'class': 'widget search-page'})[0]
        result = client.parseDOM(result, 'td')
        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href')[-1], client.parseDOM(i, 'a')[-1]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = client.replaceHTMLCodes(result)
        # Outbound links may be wrapped as ?u=<real-url>.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except Exception as e:
        control.log("ERR iwatch %s" % e)
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    # Map (season, episode) to the show's absolute episode number via the
    # TheTVDB API (endpoint URL is base64-obfuscated, includes an API key),
    # then find the matching row on the show page. Returns the episode path.
    control.log('##### 1 - url %s' % url)
    try:
        if url == None: return
        num = base64.b64decode('aHR0cDovL3RoZXR2ZGIuY29tL2FwaS9FQUNCMkRGNTM0Njc3OEU4L3Nlcmllcy8lcy9kZWZhdWx0LyUwMWQvJTAxZA==')
        num = num % (tvdb, int(season), int(episode))
        control.log('##### 2 - num %s' % num)
        num = client.request(num)
        num = client.parseDOM(num, 'absolute_number')[0]
        control.log('##### 3 - num %s' % num)
        url = urlparse.urljoin(self.base_link, url)
        control.log('##### url %s' % url)
        result = client.request(url)
        control.log('##### res %s' % url)
        result = result.decode('iso-8859-1').encode('utf-8')
        # Rows pair an episode link with its absolute-number cell.
        result = client.parseDOM(result, 'tr', attrs = {'class': ''})
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'td', attrs = {'class': 'epnum'})) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i[0] for i in result if num == i[1]][0]
        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Gather Watchseries links from the English tab, then verify the first
    # 15 candidates concurrently via self.check (threads polled for ~20s).
    # Returns self.sources (possibly empty).
    try:
        self.sources =[]
        mylinks = []
        #hostDict = hostDict.sort()
        #for i in hostDict:
        #    control.log("WA HO %s" % i)
        if url == None: return self.sources
        url = url.replace('/json/', '/')
        result = ''
        result, headers, content, cookie = client.request(urlparse.urljoin(self.base_link, url), output='extended')
        #result, headers, content, cookie = client.request(url, limit='0', output='extended')
        # Subsequent per-link checks must reuse this page's cookie/referer.
        self.headers['Referer'] = urlparse.urljoin(self.base_link, url)
        self.headers['Cookie'] = cookie
        result = result.replace('\n','')
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'div', attrs = {'id': 'lang_1'})[0]
        links = re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>].+?title=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(result)
        # Deduplicate while preserving order.
        links = [x for y,x in enumerate(links) if x not in links[:y]]
        for i in links:
            try:
                host = i[1]
                host = host.split('.', 1)[0]
                host = host.strip().lower()
                #if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                url = i[0]
                url = client.replaceHTMLCodes(url)
                # Outbound links are wrapped as ?u=<real-url>.
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except: pass
                if not url.startswith('http'): url = urlparse.urljoin(self.base_link, url)
                if not '/cale/' in url: raise Exception()
                url = url.encode('utf-8')
                url = url.replace('/json/', '/')
                url = urlparse.urlparse(url).path
                mylinks.append([url, 'SD'])
            except:
                pass
        threads = []
        for i in mylinks[:15]:
            threads.append(workers.Thread(self.check, i, hostDict))
        [i.start() for i in threads]
        # Wait at most ~20s for the checker threads to finish.
        for i in range(0, 10 * 2):
            is_alive = [x.is_alive() for x in threads]
            if all(x == False for x in is_alive): break
            time.sleep(1)
        return self.sources
    except Exception as e:
        control.log('ERROR watchseries %s' % e)
        return self.sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape Muchmoviesv2 stream links for a movie or episode page.

    Reads the media id and quality tag from the watch page, fetches the
    episode/server list, and emits gvideo (server ids 2..11) and
    openload.co (server id 14) entries. Returns a list of source dicts;
    on any failure the list gathered so far is returned.
    """
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        # Episode URLs carry a trailing '?episode=N'.
        content = re.compile('(.+?)\?episode=\d*$').findall(url)
        content = 'movie' if len(content) == 0 else 'episode'
        try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall( url)[0]
        except: pass
        url = urlparse.urljoin(self.base_link, url) + '/watching.html'
        result = client.source(url)
        movie = client.parseDOM(result, 'div', ret='movie-id', attrs={'id': 'media-player'})[0]
        control.log('####### %s MOVIE' % movie)
        try:
            quality = client.parseDOM(result, 'span', attrs={'class': 'quality'})[0].lower()
            control.log('####### %s MOVIE quality ' % quality)
        except:
            quality = 'hd'
        # Normalize the site's quality tag to the addon's labels.
        if quality == 'cam' or quality == 'ts': quality = 'CAM'
        elif quality == 'hd': quality = 'HD'
        else: quality = 'SD'
        url = '/movie/loadepisodes/%s' % movie
        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url)
        result = client.parseDOM(result, 'div', attrs={'class': 'les-content'})
        # Per anchor: (server-id, onclick-id, episode-id attr, episode number label).
        result = zip(client.parseDOM(result, 'a', ret='onclick'), client.parseDOM(result, 'a', ret='episode-id'), client.parseDOM(result, 'a'))
        result = [(re.sub('[^0-9]', '', i[0].split(',')[0]), re.sub('[^0-9]', '', i[0].split(',')[-1]), i[1], ''.join(re.findall('(\d+)', i[2])[:1])) for i in result]
        result = [(i[0], i[1], i[2], i[3]) for i in result]
        if content == 'episode': result = [i for i in result if i[3] == '%01d' % int(episode)]
        # Server ids 2..11 are direct gvideo loads.
        links = [('movie/load_episode/%s/%s' % (i[2], i[1]), 'gvideo') for i in result if 2 <= int(i[0]) <= 11]
        for i in links: sources.append({ 'source': i[1], 'quality': quality, 'provider': 'Muchmoviesv2', 'url': i[0] })
        # Server id 14 embeds openload.co (13/12 variants disabled below).
        links = []
        links += [('movie/loadEmbed/%s/%s' % (i[2], i[1]), 'openload.co') for i in result if i[0] == '14']
        #links += [('movie/loadEmbed/%s/%s' % (i[2], i[1]), 'videomega.tv') for i in result if i[0] == '13']
        #links += [('movie/loadEmbed/%s/%s' % (i[2], i[1]), 'videowood.tv') for i in result if i[0] == '12']
        #for i in links: sources.append({'source': i[1], 'quality': quality, 'provider': 'Onemovies', 'url': i[0], 'direct': False, 'debridonly': False})
        for i in links: sources.append({ 'source': i[1], 'quality': quality, 'provider': 'Muchmoviesv2', 'url': i[0] })
        control.log('####### MOVIE sources %s' % sources)
        return sources
        #for u in url: sources.append({'source': 'Muchmovies', 'quality': quality, 'provider': 'Muchmoviesv2', 'url': u})
    except:
        return sources
def http_response(self, request, response):
    """urllib2 handler hook: hand the response back untouched.

    Used by the no-redirect opener (see cached_http_get) so 3xx responses
    reach the caller instead of being followed automatically.
    """
    control.log('Stopping Redirect')
    return response
def cached_http_get(url, base_url, timeout, cookies=None, data=None, multipart_data=None, headers=None, allow_redirect=True, method=None, require_debrid=False, cache_limit=8):
    """Fetch *url* with cookie-jar support and return the response body.

    Handles gzip decoding, optional non-following of redirects (returning the
    Refresh/Location target instead), a response size cap, and Cloudflare 503
    challenges via cloudflare2. Returns '' on error. The caching code paths
    are currently commented out except the final cache.cache_url write.
    NOTE(review): *base_url*, *require_debrid* and *cache_limit* are unused here.
    """
    #control.log('--=-=-==-=-=-=- CLIENT2 CACHE url: %s base_url:%s' % (url,base_url))
    if cookies is None: cookies = {}
    if timeout == 0: timeout = None
    if headers is None: headers = {}
    if url.startswith('//'): url = 'http:' + url
    referer = headers['Referer'] if 'Referer' in headers else url
    #control.log('Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' % (url, cookies, data, headers))
    if data is not None:
        # Strings pass through as-is; mappings are form-encoded (doseq=True).
        if isinstance(data, basestring):
            data = data
        else:
            data = urllib.urlencode(data, True)
    if multipart_data is not None:
        headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
        data = multipart_data
    #_created, _res_header, html = cache.get_cached_url(url, data, cache_limit)
    #if html:
    #    control.log('Returning cached result for: %s' % (url))
    #    return html
    try:
        cj = _set_cookies(url, cookies)
        request = urllib2.Request(url, data=data)
        request.add_header('User-Agent', control.get_ua())
        request.add_header('Accept', '*/*')
        request.add_unredirected_header('Host', request.get_host())
        request.add_unredirected_header('Referer', referer)
        for key in headers: request.add_header(key, headers[key])
        cj.add_cookie_header(request)
        # NOTE(review): install_opener mutates process-global urllib2 state;
        # the cookie-processor opener installed last is the one actually used.
        if not allow_redirect:
            opener = urllib2.build_opener(NoRedirection)
            urllib2.install_opener(opener)
        else:
            opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
            urllib2.install_opener(opener)
        opener2 = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener2)
        if method is not None: request.get_method = lambda: method.upper()
        response = urllib2.urlopen(request, timeout=timeout)
        cj.extract_cookies(response, request)
        #control.log('Response Cookies: %s - %s' % (url, cookies_as_str(cj)))
        cj._cookies = fix_bad_cookies(cj._cookies)
        cj.save(ignore_discard=True)
        # When not following redirects, surface the target URL instead of a body.
        if not allow_redirect and (response.getcode() in [301, 302, 303, 307] or response.info().getheader('Refresh')):
            if response.info().getheader('Refresh') is not None:
                refresh = response.info().getheader('Refresh')
                return refresh.split(';')[-1].split('url=')[-1]
            else:
                return response.info().getheader('Location')
        content_length = response.info().getheader('Content-Length', 0)
        if int(content_length) > control.MAX_RESPONSE:
            # Oversize responses are logged but still read, truncated to the cap.
            control.log('Response exceeded allowed size. %s => %s / %s' % (url, content_length, control.MAX_RESPONSE))
        if method == 'HEAD':
            return ''
        else:
            if response.info().get('Content-Encoding') == 'gzip':
                buf = StringIO(response.read(control.MAX_RESPONSE))
                f = gzip.GzipFile(fileobj=buf)
                html = f.read()
            else:
                html = response.read(control.MAX_RESPONSE)
    except urllib2.HTTPError as e:
        control.log('--=-=-==-=-=-=- CLIENT2 CACHE ERROR-1 e: %s' % (e))
        # 503 with a Cloudflare browser check: try to solve the challenge.
        if e.code == 503 and 'cf-browser-verification' in e.read():
            html = cloudflare2.solve(url, cj, control.get_ua())
            if not html: return ''
        else:
            control.log('Error (%s) during scraper http get: %s' % (str(e), url))
            return ''
    except Exception as e:
        control.log('Error (%s) during scraper get: %s' % (str(e), url))
        return ''
    cache.cache_url(url, html, data)
    return html
def request(url, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, limit=None, referer=None, cookie=None, compression=True, output='', timeout='25', XHR=False):
    """General-purpose HTTP client used by all scrapers.

    Builds default headers (UA, Referer, Accept-Language, gzip), optionally
    routes through a proxy, disables SSL verification on Python 2.7.9-2.7.10,
    and transparently handles Cloudflare (503 challenge), Sucuri and
    Blazingfast anti-bot pages.

    *output* selects the return shape: '' -> body, 'cookie' -> cookie string,
    'geturl' -> final URL, 'headers' -> response headers, 'chunk' -> first
    16 KiB of large responses, 'extended' -> (body, code, response headers,
    request headers, cookie). Returns None on failure (logged).
    """
    try:
        #control.log('@@@@@@@@@@@@@@ - URL:%s POST:%s' % (url, post))
        handlers = []
        if not proxy == None:
            handlers += [urllib2.ProxyHandler({'http': '%s' % (proxy)}), urllib2.HTTPHandler]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        # A cookie jar is needed whenever cookies must be reported back or
        # the connection stays open.
        if output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        # 2.7.9 enabled cert verification by default; these sites need it off.
        if (2, 7, 9) <= sys.version_info < (2, 7, 11):
            try:
                import ssl
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                opener = urllib2.install_opener(opener)
            except:
                pass
        if url.startswith('//'): url = 'http:' + url
        # Self-update is a no-op for a dict; it only serves to replace a
        # non-dict/None *headers* with {} via the except.
        try: headers.update(headers)
        except: headers = {}
        if 'User-Agent' in headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'
            # NOTE(review): immediate override of the line above — reconstructed
            # into this mobile branch; confirm against the original layout.
            headers['User-Agent'] = 'Mozilla/5.0 (Linux; U; Android 4.0.3; ko-kr; LG-L160L Build/IML74K) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30'
        if 'Referer' in headers:
            pass
        elif referer == None:
            headers['Referer'] = '%s://%s/' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        else:
            headers['Referer'] = referer
        if not 'Accept-Language' in headers:
            headers['Accept-Language'] = 'en-US'
        if 'X-Requested-With' in headers:
            pass
        elif XHR == True:
            headers['X-Requested-With'] = 'XMLHttpRequest'
        if 'Cookie' in headers:
            pass
        elif not cookie == None:
            headers['Cookie'] = cookie
        # gzip only when the whole body will be read (limit would truncate
        # a compressed stream).
        if 'Accept-Encoding' in headers:
            pass
        elif compression and limit is None:
            headers['Accept-Encoding'] = 'gzip'
        if redirect == False:
            # Local no-op error processor: pass 3xx responses through.
            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response):
                    return response
            opener = urllib2.build_opener(NoRedirection)
            opener = urllib2.install_opener(opener)
            try: del headers['Referer']
            except: pass
        if isinstance(post, dict):
            post = urllib.urlencode(post)
        request = urllib2.Request(url, data=post, headers=headers)
        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:
            # 503 may be a Cloudflare browser-verification page; solve it via
            # the cached cfcookie and retry once with that cookie.
            if response.code == 503:
                cf_result = response.read(5242880)
                try: encoding = response.info().getheader('Content-Encoding')
                except: encoding = None
                if encoding == 'gzip':
                    cf_result = gzip.GzipFile(fileobj=StringIO.StringIO(cf_result)).read()
                if 'cf-browser-verification' in cf_result:
                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
                    ua = headers['User-Agent']
                    cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)
                    headers['Cookie'] = cf
                    request = urllib2.Request(url, data=post, headers=headers)
                    response = urllib2.urlopen(request, timeout=int(timeout))
                elif error == False:
                    return
            elif error == False:
                return
        if output == 'cookie':
            try: result = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: result = cf
            except: pass
            if close == True: response.close()
            return result
        elif output == 'geturl':
            result = response.geturl()
            if close == True: response.close()
            return result
        elif output == 'headers':
            result = response.headers
            if close == True: response.close()
            return result
        elif output == 'chunk':
            # Only sample large responses; small ones return None.
            try: content = int(response.headers['Content-Length'])
            except: content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close == True: response.close()
            return result
        # Body read: limit='0' -> 224 KiB, numeric limit -> limit KiB,
        # otherwise capped at 5 MiB.
        if limit == '0':
            result = response.read(224 * 1024)
        elif not limit == None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)
        try: encoding = response.info().getheader('Content-Encoding')
        except: encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()
        # Sucuri cloudproxy interstitial: compute its cookie and refetch.
        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)
            headers['Cookie'] = su
            request = urllib2.Request(url, data=post, headers=headers)
            response = urllib2.urlopen(request, timeout=int(timeout))
            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)
            try: encoding = response.info().getheader('Content-Encoding')
            except: encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()
        # Blazingfast.io anti-bot page: refetch through _basic_request with
        # the cached bfcookie.
        if 'Blazingfast.io' in result and 'xhr.open' in result:
            netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
            ua = headers['User-Agent']
            headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua, timeout)
            result = _basic_request(url, headers=headers, timeout=timeout, limit=limit)
        if output == 'extended':
            response_headers = response.headers
            response_code = str(response.code)
            try: cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: cookie = cf
            except: pass
            if close == True: response.close()
            return (result, response_code, response_headers, headers, cookie)
        else:
            if close == True: response.close()
        return result
    except Exception as e:
        control.log('Client connect url:%s Error %s' % (url, e))
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect Muchmovies stream links for a movie or episode page.

    Reads the media id from the page URL, maps the site's quality tag to
    CAM/HD/SD, fetches the server list via XHR and emits gvideo/openload/
    videomega/videowood entries with the XHR headers appended to each URL.
    Returns a list of source dicts; the partial list on any failure.
    """
    sources = []
    try:
        if url == None:
            return sources
        page = urlparse.urljoin(self.base_link, url)
        page = referer = page.replace('/watching.html', '')
        # Episode pages carry a trailing '?episode=N'; movies have none.
        try:
            page, episode = re.findall('(.+?)\?episode=(\d*)$', page)[0]
        except:
            episode = None
        media_id = re.findall('-(\d+)', page)[-1]
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': page}
        raw_quality = cache.get(self.muchmovies_info, 9000, media_id)[1].lower()
        if raw_quality == 'cam' or raw_quality == 'ts':
            quality = 'CAM'
        elif raw_quality == 'hd':
            quality = 'HD'
        else:
            quality = 'SD'
        server_url = urlparse.urljoin(self.base_link, self.server_link % media_id)
        html = client.request(server_url, headers=headers)
        html = client.parseDOM(html, 'div', attrs={'class': 'les-content'})
        # (onclick attribute, episode-number label) per anchor.
        pairs = zip(client.parseDOM(html, 'a', ret='onclick'), client.parseDOM(html, 'a'))
        pairs = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in pairs]
        if not episode == None:
            onclicks = [i[0] for i in pairs if '%01d' % int(i[1]) == episode]
        else:
            onclicks = [i[0] for i in pairs]
        # Each onclick carries '(server_id, episode_id)'.
        ids = [re.findall('(\d+),(\d+)', oc) for oc in onclicks]
        ids = [found[0][:2] for found in ids if len(found) > 0]
        head_link = '|' + urllib.urlencode(headers)
        links = []
        links += [{'source': 'gvideo', 'url': self.direct_link % i[1]} for i in ids if 2 <= int(i[0]) <= 11]
        links += [{'source': 'openload', 'url': self.embed_link % i[1]} for i in ids if i[0] == '14']
        links += [{'source': 'videomega', 'url': self.embed_link % i[1]} for i in ids if i[0] == '13']
        links += [{'source': 'videowood', 'url': self.embed_link % i[1]} for i in ids if i[0] == '12']
        for link in links:
            sources.append({'source': link['source'], 'quality': quality, 'provider': 'Muchmovies', 'url': link['url'] + head_link})
        return sources
    except Exception as e:
        control.log('ERROR MUCH %s' % e)
        return sources
def logger(msg):
    """Forward *msg* to the shared addon log (thin wrapper over control.log)."""
    control.log(msg)
def resolve(self, url):
    """Resolve an Alltube source URL into a playable stream URL.

    *url* has the form '<page_url>?<base64 of special.php URL>'. The method
    fetches the page cookie, computes the site's 'tmvh' anti-bot cookie by
    hashing the jsverify.php key/hash pieces, requests the decoded
    special.php URL with those cookies, extracts the hoster iframe and hands
    it to resolvers.request. Returns None on any failure.
    """
    try:
        myurl = url.split('?')
        control.log('ALLTUBE RESOLVE URL %s' % str(myurl))
        control.log('ALLTUBE RESOLVE URL %s' % str(myurl[-1].decode('base64')))
        try:
            mycookie = client.request(myurl[0], output='cookie', error=True)
            # Base64 of a small Python script defining abc(in_abc): a pure-
            # Python MD5 (port of the site's JS hasher) used for the 'tmvh'
            # token. NOTE(review): the decoded code contains the JS leftover
            # text['substring'](i - 64, i), which would fail on inputs >= 64
            # bytes. Executed via `exec` with "__builtins__": None — static
            # embedded code, but still worth flagging.
            tmp = 'ZGVmIGFiYyhpbl9hYmMpOg0KICAgIGRlZiByaGV4KGEpOg0KICAgICAgICBoZXhfY2hyID0gJzAxMjM0NTY3ODlhYmNkZWYnDQogICAgICAgIHJldCA9ICcnDQogICAgICAgIGZvciBpIGluIHJhbmdlKDQpOg0KICAgICAgICAgICAgcmV0ICs9IGhleF9jaHJbKGEgPj4gKGkgKiA4ICsgNCkpICYgMHgwRl0gKyBoZXhfY2hyWyhhID4+IChpICogOCkpICYgMHgwRl0NCiAgICAgICAgcmV0dXJuIHJldA0KICAgIGRlZiBoZXgodGV4dCk6DQogICAgICAgIHJldCA9ICcnDQogICAgICAgIGZvciBpIGluIHJhbmdlKGxlbih0ZXh0KSk6DQogICAgICAgICAgICByZXQgKz0gcmhleCh0ZXh0W2ldKQ0KICAgICAgICByZXR1cm4gcmV0DQogICAgZGVmIGFkZDMyKGEsIGIpOg0KICAgICAgICByZXR1cm4gKGEgKyBiKSAmIDB4RkZGRkZGRkYNCiAgICBkZWYgY21uKGEsIGIsIGMsIGQsIGUsIGYpOg0KICAgICAgICBiID0gYWRkMzIoYWRkMzIoYiwgYSksIGFkZDMyKGQsIGYpKTsNCiAgICAgICAgcmV0dXJuIGFkZDMyKChiIDw8IGUpIHwgKGIgPj4gKDMyIC0gZSkpLCBjKQ0KICAgIGRlZiBmZihhLCBiLCBjLCBkLCBlLCBmLCBnKToNCiAgICAgICAgcmV0dXJuIGNtbigoYiAmIGMpIHwgKCh+YikgJiBkKSwgYSwgYiwgZSwgZiwgZykNCiAgICBkZWYgZ2coYSwgYiwgYywgZCwgZSwgZiwgZyk6DQogICAgICAgIHJldHVybiBjbW4oKGIgJiBkKSB8IChjICYgKH5kKSksIGEsIGIsIGUsIGYsIGcpDQogICAgZGVmIGhoKGEsIGIsIGMsIGQsIGUsIGYsIGcpOg0KICAgICAgICByZXR1cm4gY21uKGIgXiBjIF4gZCwgYSwgYiwgZSwgZiwgZykNCiAgICBkZWYgaWkoYSwgYiwgYywgZCwgZSwgZiwgZyk6DQogICAgICAgIHJldHVybiBjbW4oYyBeIChiIHwgKH5kKSksIGEsIGIsIGUsIGYsIGcpDQogICAgZGVmIGNyeXB0Y3ljbGUodGFiQSwgdGFiQik6DQogICAgICAgIGEgPSB0YWJBWzBdDQogICAgICAgIGIgPSB0YWJBWzFdDQogICAgICAgIGMgPSB0YWJBWzJdDQogICAgICAgIGQgPSB0YWJBWzNdDQogICAgICAgIGEgPSBmZihhLCBiLCBjLCBkLCB0YWJCWzBdLCA3LCAtNjgwODc2OTM2KTsNCiAgICAgICAgZCA9IGZmKGQsIGEsIGIsIGMsIHRhYkJbMV0sIDEyLCAtMzg5NTY0NTg2KTsNCiAgICAgICAgYyA9IGZmKGMsIGQsIGEsIGIsIHRhYkJbMl0sIDE3LCA2MDYxMDU4MTkpOw0KICAgICAgICBiID0gZmYoYiwgYywgZCwgYSwgdGFiQlszXSwgMjIsIC0xMDQ0NTI1MzMwKTsNCiAgICAgICAgYSA9IGZmKGEsIGIsIGMsIGQsIHRhYkJbNF0sIDcsIC0xNzY0MTg4OTcpOw0KICAgICAgICBkID0gZmYoZCwgYSwgYiwgYywgdGFiQls1XSwgMTIsIDEyMDAwODA0MjYpOw0KICAgICAgICBjID0gZmYoYywgZCwgYSwgYiwgdGFiQls2XSwgMTcsIC0xNDczMjMxMzQxKTsNCiAgICAgICAgYiA9IGZmKGIsIGMsIGQsIGEsIHRhYkJbN10sIDIyLCAtNDU3MDU5ODMpOw0KICAgICAgICBhID0gZmYoYSwgYiwgYywgZCwgdGFiQls4XSwgNywgMTc3MDAzNTQxNik7DQogICAgICAgIGQgPSBmZihkLCBhLCBiLCBjLCB0YWJCWzldLCAxMiwgLTE5NTg0MTQ0MTcpOw0KICAgICAgICBjID0gZmYoYywgZCwgYSwgYiwgdGFiQlsxMF0sIDE3LCAtNDIwNjMpOw0KICAgICAgICBiID0gZmYoYiwgYywgZCwgYSwgdGFiQlsxMV0sIDIyLCAtMTk5MDQwNDE2Mik7DQogICAgICAgIGEgPSBmZihhLCBiLCBjLCBkLCB0YWJCWzEyXSwgNywgMTgwNDYwMzY4Mik7DQogICAgICAgIGQgPSBmZihkLCBhLCBiLCBjLCB0YWJCWzEzXSwgMTIsIC00MDM0MTEwMSk7DQogICAgICAgIGMgPSBmZihjLCBkLCBhLCBiLCB0YWJCWzE0XSwgMTcsIC0xNTAyMDAyMjkwKTsNCiAgICAgICAgYiA9IGZmKGIsIGMsIGQsIGEsIHRhYkJbMTVdLCAyMiwgMTIzNjUzNTMyOSk7DQogICAgICAgIGEgPSBnZyhhLCBiLCBjLCBkLCB0YWJCWzFdLCA1LCAtMTY1Nzk2NTEwKTsNCiAgICAgICAgZCA9IGdnKGQsIGEsIGIsIGMsIHRhYkJbNl0sIDksIC0xMDY5NTAxNjMyKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbMTFdLCAxNCwgNjQzNzE3NzEzKTsNCiAgICAgICAgYiA9IGdnKGIsIGMsIGQsIGEsIHRhYkJbMF0sIDIwLCAtMzczODk3MzAyKTsNCiAgICAgICAgYSA9IGdnKGEsIGIsIGMsIGQsIHRhYkJbNV0sIDUsIC03MDE1NTg2OTEpOw0KICAgICAgICBkID0gZ2coZCwgYSwgYiwgYywgdGFiQlsxMF0sIDksIDM4MDE2MDgzKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbMTVdLCAxNCwgLTY2MDQ3ODMzNSk7DQogICAgICAgIGIgPSBnZyhiLCBjLCBkLCBhLCB0YWJCWzRdLCAyMCwgLTQwNTUzNzg0OCk7DQogICAgICAgIGEgPSBnZyhhLCBiLCBjLCBkLCB0YWJCWzldLCA1LCA1Njg0NDY0MzgpOw0KICAgICAgICBkID0gZ2coZCwgYSwgYiwgYywgdGFiQlsxNF0sIDksIC0xMDE5ODAzNjkwKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbM10sIDE0LCAtMTg3MzYzOTYxKTsNCiAgICAgICAgYiA9IGdnKGIsIGMsIGQsIGEsIHRhYkJbOF0sIDIwLCAxMTYzNTMxNTAxKTsNCiAgICAgICAgYSA9IGdnKGEsIGIsIGMsIGQsIHRhYkJbMTNdLCA1LCAtMTQ0NDY4MTQ2Nyk7DQogICAgICAgIGQgPSBnZyhkLCBhLCBiLCBjLCB0YWJCWzJdLCA5LCAtNTE0MDM3ODQpOw0KICAgICAgICBjID0gZ2coYywgZCwgYSwgYiwgdGFiQls3XSwgMTQsIDE3MzUzMjg0NzMpOw0KICAgICAgICBiID0gZ2coYiwgYywgZCwgYSwgdGFiQlsxMl0sIDIwLCAtMTkyNjYwNzczNCk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzVdLCA0LCAtMzc4NTU4KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbOF0sIDExLCAtMjAyMjU3NDQ2Myk7DQogICAgICAgIGMgPSBoaChjLCBkLCBhLCBiLCB0YWJCWzExXSwgMTYsIDE4MzkwMzA1NjIpOw0KICAgICAgICBiID0gaGgoYiwgYywgZCwgYSwgdGFiQlsxNF0sIDIzLCAtMzUzMDk1NTYpOw0KICAgICAgICBhID0gaGgoYSwgYiwgYywgZCwgdGFiQlsxXSwgNCwgLTE1MzA5OTIwNjApOw0KICAgICAgICBkID0gaGgoZCwgYSwgYiwgYywgdGFiQls0XSwgMTEsIDEyNzI4OTMzNTMpOw0KICAgICAgICBjID0gaGgoYywgZCwgYSwgYiwgdGFiQls3XSwgMTYsIC0xNTU0OTc2MzIpOw0KICAgICAgICBiID0gaGgoYiwgYywgZCwgYSwgdGFiQlsxMF0sIDIzLCAtMTA5NDczMDY0MCk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzEzXSwgNCwgNjgxMjc5MTc0KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbMF0sIDExLCAtMzU4NTM3MjIyKTsNCiAgICAgICAgYyA9IGhoKGMsIGQsIGEsIGIsIHRhYkJbM10sIDE2LCAtNzIyNTIxOTc5KTsNCiAgICAgICAgYiA9IGhoKGIsIGMsIGQsIGEsIHRhYkJbNl0sIDIzLCA3NjAyOTE4OSk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzldLCA0LCAtNjQwMzY0NDg3KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbMTJdLCAxMSwgLTQyMTgxNTgzNSk7DQogICAgICAgIGMgPSBoaChjLCBkLCBhLCBiLCB0YWJCWzE1XSwgMTYsIDUzMDc0MjUyMCk7DQogICAgICAgIGIgPSBoaChiLCBjLCBkLCBhLCB0YWJCWzJdLCAyMywgLTk5NTMzODY1MSk7DQogICAgICAgIGEgPSBpaShhLCBiLCBjLCBkLCB0YWJCWzBdLCA2LCAtMTk4NjMwODQ0KTsNCiAgICAgICAgZCA9IGlpKGQsIGEsIGIsIGMsIHRhYkJbN10sIDEwLCAxMTI2ODkxNDE1KTsNCiAgICAgICAgYyA9IGlpKGMsIGQsIGEsIGIsIHRhYkJbMTRdLCAxNSwgLTE0MTYzNTQ5MDUpOw0KICAgICAgICBiID0gaWkoYiwgYywgZCwgYSwgdGFiQls1XSwgMjEsIC01NzQzNDA1NSk7DQogICAgICAgIGEgPSBpaShhLCBiLCBjLCBkLCB0YWJCWzEyXSwgNiwgMTcwMDQ4NTU3MSk7DQogICAgICAgIGQgPSBpaShkLCBhLCBiLCBjLCB0YWJCWzNdLCAxMCwgLTE4OTQ5ODY2MDYpOw0KICAgICAgICBjID0gaWkoYywgZCwgYSwgYiwgdGFiQlsxMF0sIDE1LCAtMTA1MTUyMyk7DQogICAgICAgIGIgPSBpaShiLCBjLCBkLCBhLCB0YWJCWzFdLCAyMSwgLTIwNTQ5MjI3OTkpOw0KICAgICAgICBhID0gaWkoYSwgYiwgYywgZCwgdGFiQls4XSwgNiwgMTg3MzMxMzM1OSk7DQogICAgICAgIGQgPSBpaShkLCBhLCBiLCBjLCB0YWJCWzE1XSwgMTAsIC0zMDYxMTc0NCk7DQogICAgICAgIGMgPSBpaShjLCBkLCBhLCBiLCB0YWJCWzZdLCAxNSwgLTE1NjAxOTgzODApOw0KICAgICAgICBiID0gaWkoYiwgYywgZCwgYSwgdGFiQlsxM10sIDIxLCAxMzA5MTUxNjQ5KTsNCiAgICAgICAgYSA9IGlpKGEsIGIsIGMsIGQsIHRhYkJbNF0sIDYsIC0xNDU1MjMwNzApOw0KICAgICAgICBkID0gaWkoZCwgYSwgYiwgYywgdGFiQlsxMV0sIDEwLCAtMTEyMDIxMDM3OSk7DQogICAgICAgIGMgPSBpaShjLCBkLCBhLCBiLCB0YWJCWzJdLCAxNSwgNzE4Nzg3MjU5KTsNCiAgICAgICAgYiA9IGlpKGIsIGMsIGQsIGEsIHRhYkJbOV0sIDIxLCAtMzQzNDg1NTUxKTsNCiAgICAgICAgdGFiQVswXSA9IGFkZDMyKGEsIHRhYkFbMF0pOw0KICAgICAgICB0YWJBWzFdID0gYWRkMzIoYiwgdGFiQVsxXSk7DQogICAgICAgIHRhYkFbMl0gPSBhZGQzMihjLCB0YWJBWzJdKTsNCiAgICAgICAgdGFiQVszXSA9IGFkZDMyKGQsIHRhYkFbM10pDQogICAgZGVmIGNyeXB0YmxrKHRleHQpOg0KICAgICAgICByZXQgPSBbXQ0KICAgICAgICBmb3IgaSBpbiByYW5nZSgwLCA2NCwgNCk6DQogICAgICAgICAgICByZXQuYXBwZW5kKG9yZCh0ZXh0W2ldKSArIChvcmQodGV4dFtpKzFdKSA8PCA4KSArIChvcmQodGV4dFtpKzJdKSA8PCAxNikgKyAob3JkKHRleHRbaSszXSkgPDwgMjQpKQ0KICAgICAgICByZXR1cm4gcmV0DQogICAgZGVmIGpjc3lzKHRleHQpOg0KICAgICAgICB0eHQgPSAnJzsNCiAgICAgICAgdHh0TGVuID0gbGVuKHRleHQpDQogICAgICAgIHJldCA9IFsxNzMyNTg0MTkzLCAtMjcxNzMzODc5LCAtMTczMjU4NDE5NCwgMjcxNzMzODc4XQ0KICAgICAgICBpID0gNjQNCiAgICAgICAgd2hpbGUgaSA8PSBsZW4odGV4dCk6DQogICAgICAgICAgICBjcnlwdGN5Y2xlKHJldCwgY3J5cHRibGsodGV4dFsnc3Vic3RyaW5nJ10oaSAtIDY0LCBpKSkpDQogICAgICAgICAgICBpICs9IDY0DQogICAgICAgIHRleHQgPSB0ZXh0W2kgLSA2NDpdDQogICAgICAgIHRtcCA9IFswLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwXQ0KICAgICAgICBpID0gMA0KICAgICAgICB3aGlsZSBpIDwgbGVuKHRleHQpOg0KICAgICAgICAgICAgdG1wW2kgPj4gMl0gfD0gb3JkKHRleHRbaV0pIDw8ICgoaSAlIDQpIDw8IDMpDQogICAgICAgICAgICBpICs9IDENCiAgICAgICAgdG1wW2kgPj4gMl0gfD0gMHg4MCA8PCAoKGkgJSA0KSA8PCAzKQ0KICAgICAgICBpZiBpID4gNTU6DQogICAgICAgICAgICBjcnlwdGN5Y2xlKHJldCwgdG1wKTsNCiAgICAgICAgICAgIGZvciBpIGluIHJhbmdlKDE2KToNCiAgICAgICAgICAgICAgICB0bXBbaV0gPSAwDQogICAgICAgIHRtcFsxNF0gPSB0eHRMZW4gKiA4Ow0KICAgICAgICBjcnlwdGN5Y2xlKHJldCwgdG1wKTsNCiAgICAgICAgcmV0dXJuIHJldA0KICAgIGRlZiByZXplZG93YSh0ZXh0KToNCiAgICAgICAgcmV0dXJuIGhleChqY3N5cyh0ZXh0KSkNCiAgICByZXR1cm4gcmV6ZWRvd2EoaW5fYWJjKQ0K'
            tmp = base64.b64decode(tmp)
            _myFun = compile(tmp, '', 'exec')
            # Restricted globals: only the callables the embedded code needs.
            vGlobals = { "__builtins__": None, 'len': len, 'list': list, 'ord': ord, 'range': range }
            vLocals = {'abc': ''}
            exec _myFun in vGlobals, vLocals
            # The exec defines abc() in vLocals; keep it as the hash function.
            myFun1 = vLocals['abc']
        except Exception as e:
            control.log('Altube err1 [%s]' % e)
        control.log('ALLTUBE cokie URL %s' % str(mycookie))
        data = client.request(urlparse.urljoin(self.base_link, '/jsverify.php?op=tag'), cookie=mycookie)
        try:
            # jsverify returns parallel 'key'/'hash' arrays; concatenate the
            # hash pieces in key order and MD5 them for the tmvh cookie.
            data = client.byteify(json.loads(data))
            d = {}
            for i in range(len(data['key'])):
                d[data['key'][i]] = data['hash'][i]
            tmp = ''
            for k in sorted(d.keys()):
                tmp += d[k]
            mycookie = 'tmvh=%s;%s' % (myFun1(tmp), mycookie)
        except Exception as e:
            control.log('Altube err3 [%s]' % e)
        control.log("XXXXXXXXXXX %s" % mycookie)
        # http://alltube.tv/special.php?hosting=openload&id=oU9oQLz4F-U&width=673&height=471.09999999999997
        link = client.request(myurl[-1].decode('base64') + '&width=673&height=471.09999999999997', cookie=mycookie)
        match = re.search('<iframe src="(.+?)"', link)
        if match:
            control.log('Altube link [%s]' % match.group(1))
            linkVideo = match.group(1)
            return resolvers.request(linkVideo)
        return
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape Dayt stream links for a movie described by the query-string *url*.

    Builds the watch-page slug from the title, verifies the page matches the
    requested imdb id or year, then collects yadi.sk/mail.ru links plus (in
    principle) gvideo links from the 'pasep' iframe chain. Returns a list of
    source dicts; the partial list on failure.
    """
    try:
        sources = []
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # Slugify the title the way the site builds its watch URLs.
        url = (data['title'].translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
        url = urlparse.urljoin(self.base_link, self.watch_link % url)
        r = client.request(url, output='geturl')
        if r == None: raise Exception()
        r = result= client.request(url)
        r = re.sub(r'[^\x00-\x7F]+',' ', r)
        y = re.findall('Date\s*:\s*.+?>.+?(\d{4})', r)
        y = y[0] if len(y) > 0 else None
        # Reject pages that match neither the imdb id nor the release year.
        if not (data['imdb'] in r or data['year'] == y): raise Exception()
        q = client.parseDOM(r, 'title')
        q = q[0] if len(q) > 0 else None
        quality = '1080p' if ' 1080' in q else 'HD'
        r = client.parseDOM(r, 'div', attrs = {'id': '5throw'})[0]
        r = client.parseDOM(r, 'a', ret='href', attrs = {'rel': 'nofollow'})
        links = []
        for url in r:
            try:
                if 'yadi.sk' in url: url = resolvers.request(url)
                elif 'mail.ru' in url: url = resolvers.request(url)
                else: raise Exception()
                if url == None: raise Exception()
                links += [{'source': 'cdn', 'url': url, 'quality': quality}]
            except:
                pass
        try:
            # Follow the 'pasep' iframe chain (max 4 hops) to a google URL.
            r = client.parseDOM(result, 'iframe', ret='src')
            r = [i for i in r if 'pasep' in i][0]
            for i in range(0, 4):
                try:
                    r = client.request(r)
                    r = re.sub(r'[^\x00-\x7F]+',' ', r)
                    r = client.parseDOM(r, 'iframe', ret='src')[0]
                    if 'google' in r: break
                except:
                    break
            if not 'google' in r: raise Exception()
            #url = directstream.google(r)
            # NOTE(review): with directstream disabled, `url` is a plain string,
            # so the loop below iterates characters and i['url'] always raises
            # (silently swallowed) — gvideo links are effectively never added.
            # Restoring directstream.google(r) would fix this.
            url = r
            for i in url:
                try: links += [{'source': 'gvideo', 'url': i['url'], 'quality': i['quality']}]
                except: pass
        except:
            pass
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Dayt', 'url': i['url']})
        return sources
    except Exception as e:
        control.log('ERROR dayyt %s' % e)
        return sources
def resolve(self, url):
    """Pelispedia source URLs are already final: log and return unchanged."""
    control.log("##pelispedia %s " % url)
    return url
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape Pelispedia stream links for *url*.

    Loads the page's player iframe, then for each pelispedia-hosted button
    tries three extraction paths in order: inline JWPlayer 'sources' arrays
    (gvideo), the gkpluginsphp.php XHR endpoint (gvideo), and the
    protected.php XHR endpoint (cdn). Returns a list of source dicts; the
    partial list on failure.
    """
    control.log("><><><><> PELISPEDIA SOURCE %s" % url)
    try:
        sources = []
        if url == None: return sources
        r = urlparse.urljoin(self.base_link, url)
        result = client.request(r)
        f = client.parseDOM(result, 'iframe', ret='src')
        f = [i for i in f if 'iframe' in i][0]
        result = client.request(f, headers={'Referer': r})
        # Player selection buttons; keep only pelispedia-hosted targets.
        r = client.parseDOM(result, 'div', attrs={'id': 'botones'})[0]
        r = client.parseDOM(r, 'a', ret='href')
        r = [(i, urlparse.urlparse(i).netloc) for i in r]
        r = [i[0] for i in r if 'pelispedia' in i[1]]
        links = []
        for u in r:
            result = client.request(u, headers={'Referer': f})
            # Path 1: inline JWPlayer sources -> gvideo with detected quality.
            try:
                url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                url = re.findall('"file"\s*:\s*"(.+?)"', url)
                url = [i.split()[0].replace('\\/', '/') for i in url]
                for i in url:
                    try: links.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i})
                    except: pass
            except:
                pass
            # Path 2: gkpluginsphp endpoint -> gvideo, assumed HD.
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': u}
                post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                post = urllib.urlencode({'link': post})
                url = urlparse.urljoin(self.base_link, '/Pe_flv_flsh/plugins/gkpluginsphp.php')
                url = client.request(url, post=post, headers=headers)
                url = json.loads(url)['link']
                links.append({'source': 'gvideo', 'quality': 'HD', 'url': url})
            except:
                pass
            # Path 3: protected.php endpoint ('pic' parameter) -> cdn, assumed HD.
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest'}
                post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]
                post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                post = urllib.urlencode({'sou': 'pic', 'fv': '21', 'url': post})
                url = urlparse.urljoin(self.base_link, '/Pe_Player_Html5/pk/pk/plugins/protected.php')
                url = client.request(url, post=post, headers=headers)
                url = json.loads(url)[0]['url']
                links.append({'source': 'cdn', 'quality': 'HD', 'url': url})
            except:
                pass
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Pelispedia', 'url': i['url']})
        return sources
    except:
        return sources
def get_show(self, imdb, tvdb, tvshowtitle, year):
    """Find the watchseries show-page path for *tvshowtitle* (*year*).

    Searches a year window of [year-1, year+1], filters result rows to ones
    whose label mentions one of those years, and prefers an exact cleaned-
    title match; otherwise probes up to 5 candidate pages for the imdb id.
    Returns the utf-8 encoded path, or None when nothing matches or on error.
    """
    try:
        query = self.search_link % (str(int(year) - 1), str(int(year) + 1), urllib.quote_plus(tvshowtitle))
        # Was a stray `print` — route through the addon log like the rest of the file.
        control.log('watchseries search query %s' % query)
        result = client.request(urlparse.urljoin(self.base_link, query))
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'div', attrs={'class': 'episode-summary'})[0]
        result = client.parseDOM(result, 'tr')
        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]
        # (href, last anchor text) per row, then strip markup from the label.
        result = [(re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[0], client.parseDOM(i, 'a')[-1]) for i in result]
        result = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in result]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
        # Some links wrap the target in a ?u= redirect parameter.
        try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
        except: pass
        result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
        match = [i[0] for i in result if cleantitle.tv(i[1]) in tvshowtitle]
        control.log('watchseries title matches %s' % match)
        # De-duplicated fallback candidates, preserving order.
        match2 = [i[0] for i in result]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return
        # `url` was previously left unbound when nothing matched, relying on a
        # NameError to fall into the except; make the miss explicit instead.
        url = None
        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                result = client.request(self.base_link + i, headers=self.headers)
                if str(imdb) in str(result):
                    url = i
                    break
            except:
                pass
        if url == None: return None
        return url.encode('utf-8')
    except Exception as e:
        control.log('ERROR watchser GET %s' % e)
        return None
def resolve(self, url):
    """Hand an Alluc URL to the shared resolver chain and return its result."""
    control.log('>>>>>>>>>>>>>>>>>> Resolve ALLUC %s' % url)
    return resolvers.request(url)
def addItem(self, name, title, year, imdb, tmdb, tvdb, tvrage, season, episode, tvshowtitle, alter, date, meta):
    """Gather sources and populate the Kodi directory with one item per source.

    Runs getSources + sourcesFilter behind a progress dialog, derives artwork
    from *meta* (JSON string) with addon fallbacks, and adds one playItem
    entry per source (the first entry also carries the meta payload). Shows
    an info dialog on total failure.
    """
    try:
        if imdb == '0': imdb = '0000000'
        imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
        content = 'movie' if tvshowtitle == None else 'episode'
        self.sources = self.getSources(name, title, year, imdb, tmdb, tvdb, tvrage, season, episode, tvshowtitle, alter, date)
        if self.sources == []: raise Exception()
        self.progressDialog = control.progressDialog
        self.progressDialog.create(control.addonInfo('name'), '')
        self.progressDialog.update(0, control.lang(30515).encode('utf-8'), str(' '))
        self.sources = self.sourcesFilter()
        infoMenu = control.lang(30502).encode('utf-8') if content == 'movie' else control.lang(30503).encode('utf-8')
        sysmeta = urllib.quote_plus(meta)
        sysaddon = sys.argv[0]
        meta = json.loads(meta)
        # Artwork: fall back poster -> banner -> thumb -> fanart -> addon art.
        poster = meta['poster'] if 'poster' in meta else '0'
        banner = meta['banner'] if 'banner' in meta else '0'
        thumb = meta['thumb'] if 'thumb' in meta else poster
        fanart = meta['fanart'] if 'fanart' in meta else '0'
        if poster == '0': poster = control.addonPoster()
        if banner == '0' and poster == '0': banner = control.addonBanner()
        elif banner == '0': banner = poster
        if thumb == '0' and fanart == '0': thumb = control.addonFanart()
        elif thumb == '0': thumb = fanart
        if control.setting('fanart') == 'true' and not fanart == '0': pass
        else: fanart = control.addonFanart()
        for i in range(len(self.sources)):
            try:
                if self.progressDialog.iscanceled(): break
                self.progressDialog.update(int((100 / float(len(self.sources))) * i))
                url, label, provider = self.sources[i]['url'], self.sources[i]['label'], self.sources[i]['provider']
                sysname, sysurl, sysimage, sysprovider = urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(poster), urllib.quote_plus(provider)
                syssource = urllib.quote_plus(json.dumps([self.sources[i]]))
                # Only the first item carries the meta payload to keep URLs short.
                if i == 0: query = 'action=playItem&content=%s&name=%s&year=%s&imdb=%s&tvdb=%s&source=%s&meta=%s' % (content, sysname, year, imdb, tvdb, syssource, sysmeta)
                else: query = 'action=playItem&content=%s&name=%s&year=%s&imdb=%s&tvdb=%s&source=%s' % (content, sysname, year, imdb, tvdb, syssource)
                # Context menu: queue, download, info, refresh, settings, playlist.
                cm = []
                cm.append((control.lang(30504).encode('utf-8'), 'RunPlugin(%s?action=queueItem)' % sysaddon))
                cm.append((control.lang(30505).encode('utf-8'), 'RunPlugin(%s?action=download&name=%s&image=%s&url=%s&provider=%s)' % (sysaddon, sysname, sysimage, sysurl, sysprovider)))
                cm.append((infoMenu, 'Action(Info)'))
                cm.append((control.lang(30506).encode('utf-8'), 'RunPlugin(%s?action=refresh)' % sysaddon))
                cm.append((control.lang(30507).encode('utf-8'), 'RunPlugin(%s?action=openSettings)' % sysaddon))
                cm.append((control.lang(30508).encode('utf-8'), 'RunPlugin(%s?action=openPlaylist)' % sysaddon))
                item = control.item(label=label, iconImage='DefaultVideo.png', thumbnailImage=thumb)
                # setArt is not available on every Kodi version; best-effort.
                try: item.setArt({'poster': poster, 'tvshow.poster': poster, 'season.poster': poster, 'banner': banner, 'tvshow.banner': banner, 'season.banner': banner})
                except: pass
                item.setInfo(type='Video', infoLabels = meta)
                if not fanart == None: item.setProperty('Fanart_Image', fanart)
                item.setProperty('Video', 'true')
                #item.setProperty('IsPlayable', 'true')
                item.addContextMenuItems(cm, replaceItems=True)
                control.addItem(handle=int(sys.argv[1]), url='%s?%s' % (sysaddon, query), listitem=item, isFolder=False)
            except Exception as e:
                control.log('ERROR Sources.addItem %s' % e)
                pass
        control.content(int(sys.argv[1]), 'files')
        control.directory(int(sys.argv[1]), cacheToDisc=True)
        try: self.progressDialog.close()
        except: pass
    except:
        control.infoDialog(control.lang(30501).encode('utf-8'))
        try: self.progressDialog.close()
        except: pass
def getSources(self, name, title, year, imdb, tmdb, tvdb, tvrage, season, episode, tvshowtitle, alter, date):
    """Discover provider modules, run them in worker threads, and collect sources.

    Provider modules live in this package; '_mv'/'_tv'/'_mv_tv' suffixes mark
    whether they serve movies, episodes, or both.  Each enabled provider runs
    in its own thread; a progress dialog tracks the still-running providers
    until all finish or `timeout` (setting 'sources_timeout_40', default 40s)
    elapses.  Returns the accumulated self.sources list.
    """
    sourceDict = []
    # Enumerate sibling modules of this package (providers only, no sub-packages).
    for package, name, is_pkg in pkgutil.walk_packages(__path__): sourceDict.append((name, is_pkg))
    sourceDict = [i[0] for i in sourceDict if i[1] == False]
    content = 'movie' if tvshowtitle == None else 'episode'
    if content == 'movie':
        sourceDict = [i for i in sourceDict if i.endswith(('_mv', '_mv_tv'))]
        # Pair each provider with its enable/disable setting; default to enabled.
        try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i))) for i in sourceDict]
        except: sourceDict = [(i, 'true') for i in sourceDict]
    else:
        sourceDict = [i for i in sourceDict if i.endswith(('_tv', '_mv_tv'))]
        try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i) + '_tv')) for i in sourceDict]
        except: sourceDict = [(i, 'true') for i in sourceDict]
    threads = []
    control.makeFile(control.dataPath)
    self.sourceFile = control.sourcescacheFile
    sourceDict = [i[0] for i in sourceDict if i[1] == 'true']
    if content == 'movie':
        title = cleantitle.normalize(title)
        for source in sourceDict:
            try: threads.append(workers.Thread(self.getMovieSource, title, year, imdb, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))
            except Exception as e:
                control.log('Source getSources %s ERROR %s' % (source,e))
                pass
    else:
        tvshowtitle = cleantitle.normalize(tvshowtitle)
        # Allow season/episode renumbering for shows with alternative orderings.
        season, episode = alterepisode.alterepisode().get(imdb, tmdb, tvdb, tvrage, season, episode, alter, title, date)
        for source in sourceDict:
            try: threads.append(workers.Thread(self.getEpisodeSource, title, year, imdb, tvdb, season, episode, tvshowtitle, date, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))
            except Exception as e:
                control.log('Source getSources %s ERROR %s' % (source, e))
                pass
    try: timeout = int(control.setting('sources_timeout_40'))
    except: timeout = 40
    [i.start() for i in threads]
    control.idle()
    # Human-readable provider labels for the progress dialog.
    sourceLabel = [re.sub('_mv_tv$|_mv$|_tv$', '', i) for i in sourceDict]
    sourceLabel = [re.sub('v\d+$', '', i).upper() for i in sourceLabel]
    self.progressDialog = control.progressDialog
    self.progressDialog.create(control.addonInfo('name'), '')
    self.progressDialog.update(0)
    string1 = control.lang(30512).encode('utf-8')
    string2 = control.lang(30513).encode('utf-8')
    string3 = control.lang(30514).encode('utf-8')
    # Poll twice a second until timeout or all provider threads finished.
    for i in range(0, timeout * 2):
        try:
            if xbmc.abortRequested == True: return sys.exit()
            # Thread names encode an index; map back to the provider label.
            try: info = [sourceLabel[int(re.sub('[^0-9]', '', str(x.getName()))) - 1] for x in threads if x.is_alive() == True]
            except: info = []
            try:
                if self.progressDialog.iscanceled(): break
                string4 = string1 + ' %s' % str(int(i * 0.5))
                if len(info) > 5: string5 = string3 + ' %s' % str(len(info))
                else: string5 = string3 + ' %s' % str(info).translate(None, "[]'")
                self.progressDialog.update(int((100 / float(len(threads))) * len([x for x in threads if x.is_alive() == False])), str(string4), str(string5))
            except Exception as e:
                # Fallback message if the primary dialog update failed.
                string4 = string2 + ' %s' % str(int(i * 0.5))
                if len(info) > 5: string5 = string3 + ' %s' % str(len(info))
                else: string5 = str(info).translate(None, "[]'")
                self.progressDialog.update(int((100 / float(len(threads))) * len([x for x in threads if x.is_alive() == False])), str(string4), str(string5))
            is_alive = [x.is_alive() for x in threads]
            if all(x == False for x in is_alive): break
            time.sleep(0.5)
        except Exception as e:
            control.log('ERROR SOURCES2 %s' % e)
            pass
    try: self.progressDialog.close()
    except: pass
    time.sleep(0.5)
    return self.sources
def solve(url, user_agent=None, wait=True):
    """Solve a legacy CloudFlare 'I'm Under Attack' JavaScript challenge.

    Fetches `url`, extracts the arithmetic challenge from the page, evaluates
    it, and submits the answer to /cdn-cgi/l/chk_jschl.  Returns the final
    page body, the original HTML if no challenge was detected, or False on an
    HTTP error during submission.  Cookies are persisted via the module-level
    `cj` jar when available.

    SECURITY NOTE(review): the challenge arithmetic is evaluated with eval()
    on expressions derived from remote page content -- a hostile page could
    inject code here.  Flagged for replacement with a restricted evaluator.
    """
    print("--- --- ", user_agent)
    if user_agent is None: user_agent = USER_AGENT
    headers = {'User-Agent': user_agent, 'Referer': url}
    if cj is not None:
        if os.path.isfile(cookie_file): cj.load(cookie_file, ignore_discard=True)
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)
    request = urllib2.Request(url)
    for key in headers: request.add_header(key, headers[key])
    try:
        response = urllib2.urlopen(request)
        html = response.read()
    except urllib2.HTTPError as e:
        # CloudFlare serves the challenge page with an error status; keep its body.
        html = e.read()
    #print(">>>>>--- --- ",html)
    solver_pattern = 'var t,r,a,f,\s*([^=]+)={"([^"]+)":([^}]+)};.+challenge-form\'\);.*?\n.*?;(.*?);a\.value'
    vc_pattern = 'input type="hidden" name="jschl_vc" value="([^"]+)'
    pass_pattern = 'input type="hidden" name="pass" value="([^"]+)'
    init_match = re.search(solver_pattern, html, re.DOTALL)
    vc_match = re.search(vc_pattern, html)
    pass_match = re.search(pass_pattern, html)
    if not init_match or not vc_match or not pass_match:
        # Not a challenge page (or the markup changed) -- return it untouched.
        control.log(
            "Couldn't find attribute: init: |%s| vc: |%s| pass: |%s| No cloudflare check?"
            % (init_match, vc_match, pass_match), LOGWARNING)
        response.close()
        return html
    init_dict, init_var, init_equation, equations = init_match.groups()
    vc = vc_match.group(1)
    password = pass_match.group(1)
    # log("VC is: %s" % (vc), xbmc.LOGDEBUG)
    varname = (init_dict, init_var)
    result = int(solve_equation(init_equation.rstrip()))
    control.log('Initial value: |%s| Result: |%s|' % (init_equation, result), LOGDEBUG)
    # Apply each 'varname.op=expr' mutation to the running result.
    for equation in equations.split(';'):
        equation = equation.rstrip()
        if equation[:len('.'.join(varname))] != '.'.join(varname):
            control.log('Equation does not start with varname |%s|' % (equation), LOGDEBUG)
        else:
            equation = equation[len('.'.join(varname)):]
            expression = equation[2:]
            operator = equation[0]
            if operator not in ['+', '-', '*', '/']:
                control.log('Unknown operator: |%s|' % (equation), LOGWARNING)
                continue
            result = int(str(eval(str(result) + operator + str(solve_equation(expression)))))
            control.log('intermediate: %s = %s' % (equation, result), LOGDEBUG)
    scheme = urlparse.urlparse(url).scheme
    domain = urlparse.urlparse(url).hostname
    # CloudFlare's expected answer includes the hostname length.
    result += len(domain)
    control.log('Final Result: |%s|' % (result), LOGDEBUG)
    if wait:
        # CloudFlare rejects answers submitted faster than a human could.
        control.log('Sleeping for 5 Seconds', LOGDEBUG)
        xbmc.sleep(5000)
    url = '%s://%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s&pass=%s' % (
        scheme, domain, vc, result, password)
    control.log('url: %s' % (url), LOGDEBUG)
    request = urllib2.Request(url)
    for key in headers: request.add_header(key, headers[key])
    try:
        # Follow redirects manually so challenge cookies can be captured.
        opener = urllib2.build_opener(NoRedirection)
        urllib2.install_opener(opener)
        response = urllib2.urlopen(request)
        while response.getcode() in [301, 302, 303, 307]:
            if cj is not None: cj.extract_cookies(response, request)
            request = urllib2.Request(response.info().getheader('location'))
            print("Location:", response.info().getheader('location'))
            for key in headers: request.add_header(key, headers[key])
            if cj is not None: cj.add_cookie_header(request)
            response = urllib2.urlopen(request)
        final = response.read()
        response.close()
    except urllib2.HTTPError as e:
        control.log('CloudFlare Error: %s on url: %s' % (e.code, url), LOGWARNING)
        return False
    if cj is not None: cj.save(cookie_file)
    return final
def get_sources(self, url, hosthdDict, hostDict, locDict): try: sources = [] if url == None: return sources data = os.path.join(control.dataPath, 'serieswatch.db') download = True try: download = abs( datetime.datetime.fromtimestamp(os.path.getmtime(data)) - (datetime.datetime.now())) > datetime.timedelta(days=7) except: pass if download == True: result = client.request(base64.b64decode(self.data_link)) print(len(result)) control.log(">>>>>>>>>>>>>>> ONEC Downloading") zip = zipfile.ZipFile(StringIO.StringIO(result)) zip.extractall(control.dataPath) zip.close() dbcon = database.connect(data) dbcur = dbcon.cursor() content = re.compile('(.+?)\sS\d*E\d*$').findall(url) if len(content) == 0: title, year = re.compile('(.+?) (\d{4})$').findall(url)[0] title = cleantitle.movie(title) dbcur.execute("SELECT * FROM movies WHERE title like '%" + title + "%' and title like '%" + year + "%'") result = dbcur.fetchall() else: tvshowtitle, season, episode = re.compile( '(.+?)\sS(\d*)E(\d*)$').findall(url)[0] tvshowtitle = cleantitle.movie(tvshowtitle) myses = 's%se%s' % (season, episode) control.log(">>>>>>>>>>>>>>> ONEC %s season |%s|" % (tvshowtitle, myses)) mysql = "SELECT * FROM movies WHERE title like '%" + tvshowtitle + "%' and title like '%" + myses + "%'" control.log(">>>>>>>>>>>>>>> ONEC SQL |%s|" % (mysql)) dbcur.execute(mysql) result = dbcur.fetchall() for myurl in result: result = myurl[1] if any(word in result.lower() for word in [ 'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts' ]): quality = 'CAM' elif '1080' in result: quality = '1080p' elif '720p' in result: quality = 'HD' else: quality = 'SD' links = myurl[0] #links = [i for i in links if i.startswith('http')] if not any(word in links.lower() for word in ['mp3', 'farsi', 'ganool']): #print("Mamy", links) sources.append({ 'source': 'Serieswatch', 'quality': quality, 'provider': 'Serieswatch', 'url': links }) return sources except: return
def get_sources(self, url, hosthdDict, hostDict, locDict): try: sources = [] if url == None: return sources u = urlparse.urljoin(self.base_link, url) r = client.request(u) #control.log('R %s' % r) r = re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", r) r = list(set(r)) r = [i for i in r if i[1] == '0' or int(i[1]) >= 720] control.log('R %s' % r) links = [] for p in r: try: print('P', p) headers = { 'X-Requested-With': 'XMLHttpRequest', 'Referer': u } player = urlparse.urljoin(self.base_link, '/ajax/movie/load_player') post = urllib.urlencode({'id': p[0], 'quality': p[1]}) control.sleep(220) result = client.request(player, post=post, headers=headers) control.log('result %s' % result) frame = client.parseDOM(result, 'iframe', ret='src') embed = client.parseDOM(result, 'embed', ret='flashvars') if frame: if 'player.php' in frame[0]: frame = client.parseDOM(result, 'input', ret='value', attrs={'type': 'hidden'})[0] headers = { 'Referer': urlparse.urljoin(self.base_link, frame[0]) } url = client.request(frame, headers=headers, output='geturl') links += [{ 'source': 'gvideo', 'url': url, 'quality': client.googletag(url)[0]['quality'] }] elif 'openload.' in frame[0]: links += [{ 'source': 'openload.co', 'url': frame[0], 'quality': 'HQ' }] elif 'videomega.' in frame[0]: links += [{ 'source': 'videomega.tv', 'url': frame[0], 'quality': 'HQ' }] elif embed: url = urlparse.parse_qs(embed[0])['fmt_stream_map'][0] url = [i.split('|')[-1] for i in url.split(',')] for i in url: try: links.append({ 'source': 'gvideo', 'url': i, 'quality': client.googletag(i)[0]['quality'], 'direct': True }) except: pass except: pass for i in links: #sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Xmovies', 'url': i['url'], 'direct': i['direct'], 'debridonly': False}) sources.append({ 'source': i['source'], 'quality': i['quality'], 'provider': 'Xmovies', 'url': i['url'] }) return sources except Exception as e: control.log('ERROR XMOVIES %s' % e) return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Search Hevcfilm for a movie and collect hoster download links.

    `url` is a query string with 'title' and 'year'.  Finds the matching
    search result (1080p preferred over 720p), opens its page, and returns
    one source dict per link whose host appears in `hostDict`.
    """
    try:
        sources = []
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # Strip characters the site's search chokes on.
        query = re.sub('(\\\|/|-|:|;|\*|\?|"|\'|<|>|\|)', ' ', data['title'])
        query = self.search_link % urllib.quote_plus(query)
        query = urlparse.urljoin(self.base_link, query)
        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs = {'class': 'item'})
        # (href, quality badge, title, year) per result card.
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'calidad2'}), client.parseDOM(i, 'span', attrs = {'class': 'tt'}), client.parseDOM(i, 'span', attrs = {'class': 'year'})) for i in r]
        r = [(i[0][0], i[1][0], i[2][0], i[3][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0 and len(i[3]) > 0]
        r = [(i[0], i[1], i[2]) for i in r if i[3] == data['year'] and not i[1] == '3D']
        # Drop resolution/year suffixes from the title before comparing.
        r = [(i[0], i[1], re.sub('(\.|\(|\[|\s)(1080p|720p|3D|\d{4})(\.|\)|\]|\s|)(.+|)', '', i[2])) for i in r]
        r = [(i[0], i[1]) for i in r if cleantitle.get(data['title']) == cleantitle.get(i[2])]
        # Prefer a single 1080p hit, then a single 720p hit.
        l = [(i[0], '1080p') for i in r if i[1] == '1080p'][:1]
        l += [(i[0], 'HD') for i in r if i[1] == '720p'][:1]
        quality = l[0][1]
        url = l[0][0]
        url = client.replaceHTMLCodes(url)
        r = client.request(url)
        # Two page layouts: a dedicated links box, or links after 'Download Link'.
        try:
            links = client.parseDOM(r, 'div', attrs = {'class': 'enlaces_box'})[0]
            links = client.parseDOM(links, 'a', ret='href')
        except:
            links = client.parseDOM(r, 'div', attrs = {'class': 'txt-block'})[0]
            links = links.split('Download Link')[-1]
            links = client.parseDOM(links, 'strong')
            links = client.parseDOM(links, 'a', ret='href')
        for url in links:
            try:
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                try: host = host.split('.')[0]
                except: pass
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': quality, 'provider': 'Hevcfilm', 'url': url})
            except: pass
        return sources
    except Exception as e:
        control.log('ERROR hev %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict): control.log('###DIZIGOLD - SOUR %s' % url) try: sources = [] if url == None: return sources url = urlparse.urljoin(self.base_link, url) result = client.source(url) query = urlparse.urljoin(self.base_link, self.ajax_link) post = re.compile('var\s*view_id\s*=\s*"(\d*)"').findall(result)[0] post = {'id': post, 'tip': 'view', 'dil': 'en'} result = client.request(query, post=post, headers=self.headers) control.log('###DIZIGOLD - SOUR post res %s' % result) result = json.loads(result) result = result['data'] control.log('moja data %s' % result) result = re.compile('iframe.*?src="([^"]+)"').findall(result)[0] control.log('###DIZIGOLD - SOUR post res2 %s' % result) result = client.request(result, headers={'Referer': url}) control.log('###DIZIGOLD - SOUR post res3 %s' % result) links = [{ 'url': i[0], 'quality': i[1] } for i in result if 'google' in i[0]] links += [{ 'url': '%s|User-Agent=%s&Referer=%s' % (i[0], urllib.quote_plus( client.agent()), urllib.quote_plus(url)), 'quality': i[1] } for i in result if not 'google' in i[0]] try: sources.append({ 'source': 'GVideo', 'quality': '1080p', 'provider': 'Dizigoldv2', 'url': [i['url'] for i in links if i['quality'] == '1080p'][0] }) except: pass try: sources.append({ 'source': 'GVideo', 'quality': 'HD', 'provider': 'Dizigoldv2', 'url': [i['url'] for i in links if i['quality'] == '720p'][0] }) except: pass try: sources.append({ 'source': 'GVideo', 'quality': 'SD', 'provider': 'Dizigoldv2', 'url': [i['url'] for i in links if i['quality'] == '480p'][0] }) except: sources.append({ 'source': 'GVideo', 'quality': 'SD', 'provider': 'Dizigoldv2', 'url': [i['url'] for i in links if i['quality'] == '360p'][0] }) return sources except: return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Fetch embed links from MoviesHD via its authenticated AJAX endpoint.

    Performs the site's token dance: reads cookie/__utmx to build a Bearer
    Authorization header, extracts `tok` and `elid` from the page, and posts
    them to /ajax/nembeds.php.  Returned iframes are classified into
    gvideo / openload / videomega sources.
    """
    try:
        sources = []
        if url == None: return sources
        url1 = urlparse.urljoin(self.base_link, url)
        # 'extended' output: (body, req-headers, resp-headers, cookie).
        r100 = client.request(url1, output='extended')
        cookie = r100[4] ; headers = r100[3] ; result = r100[0]
        # The __utmx cookie value doubles as the Bearer token.
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            auth = 'Bearer %s' % urllib.unquote_plus(auth)
        except:
            auth = 'Bearer false'
        headers['Authorization'] = auth
        headers['X-Requested-With'] = 'XMLHttpRequest'
        #headers['Content-Type']='application/x-www-form-urlencoded; charset=UTF-8'
        #headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Cookie'] = cookie
        u = '/ajax/nembeds.php'
        u = urlparse.urljoin(self.base_link, u)
        #action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # NOTE(review): both branches prefix '/tv-series' -- for movies this
        # looks suspicious but may be what the site expects; verify.
        if '/episode/' in url:
            url = urlparse.urljoin(self.base_link, '/tv-series'+ url)
            action = 'getEpisodeEmb'
        else:
            action = 'getMovieEmb'
            url = urlparse.urljoin(self.base_link, '/tv-series' + url)
        headers['Referer'] = url
        control.sleep(200)
        # elid is the current unix time, base64 encoded (site's anti-bot nonce).
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)
        # NOTE(review): leftover debug prints below; consider removing.
        print post
        print headers
        r = client.request(u, post=post, headers=headers, output='')
        print("####",r)
        r = str(json.loads(r))
        r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(r, 'IFRAME', ret='.+?')
        links = []
        # googletag() raises for non-gvideo URLs, filtering them out here.
        for i in r:
            try: links += [{'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i}]
            except: pass
        links += [{'source': 'openload', 'quality': 'SD', 'url': i} for i in r if 'openload.co' in i]
        links += [{'source': 'videomega', 'quality': 'SD', 'url': i} for i in r if 'thevideo.me' in i]
        for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'MoviesHD', 'url': i['url']})
        return sources
    except Exception as e:
        control.log('ERROR moviesHD %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape English-language hoster links from an Iwatchonline page.

    Each link row ('tr' with id 'pt...') carries two images: a language flag
    and a hoster logo.  Non-English rows are skipped.  Returns the collected
    list of source dicts (empty list on failure).
    """
    try:
        self.sources = []
        if url == None: return []
        headers = {"Referer":urlparse.urljoin(self.base_link, url)}
        # 'extended' output: (body, req-headers, resp-headers, cookie).
        r100 = client.request(urlparse.urljoin(self.base_link, url), output='extended', headers=headers)
        cookie = r100[4] ; headers = r100[3] ; result = r100[0]
        links = client.parseDOM(result, 'tr', attrs = {'id': 'pt.+?'})
        for i in links:
            try:
                # Second <img> in the row is the language flag.
                lang = re.compile('<img src=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[1]
                if not 'English' in lang: raise Exception()
                # First <img> is the hoster logo; hostname is the 3rd-from-last
                # dotted component of its filename.
                host = re.compile('<img src=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[0]
                host = host.split('/')[-1]
                host = host.split('.')[-3]
                host = host.strip().lower()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                if '>Cam<' in i or '>TS<' in i: quality = 'CAM'
                else: quality = 'SD'
                if '>3D<' in i: info = '3D'
                else: info = ''
                url = re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[0]
                url = client.replaceHTMLCodes(url)
                # Outbound links wrap the target in a ?u= redirect parameter.
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except: pass
                # Normalize to a site-relative path, then back to absolute.
                if url.startswith('http'): url = urlparse.urlparse(url).path
                if not url.startswith('http'): url = urlparse.urljoin(self.base_link, url)
                url = url.encode('utf-8')
                self.sources.append(
                    {'source': host, 'quality': quality, 'provider': 'Iwatchonline', 'url': url})
            except: pass
        return self.sources
    except Exception as e:
        control.log("ERR iwatch %s" % e)
        return []
def get_sources(self, url, hosthdDict, hostDict, locDict): control.log('### WATCHFREE %s' % url) try: sources = [] if url == None: return sources url = urlparse.urljoin(self.base_link, url) result = client.request(url, 'link_ite') links = client.parseDOM(result, 'table', attrs={'class': 'link_ite.+?'}) print links for i in links: #control.log('### i %s' % i) try: url = client.parseDOM(i, 'a', ret='href') url = [x for x in url if 'gtfo' in x][-1] print "URL", url try: url = urlparse.parse_qs( urlparse.urlparse(url).query)['u'][0] except: pass try: url = urlparse.parse_qs( urlparse.urlparse(url).query)['q'][0] except: pass url = urlparse.parse_qs( urlparse.urlparse(url).query)['gtfo'][0] url = base64.b64decode(url) url = client.replaceHTMLCodes(url) url = url.encode('utf-8') host = re.findall( '([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0] print "Host", host if not host in hostDict: raise Exception() host = client.replaceHTMLCodes(host) host = host.encode('utf-8') quality = client.parseDOM(i, 'div', attrs={'class': 'quality'}) if any(x in ['[CAM]', '[TS]'] for x in quality): quality = 'CAM' else: quality = 'SD' quality = quality.encode('utf-8') sources.append({ 'source': host, 'quality': quality, 'provider': 'Watchfree', 'url': url }) except: pass return sources except Exception as e: control.log('ERROR Watchfree %s' % e) return sources
#PARAMS: {'tmdb': '62715', 'episode': '7', 'name': 'Dragon Ball Super S04E07', 'title': 'A Message From the Future - Goku Black Invades!', # 'tvdb': '295068', 'season': '4', 'tvshowtitle': 'Dragon Ball Super', 'date': '2016-06-26', # 'meta': '{"rating": "7.5", "code": "tt4644488", "tmdb": "62715", "imdb": "tt4644488", "year": "2015", "duration": "1500", # "plot": "T are they? ", "votes": "332", "thumb": "https://walter.trakt.us/images/episodes/002/265/650/screenshots/thumb/4923bc211d.jpg", # "title": "A Message From the Future - Goku Black Invades!", "tvdb": "295068", "mpaa": "TV-14", # "season": "4", "status": "Continuing", "poster": "https://walter.trakt.us/images/shows/000/098/580/posters/medium/32569f3caa.jpg", # "tvshowtitle": "Dragon Ball Super", "studio": "Fuji TV", "genre": "Animation / Action / Adventure / Mystery", # "tvrage": "48862", "banner": "https://walter.trakt.us/images/shows/000/098/580/banners/original/dc596601d3.jpg", # "episode": "7", "name": "Dragon Ball Super S04E07", "premiered": "2016-06-26", # "fanart": "https://walter.trakt.us/images/shows/000/098/580/fanarts/original/fab7afcb95.jpg", # "trailer": "plugin://plugin.video.specto/?action=trailer&name=Dragon+Ball+Super"}', 'imdb': 'tt4644488', # 'year': '2015', 'action': 'sources', 'tvrage': '48862', 'alter': '0'} tvdb='295068' title = 'Dragon Ball Super' imdb='tt4644488' c=my.get_show(imdb,tvdb,title,'2015') control.log('############ get_show res-1 %s' % c) d=my.get_episode(c,imdb,tvdb,title,data,'4','7') control.log('############ get_episode res-1 %s' % d) e=my.get_sources(d,'','','') print ("get_sources",e[0][0]) print(e) f=my.resolve() exit()
def update(self, query=None, info='true'):
    """Scan the library's .strm files and fetch any newly aired episodes.

    Walks show/season folders under self.library_folder, parses each .strm
    plugin URL back into show identifiers, consults the Kodi video library
    (via JSON-RPC) for the last episode already present, and for every newer
    aired episode with a confirmed source writes a new .strm file.  Episode
    lists for ended shows are cached in the tvshows table of libcacheFile.
    """
    if not query == None: control.idle()
    try:
        items = []
        season, episode = [], []
        show = [
            os.path.join(self.library_folder, i)
            for i in control.listDir(self.library_folder)[0]
        ]
        for s in show:
            try:
                season += [
                    os.path.join(s, i) for i in control.listDir(s)[0]
                ]
            except: pass
        for s in season:
            try:
                # Last .strm in the season folder is enough to identify the show.
                episode.append([
                    os.path.join(s, i) for i in control.listDir(s)[1]
                    if i.endswith('.strm')
                ][-1])
            except: pass
        for file in episode:
            try:
                file = control.openFile(file)
                read = file.read()
                read = read.encode('utf-8')
                file.close()
                # Only process .strm files written by this addon.
                if not read.startswith(sys.argv[0]): raise Exception()
                params = dict(urlparse.parse_qsl(read.replace('?', '')))
                try: tvshowtitle = params['tvshowtitle']
                except: tvshowtitle = None
                try: tvshowtitle = params['show']
                except: pass
                if tvshowtitle == None or tvshowtitle == '': raise Exception()
                year, imdb, tvdb = params['year'], params['imdb'], params[
                    'tvdb']
                imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
                try: tmdb = params['tmdb']
                except: tmdb = '0'
                try: tvrage = params['tvrage']
                except: tvrage = '0'
                items.append({
                    'tvshowtitle': tvshowtitle,
                    'year': year,
                    'imdb': imdb,
                    'tmdb': tmdb,
                    'tvdb': tvdb,
                    'tvrage': tvrage
                })
            except: pass
        # De-duplicate while preserving order.
        items = [i for x, i in enumerate(items) if i not in items[x + 1:]]
        if len(items) == 0: raise Exception()
    except:
        return
    try:
        lib = control.jsonrpc(
            '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"properties" : ["imdbnumber", "title", "year"]}, "id": 1}'
        )
        lib = unicode(lib, 'utf-8', errors='ignore')
        lib = json.loads(lib)['result']['tvshows']
    except:
        return
    if info == 'true' and not control.condVisibility(
            'Window.IsVisible(infodialog)') and not control.condVisibility(
                'Player.HasVideo'):
        control.infoDialog(control.lang(30422).encode('utf-8'), time=10000000)
        self.infoDialog = True
    try:
        control.makeFile(control.dataPath)
        dbcon = database.connect(control.libcacheFile)
        dbcur = dbcon.cursor()
        dbcur.execute("CREATE TABLE IF NOT EXISTS tvshows ("
                      "id TEXT, "
                      "items TEXT, "
                      "UNIQUE(id)"
                      ");")
    except:
        return
    try:
        from resources.lib.indexers import episodes
    except:
        return
    for item in items:
        it = None
        if xbmc.abortRequested == True: return sys.exit()
        # Cached episode list first (only ended shows are cached below).
        try:
            dbcur.execute("SELECT * FROM tvshows WHERE id = '%s'" %
                          item['tvdb'])
            fetch = dbcur.fetchone()
            it = eval(fetch[1].encode('utf-8'))
        except: pass
        try:
            if not it == None: raise Exception()
            it = episodes.episodes().get(item['tvshowtitle'], item['year'],
                                         item['imdb'], item['tmdb'],
                                         item['tvdb'], item['tvrage'],
                                         idx=False)
            status = it[0]['status'].lower()
            it = [{
                'name': i['name'],
                'title': i['title'],
                'year': i['year'],
                'imdb': i['imdb'],
                'tmdb': i['tmdb'],
                'tvdb': i['tvdb'],
                'tvrage': i['tvrage'],
                'season': i['season'],
                'episode': i['episode'],
                'tvshowtitle': i['tvshowtitle'],
                'alter': i['alter'],
                'date': i['premiered']
            } for i in it]
            # Continuing shows must be re-fetched each run; don't cache them.
            if status == 'continuing': raise Exception()
            dbcur.execute("INSERT INTO tvshows Values (?, ?)",
                          (item['tvdb'], repr(it)))
            dbcon.commit()
        except: pass
        try:
            # Match the show against the Kodi library by id, or title+year.
            id = [item['imdb'], item['tvdb']]
            if not item['tmdb'] == '0': id += [item['tmdb']]
            ep = [
                x['title'].encode('utf-8') for x in lib
                if str(x['imdbnumber']) in id or (
                    x['title'].encode('utf-8') == item['tvshowtitle']
                    and str(x['year']) == item['year'])
            ][0]
            ep = control.jsonrpc(
                '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "tvshow", "operator": "is", "value": "%s"}]}, "properties": ["season", "episode"]}, "id": 1}'
                % ep)
            ep = unicode(ep, 'utf-8', errors='ignore')
            ep = json.loads(ep)['result']['episodes'][-1]
            # Keep only episodes after the last one already in the library.
            num = [
                x for x, y in enumerate(it)
                if str(y['season']) == str(ep['season'])
                and str(y['episode']) == str(ep['episode'])
            ][-1]
            it = [y for x, y in enumerate(it) if x > num]
            if len(it) == 0: continue
        except:
            continue
        for i in it:
            try:
                if xbmc.abortRequested == True: return sys.exit()
                # Only add episodes that have aired by self.date and have a source.
                if int(self.date) <= int(
                        re.sub('[^0-9]', '', str(i['date']))):
                    from resources.lib.sources import sources
                    src = sources().checkSources(
                        i['name'], i['title'], i['year'], i['imdb'],
                        i['tmdb'], i['tvdb'], i['tvrage'], i['season'],
                        i['episode'], i['tvshowtitle'], i['alter'],
                        i['date'])
                    control.log('### SOURCES SRC 10 %s | %s' %
                                (src, i['name']))
                    if src == False: raise Exception()
                    libtvshows().strmFile(i)
            except: pass
    if self.infoDialog == True:
        control.infoDialog(control.lang(30423).encode('utf-8'), time=1)
    if self.library_setting == 'true' and not control.condVisibility(
            'Library.IsScanningVideo'):
        control.execute('UpdateLibrary(video)')
def get_sources(self, url, hosthdDict, hostDict, locDict): try: sources = [] if url == None: return sources u = urlparse.urljoin(self.base_link, url) r = u.replace('/watching.html', '') + '/watching.html' for i in range(5): post = client.request(u) if not post == None: break post = re.findall('movie=(\d+)', post)[0] post = urllib.urlencode({ 'id': post, 'episode_id': '0', 'link_id': '0', 'from': 'v3' }) headers = { 'Accept-Formating': 'application/json, text/javascript', 'X-Requested-With': 'XMLHttpRequest', 'Server': 'cloudflare-nginx', 'Referer': r } url = urlparse.urljoin(self.base_link, '/ajax/movie/load_episodes') for i in range(5): r = client.request(url, post=post, headers=headers) if not r == None: break r = re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", r) r = list(set(r)) r = [i for i in r if i[1] == '0' or int(i[1]) >= 720] links = [] for p in r: try: play = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v2') post = urllib.urlencode({'id': p[0], 'quality': p[1]}) for i in range(5): url = client.request(play, post=post, headers=headers) if not url == None: break url = json.loads(url)['link'] url = client.request(url, headers=headers, output='geturl') if 'openload.' in url: links += [{ 'source': 'openload', 'url': url, 'quality': 'HD' }] elif 'videomega.' in url: links += [{ 'source': 'videomega', 'url': url, 'quality': 'HD' }] else: try: links.append({ 'source': 'gvideo', 'url': url, 'quality': client.googletag(url)[0]['quality'] }) except: pass except: pass for i in links: sources.append({ 'source': i['source'], 'quality': i['quality'], 'provider': 'Xmovies', 'url': i['url'] }) return sources except Exception as e: control.log('ERROR XMOVIES %s' % e) return sources
def request(url, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, limit=None, referer=None, cookie=None, output='', timeout='30'):
    """Central HTTP fetch helper built on urllib2.

    Arguments:
        close     -- close the response (False also enables a cookie jar).
        redirect  -- False installs a non-redirecting opener.
        error     -- True returns bodies for HTTP error responses too.
        proxy     -- optional http proxy url.
        post      -- urlencoded body; switches the request to POST.
        headers   -- extra/override request headers.
        mobile    -- True sends an iPhone User-Agent.
        limit     -- max KB to read ('0' means 224KB).
        output    -- '', 'cookie', 'response', 'chunk', 'extended',
                     'geturl', 'headers' select the return shape.
    Returns the selected output, or None on failure.  Handles CloudFlare
    503 browser-verification transparently via the cached cfcookie.
    """
    try:
        #control.log('@@@@@@@@@@@@@@ - URL:%s' % url)
        handlers = []
        if not proxy == None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % (proxy)}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        # Cookie jar is only needed when cookies must be returned or reused.
        if output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        try:
            # Python >= 2.7.9 verifies certificates by default; scraping
            # targets often have broken TLS, so verification is disabled
            # deliberately here (known security trade-off).
            if sys.version_info < (2, 7, 9): raise Exception()
            import ssl
            ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            handlers += [urllib2.HTTPSHandler(context=ssl_context)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        except:
            pass
        # Self-update is a no-op on a dict; its only effect is raising when
        # `headers` is None so the except installs an empty dict.
        try: headers.update(headers)
        except: headers = {}
        if 'User-Agent' in headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in headers:
            pass
        elif referer == None:
            # Default Referer: the target's own origin.
            headers['Referer'] = '%s://%s/' % (urlparse.urlparse(url).scheme,
                                               urlparse.urlparse(url).netloc)
        else:
            headers['Referer'] = referer
        if not 'Accept-Language' in headers:
            headers['Accept-Language'] = 'pl-PL'
        if 'Cookie' in headers:
            pass
        elif not cookie == None:
            headers['Cookie'] = cookie
        if redirect == False:
            # Pass redirect responses straight through to the caller.
            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response):
                    return response
            opener = urllib2.build_opener(NoRedirection)
            opener = urllib2.install_opener(opener)
            try: del headers['Referer']
            except: pass
        request = urllib2.Request(url, data=post, headers=headers)
        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:
            # CloudFlare browser check: solve once, cache cookie for 168h.
            if response.code == 503:
                if 'cf-browser-verification' in response.read(5242880):
                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme,
                                          urlparse.urlparse(url).netloc)
                    cf = cache.get(cfcookie, 168, netloc,
                                   headers['User-Agent'], timeout)
                    headers['Cookie'] = cf
                    request = urllib2.Request(url, data=post, headers=headers)
                    response = urllib2.urlopen(request, timeout=int(timeout))
                elif error == False:
                    return
            elif error == False:
                print("Response code", response.code, response.msg, url)
                return
        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: result = cf
            except: pass
        elif output == 'response':
            if limit == '0':
                result = (str(response.code), response.read(224 * 1024))
            elif not limit == None:
                result = (str(response.code), response.read(int(limit) * 1024))
            else:
                result = (str(response.code), response.read(5242880))
        elif output == 'chunk':
            # Only return a sample chunk for sufficiently large bodies.
            try: content = int(response.headers['Content-Length'])
            except: content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
        elif output == 'extended':
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: cookie = cf
            except: pass
            content = response.headers
            result = response.read(5242880)
            # (body, request-headers, response-headers, cookie string)
            return (result, headers, content, cookie)
        elif output == 'geturl':
            result = response.geturl()
        elif output == 'headers':
            content = response.headers
            return content
        else:
            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)
        if close == True: response.close()
        return result
    except Exception as e:
        control.log('Client ERR %s, url:%s' % (e, url))
        return
from resources.lib.libraries import control import xbmcaddon, os, xbmc scriptID = 'plugin.video.specto' ptv = xbmcaddon.Addon(scriptID) datapath = xbmc.translatePath(ptv.getAddonInfo('profile')) BASE_RESOURCE_PATH = os.path.join( ptv.getAddonInfo('path'), "mylib" ) sys.path.append( os.path.join( ptv.getAddonInfo('path'), "mylib" ) ) #import pydevd #pydevd.settrace('localhost', port=34099, stdoutToServer=True, stderrToServer=True) params = dict(urlparse.parse_qsl(sys.argv[2].replace('?',''))) control.log("->---------- PARAMS: %s" % params) try: action = params['action'] except: action = None try: name = params['name'] except: name = None try: title = params['title'] except: title = None
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect playable stream sources for the Xmovies provider.

    url -- provider-relative content path (joined onto self.base_link).
    hosthdDict/hostDict/locDict -- unused here; kept for the common
        provider interface shared by the other get_sources implementations.
    Returns a list of dicts with keys: source, quality, provider, url.
    Never raises: any failure is logged and the (possibly empty) list
    is returned.
    """
    try:
        sources = []
        if url is None: return sources

        url = urlparse.urljoin(self.base_link, url)
        # Normalise to the canonical ".../watching.html" page, which is
        # also used as the Referer for the AJAX calls below.
        url = re.sub('/watching.html$', '', url.strip('/'))
        url = referer = url + '/watching.html'

        p = client.request(url)
        # The page embeds the AJAX payload ids as:
        #   data: { id: N, episode_id: N, link_id: N ... }
        p = re.findall(
            r"data\s*:\s*{\s*id:\s*(\d+),\s*episode_id:\s*(\d+),\s*link_id:\s*(\d+)",
            p)[0]
        # '_' is a cache-buster timestamp in milliseconds.
        p = urllib.urlencode({'id': p[0], 'episode_id': p[1], 'link_id': p[2],
                              '_': int(time.time() * 1000)})

        headers = {'Accept-Formating': 'application/json, text/javascript',
                   'X-Requested-With': 'XMLHttpRequest',
                   'Server': 'cloudflare-nginx', 'Referer': referer}

        r = urlparse.urljoin(self.base_link, '/ajax/movie/load_episodes')
        r = client.request(r, post=p, headers=headers)
        # Each playable link appears as load_player('<id>', '<quality>').
        r = re.findall(r"load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", r)
        #r = [i for i in r if int(i[1]) >= 720]

        for u in r:
            try:
                p = urllib.urlencode({'id': u[0], 'quality': u[1],
                                      '_': int(time.time() * 1000)})
                u = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v2')
                u = client.request(u, post=p, headers=headers)
                # Two hops: the first response points at a playlist URL,
                # the second holds the actual source file entries.
                u = json.loads(u)['playlist']
                u = client.request(u, headers=headers)
                u = json.loads(u)['playlist'][0]['sources']
                u = [i['file'] for i in u if 'file' in i]

                for i in u:
                    try:
                        sources.append({'source': 'gvideo',
                                        'quality': client.googletag(i)[0]['quality'],
                                        'provider': 'Xmovies', 'url': i})
                    except: pass
            except: pass

        return sources
    except Exception as e:
        control.log('ERROR XMOVIES %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect playable stream sources for the MoviesHD provider.

    url -- either an absolute http(s) URL or a querystring of metadata
        (title/tvshowtitle, imdb, year, season, episode) from which the
        provider URL is built.
    hosthdDict/hostDict/locDict -- unused; kept for the common provider
        interface.
    Returns a list of dicts with keys: source, quality, provider, url.
    Never raises: failures are logged and the (possibly empty) list is
    returned.

    BUGFIX: the extended response of client.request() is the 4-tuple
    (result, headers, content, cookie) — see the client module.  The old
    code read cookie = r[4] / headers = r[3], so every call raised
    IndexError (silently swallowed) and the provider never returned any
    source.  Corrected to r[3] / r[1].
    """
    try:
        sources = []
        if url is None: return sources

        if not str(url).startswith('http'):
            # Metadata querystring: rebuild the provider URL from it.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            imdb = data['imdb']; year = data['year']

            if 'tvshowtitle' in data:
                url = '%s/tv-show/%s/season/%01d/episode/%01d' % (
                    self.base_link, cleantitle.geturl(title),
                    int(data['season']), int(data['episode']))
            else:
                url = '%s/movie/%s' % (self.base_link, cleantitle.geturl(title))

            result = client.request(url, limit='5')
            # Movie pages sometimes carry a "-<year>" suffix; retry once.
            if result is None and not 'tvshowtitle' in data:
                url += '-%s' % year
                result = client.request(url, limit='5')

            result = client.parseDOM(result, 'title')[0]
            # '%TITLE%' is the site's placeholder for a missing page.
            if '%TITLE%' in result: raise Exception()

            r = client.request(url, output='extended')
            # Sanity check: the page must reference the requested IMDB id.
            if not imdb in r[0]: raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url, output='extended')

        # extended output is (result, headers, content, cookie).
        result = r[0]; headers = r[1]; cookie = r[3]

        # The bearer token is carried in the __utmx cookie; 'false' is the
        # site's accepted placeholder when it is absent.
        try: auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except: auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)

        headers['Authorization'] = auth
        headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Cookie'] = cookie
        headers['Referer'] = url

        u = '/ajax/tnembeds.php'
        # Follow redirects so the AJAX endpoint is joined onto the final host.
        self.base_link = client.request(self.base_link, output='geturl')
        u = urlparse.urljoin(self.base_link, u)

        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid is the current unix timestamp, base64-encoded and url-quoted.
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall(r"var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall(r'elid\s*=\s*"([^"]+)', result)[0]

        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)

        r = client.request(u, post=post, XHR=True)
        r = str(json.loads(r))
        # Pull every quoted http URL out of the stringified JSON response.
        r = re.findall(r'\'(http.+?)\'', r) + re.findall(r'\"(http.+?)\"', r)

        for i in r:
            try:
                sources.append({'source': 'gvideo',
                                'quality': client.googletag(i)[0]['quality'],
                                'url': i, 'provider': 'MoviesHD'})
            except: pass

        return sources
    except Exception as e:
        control.log('ERROR moviesHD %s' % e)
        return sources