def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve a ymovies season-page path for the requested episode.

    Returns a (path, episode) tuple on success, None on any failure.
    """
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        t = cleantitle.get(data['tvshowtitle'])
        title = data['tvshowtitle']
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        # Accept the air year plus/minus one to tolerate listing mismatches.
        year = re.findall('(\d{4})', date)[0]
        years = [str(year), str(int(year)+1), str(int(year)-1)]
        r = cache.get(self.ymovies_info_season, 720, title, season)
        r = [(i[0], re.findall('(.+?)\s+(?:-|)\s+season\s+(\d+)$', i[1].lower())) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and season == '%01d' % int(i[2])][:2]
        # Pair each candidate URL with its trailing numeric site id.
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        for i in r:
            try:
                y, q = cache.get(self.ymovies_info, 9000, i[1])
                # FIX: the original recomputed `years` on every iteration and
                # used a `mychk` flag loop; a direct membership test over the
                # list built above is equivalent.
                if str(y) not in years: raise Exception()
                return urlparse.urlparse(i[0]).path, (episode)
            except:
                pass
    except Exception as e:
        control.log('Error yesmovies %s' % e)
        return
def request(url, post=None, headers=None, mobile=False, safe=False, timeout='30'):
    """Fetch *url* through a cached cloudflare cookie/agent pair.

    Retries once with fresh (uncached) credentials when the site answers
    503 (cloudflare challenge). Returns the response body, or None on error.
    """
    try:
        # FIX: the original did `try: headers.update(headers) except: headers = {}`
        # -- a no-op self-update abused as a None/type check. Make it explicit.
        if not isinstance(headers, dict): headers = {}
        agent = cache.get(cloudflareAgent, 168)
        if not 'User-Agent' in headers: headers['User-Agent'] = agent
        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        cookie = cache.get(cloudflareCookie, 168, u, post, headers, mobile, safe, timeout)
        result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='response', error=True)
        if result[0] == '503':
            # Challenge page: duration 0 busts the cache for agent and cookie.
            agent = cache.get(cloudflareAgent, 0)
            headers['User-Agent'] = agent
            cookie = cache.get(cloudflareCookie, 0, u, post, headers, mobile, safe, timeout)
            result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout)
        else:
            result = result[1]
        return result
    except:
        return
def removeDownload(url):
    # Remove the queue entry matching *url* from the persistent download
    # list kept in the cache 'rel_dl' table.
    try:
        # The inner function is only a cache key/fallback; the huge duration
        # means "just read whatever is currently stored".
        def download(): return []
        result = cache.get(download, 600000000, table='rel_dl')
        if result == '': result = []
        result = [i for i in result if not i['url'] == url]
        if result == []: result = ''
        # Redefine the keyed function and force a refresh (duration 0) so the
        # filtered list is written back into the same table.
        def download(): return result
        result = cache.get(download, 0, table='rel_dl')
        control.refresh()
    except:
        control.infoDialog('You need to remove file manually', 'Can not remove from Queue')
def get_movie(self, imdb, title, year):
    # Search the site for *title*; return the path of the first result whose
    # cleaned title matches and whose info year equals *year*.
    try:
        t = cleantitle.get(title)
        q = self.search_link_2 % (urllib.quote_plus(cleantitle.query(title)))
        q = q.replace('+','-')
        q = urlparse.urljoin(self.base_link, q)
        r = self.request(q)[0]
        r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a', ret='data-url')) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1]]
        #else:
        #    r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'ss-title'}), client.parseDOM(r, 'a', attrs = {'class': 'ss-title'}))
        # NOTE(review): t is already cleaned, so it is cleaned twice below;
        # harmless only if cleantitle.get is idempotent -- confirm.
        r = [(i[0],i[2]) for i in r if cleantitle.get(t) == cleantitle.get(i[1])][:2]
        r = [(i[0], re.findall('(\d+)', i[1])[-1]) for i in r]
        for i in r:
            try:
                # muchmovies_info is presumed to return (year, quality) for a
                # site item id -- verify against its definition.
                y, q = cache.get(self.muchmovies_info, 9000, i[1])
                if not y == year: raise Exception()
                return urlparse.urlparse(i[0]).path
            except:
                pass
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    # Query the putlocker-style search API and return the relative URL of the
    # show whose cleaned title and year both match exactly.
    try:
        # tk/st/rt form the request-signing triple the API expects.
        tk = cache.get(self.putlocker_token, 8)
        st = self.putlocker_set() ; rt = self.putlocker_rt(tk + st)
        tm = int(time.time() * 1000)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        url = urlparse.urljoin(self.base_link, self.search_link)
        post = {'q': tvshowtitle.lower(), 'limit': '100', 'timestamp': tm, 'verifiedCheck': tk, 'sl': st, 'rt': rt}
        post = urllib.urlencode(post)
        r = client.request(url, post=post, headers=headers)
        print(">>>",r)
        r = json.loads(r)
        t = cleantitle.get(tvshowtitle)
        r = [i for i in r if 'year' in i and 'meta' in i]
        r = [(i['permalink'], i['title'], str(i['year']), i['meta'].lower()) for i in r]
        r = [i for i in r if 'tv' in i[3]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        # Strip an optional scheme-relative host prefix; keep only the path.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    """Look the show up in the cached sezonlukdizi index and return its path."""
    try:
        shows = cache.get(self.sezonlukdizi_tvcache, 120)
        wanted = cleantitle.query(tvshowtitle)
        matches = [entry[0] for entry in shows if wanted == entry[1]]
        if not matches:
            # Fall back to the canonical IMDb title when the local one misses.
            imdb_title = cache.get(self.getImdbTitle, 900, imdb)
            matches = [entry[0] for entry in shows if cleantitle.query(imdb_title) == entry[1]]
        full = urlparse.urljoin(self.base_link, matches[0])
        path = urlparse.urlparse(full).path
        path = client.replaceHTMLCodes(path)
        return path.encode('utf-8')
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    # Match the show in the cached moviefarsi index, then scrape its page to
    # confirm the year and extract the numeric show id.
    try:
        result = cache.get(self.moviefarsi_shows, 168, table='chronic')
        if result == None: return
        tvshowtitle = cleantitle.tv(tvshowtitle)
        # Tolerate +/- one year between metadata and the site listing.
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])][0]
        url = urlparse.urljoin(self.base_link, result)
        result = client.source(url, cookie=self.cookie_link)
        # Fall back to a cloudflare-aware fetch when the plain one fails.
        if result == None: result = cloudflare.source(url)
        result = client.parseDOM(result, 'article', attrs = {'id': 'post-\d*'})[0]
        y = client.parseDOM(result, 'strong')[0]
        y = re.compile('(\d{4})').findall(y)[0]
        if not y in years: return
        result = client.parseDOM(result, 'a', ret='href')[0]
        # Extract the numeric id from the first path segment after the host.
        url = re.compile('//.+?/(\d*)').findall(result)[0]
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    # Search ymovies for *title*; return the path of the first result whose
    # cleaned title matches and whose info year equals *year*.
    try:
        t = cleantitle.get(title)
        q = '/search/%s.html' % (urllib.quote_plus(cleantitle.query(title)))
        q = urlparse.urljoin(self.base_link, q)
        # The site is flaky; retry the search request up to three times.
        for i in range(3):
            r = client.request(q)
            if not r == None: break
        r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
        r = [i[0] for i in r if t == cleantitle.get(i[1])][:2]
        # Pair each candidate URL with its trailing numeric site id.
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        for i in r:
            try:
                y, q = cache.get(self.ymovies_info, 9000, i[1])
                if not y == year: raise Exception()
                return urlparse.urlparse(i[0]).path
            except:
                pass
    except Exception as e:
        control.log('Error yesmovies %s' % e)
        return
def get_episode(self, url, imdb, tvdb, title, premiered, season, episode):
    # NOTE(review): this provider is deliberately short-circuited -- the
    # early return below makes everything after it unreachable dead code.
    return None
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        tvshowtitle = cleantitle.get(data['tvshowtitle'])
        year = re.findall('(\d{4})', premiered)[0]
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        result = cache.get(self.pubfilm_tvcache, 120)
        result = [i for i in result if tvshowtitle == i[1]]
        result = [i[0] for i in result if season == '%01d' % int(i[2])]
        result = [(i, re.findall('(\d{4})', [x for x in i.split('/') if not x == ''][-1])[0]) for i in result]
        result = [i[0] for i in result if i[1] == year][0]
        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        url += '?episode=%01d' % int(episode)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    # Search muchmovies via its AJAX endpoint; return the matching movie path.
    try:
        t = cleantitle.get(title)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        query = urllib.urlencode({'keyword': title})
        url = urlparse.urljoin(self.base_link, self.search_link)
        r = client.request(url, post=query, headers=headers)
        #print("1",r)
        r = json.loads(r)['content']
        #print ("2",r)
        r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'ss-title'}), client.parseDOM(r, 'a', attrs = {'class': 'ss-title'}))
        # NOTE(review): t is already cleaned, so it is cleaned twice here;
        # harmless only if cleantitle.get is idempotent.
        r = [i[0] for i in r if cleantitle.get(t) == cleantitle.get(i[1])][:2]
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        #print ("3",r)
        for i in r:
            try:
                y, q = cache.get(self.muchmovies_info, 9000, i[1])
                #print("4",y,q)
                if not y == year: raise Exception()
                return urlparse.urlparse(i[0]).path
            except:
                pass
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    # NOTE(review): disabled -- the bare return below short-circuits the
    # whole method. The unreachable body is also buggy: it calls .split on a
    # dict and indexes url[1]/url[2] after urlencode produced a string.
    return
    try:
        if url == None: return
        url = urlparse.parse_qs(url)
        print url
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        print url
        result = cache.get(self.tvshow_cache, 120)
        tvshowtitle = cleantitle.get(url['tvshowtitle'])
        # Debug pass: log every candidate whose title contains ours.
        for i in result:
            if cleantitle.get(tvshowtitle) in cleantitle.get(i[1]): print("MAM", i)
        result = [i[0] for i in result if cleantitle.get(tvshowtitle) in cleantitle.get(i[1])]
        url = [i for i in url.split('/') if not i == '']
        url['title'], url['season'], url['episode'] = title, season, episode
        url = urllib.urlencode(url)
        print("URL",url)
        #view-source:http://alltube.tv/marco-polo/odcinek-4/odcinek-4-sezon-2/62284
        url = '/%s/odcinek-%s/odcinek-%s-sezon-%s/%s' % (url[1],int(episode),int(episode),int(season), url[2])
        print("URL", url)
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get(self, url, idx=True, provider=None, network=None):
    """Return the week-cached show list behind *url*.

    When *idx* is True the list is also rendered as a Kodi directory.
    Failures are swallowed and yield None.
    """
    try:
        self.list = cache.get(self.get_shows, 168, url, provider, network)
        if idx == True:
            self.tvshowDirectory(self.list)
        return self.list
    except:
        pass
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    # Resolve an episode page: find the show in the cached index, fetch its
    # page, and pick the list item containing the sXXeYY tag.
    try:
        if url == None: return
        url = urlparse.parse_qs(url)
        print url
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        print url
        result = cache.get(self.tvshow_cache, 120)
        tvshowtitle = cleantitle.get(url['tvshowtitle'])
        # Debug pass: log every candidate whose title contains ours.
        for i in result:
            if cleantitle.get(tvshowtitle) in cleantitle.get(i[1]): print("MAM", i)
        result = [i[0] for i in result if cleantitle.get(tvshowtitle) in cleantitle.get(i[1])][0]
        # e.g. 's02e04' -- the marker searched for in each episode <li>.
        txts = 's%02de%02d' % (int(season),int(episode))
        print result,title,txts
        result = client.source(result)
        result = client.parseDOM(result, 'li', attrs = {'class': 'episode'})
        result = [i for i in result if txts in i][0]
        url = client.parseDOM(result, 'a', ret='href')[0]
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    # Query the putlocker search API with its signed token set and return the
    # relative URL of the movie matching cleaned title and year.
    try:
        tk = cache.get(self.putlocker_token, 8)
        # NOTE(review): 'set' shadows the builtin; left as-is.
        set = self.putlocker_set()
        rt = self.putlocker_rt(tk + set)
        sl = self.putlocker_sl()
        tm = int(time.time() * 1000)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        url = self.search_link
        post = {'q': title.lower(), 'limit': '20', 'timestamp': tm, 'verifiedCheck': tk, 'set': set, 'rt': rt, 'sl': sl}
        print("POST",post)
        post = urllib.urlencode(post)
        r = client.request(url, post=post, headers=headers, output='')
        print("R",r)
        r = json.loads(r)
        t = cleantitle.get(title)
        r = [i for i in r if 'year' in i and 'meta' in i]
        r = [(i['permalink'], i['title'], str(i['year']), i['meta'].lower()) for i in r]
        r = [i for i in r if 'movie' in i[3]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        # Strip an optional scheme-relative host prefix; keep only the path.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        print("U",url)
        return url
    except:
        return
def request(url, post=None, mobile=False, timeout='30'):
    # Cloudflare-aware fetch: reuse a cached clearance cookie for the site's
    # origin, refreshing it once (duration 0) on a 503 challenge response.
    try:
        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        cookie = cache.get(cloudflare, 168, u, post, mobile, timeout)
        result = client.request(url, cookie=cookie, post=post, mobile=mobile, timeout=timeout, output='response', error=True)
        if 'HTTP Error 503' in result[0]:
            # Stale cookie: solve the challenge again and retry once.
            cookie = cache.get(cloudflare, 0, u, post, mobile, timeout)
            result = client.request(url, cookie=cookie, post=post, mobile=mobile, timeout=timeout)
        else:
            result= result[1]
        return result
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    # Search muchmovies for '<show> - Season <n>' and return the season path
    # with the episode number appended as a query string.
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        t = cleantitle.get(data['tvshowtitle'])
        year = re.findall('(\d{4})', date)[0]
        # Accept the air year plus/minus one.
        years = [str(year), str(int(year)+1), str(int(year)-1)]
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        query = urllib.urlencode({'keyword': '%s - Season %s' % (data['tvshowtitle'], season)})
        url = urlparse.urljoin(self.base_link, self.search_link)
        r = client.request(url, post=query, headers=headers)
        r = json.loads(r)['content']
        r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'ss-title'}), client.parseDOM(r, 'a', attrs = {'class': 'ss-title'}))
        # Split each result into (href, title, season-number).
        r = [(i[0], re.findall('(.+?) - season (\d+)$', i[1].lower())) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i for i in r if t == cleantitle.get(i[1])]
        r = [i[0] for i in r if season == '%01d' % int(i[2])][:2]
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        for i in r:
            try:
                y, q = cache.get(self.muchmovies_info, 9000, i[1])
                if not y in years: raise Exception()
                return urlparse.urlparse(i[0]).path + '?episode=%01d' % int(episode)
            except:
                pass
    except:
        return
def get(self, tvshowtitle, year, imdb, tmdb, tvdb, tvrage, idx=True):
    """Build the season list for a show.

    With *idx* the result is day-cached and rendered as a directory;
    without it the TVDb lookup runs directly with no UI side effects.
    """
    if idx == True:
        self.list = cache.get(self.tvdb_list, 24, tvshowtitle, year, imdb, tmdb, tvdb, tvrage, self.info_lang)
        self.seasonDirectory(self.list)
        return self.list
    # Non-indexed path: fresh, uncached lookup.
    self.list = self.tvdb_list(tvshowtitle, year, imdb, tmdb, tvdb, tvrage, self.info_lang)
    return self.list
def get(self, url, idx=True, provider=None, network=None):
    """Return the week-cached show list for *url*; render it when *idx* is True.

    Failures are logged through client.printException rather than raised.
    """
    try:
        self.list = cache.get(self.get_shows, 168, url, provider, network)
        if idx == True:
            self.tvshowDirectory(self.list)
        return self.list
    except:
        client.printException('tvshows.get(url=%s,provider=%s,network=%s)' % (url, provider, network))
def userlists(self):
    # Aggregate the user's Trakt and IMDb lists into one directory; each
    # source is optional and fails independently.
    try:
        userlists = []
        if trakt.getTraktCredentials() == False: raise Exception()
        userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link)
    except:
        pass
    try:
        self.list = []
        if self.imdb_user == '': raise Exception()
        userlists += cache.get(self.imdb_user_list, 0, self.imdblists_link)
    except:
        pass
    self.list = userlists
    # Decorate every entry for the tvshows action before rendering.
    for i in range(0, len(self.list)): self.list[i].update({'image': 'tvUserlists.jpg', 'action': 'tvshows'})
    self.addDirectory(self.list)
    return self.list
def animestreams(self, url, image, fanart):
    """List the streams behind an anime page, or play directly when only one."""
    try:
        if url == self.newanime_link:
            self.list = cache.get(self.anime_list_3, 0, url)
        else:
            self.list = cache.get(self.anime_list_2, 0, url, image, fanart)
        # A single stream skips the directory and plays immediately.
        if len(self.list) == 1:
            return self.animeplay(self.list[0]['url'])
        for entry in self.list:
            entry.update({'action': 'phtoons.animeplay'})
            entry.update({'fanart': self.anime_fanart})
        self.addDirectory(self.list, content='files')
        return self.list
    except:
        pass
def get(self, imdb, tmdb, tvdb, tvrage, season, episode, alter, title, date):
    # Translate (season, episode) through TVRage numbering for shows flagged
    # for alternate ordering; fall back to the input pair on any failure.
    try:
        alt = False
        if alter == '1': alt = True
        elif any(x in alter for x in ['Documentary', 'Reality', 'Game Show', 'Talk Show']): alt = True
        # A season string longer than 3 chars is presumably a year/date,
        # signalling date-based numbering -- confirm with callers.
        if len(season) > 3: alt = True
        # Shows known to need the alternate lookup regardless of genre.
        block = ['73141']
        if tvdb in block: alt = True
        if alt == False: raise Exception()
        if tvrage == '0': tvrage = cache.get(self.tvrageId, 8640, imdb, tmdb, tvdb)
        if tvrage == None: raise Exception()
        result = cache.get(self.tvrageEpisode, 8640, tvrage, title, date, season, episode)
        if result == None: raise Exception()
        return (result[0], result[1])
    except:
        return (season, episode)
def getHosts():
    """Return the lowercase host names supported by real-debrid ([] on error)."""
    try:
        user, password = getCredentials()
        raw = cache.get(client.request, 24, 'http://real-debrid.com/api/hosters.php')
        # The endpoint returns a bare comma-separated list; bracket it so it
        # parses as a JSON array.
        names = json.loads('[%s]' % raw)
        return [name.rsplit('.', 1)[0].lower() for name in names]
    except:
        return []
def get_movie(self, imdb, title, year):
    """Pack the movie identifiers into a urlencoded query string.

    Returns e.g. 'imdb=...&title=...&year=...', or None on error.
    """
    try:
        url = {'imdb': imdb, 'title': title, 'year': year}
        url = urllib.urlencode(url)
        # FIX: removed an `if not url:` fallback that rebuilt the result from
        # an undefined name `list`. urlencode of a non-empty dict is never
        # falsy, so the branch was unreachable dead code (and would have
        # raised NameError if ever reached).
        return url
    except:
        return
def getHosts():
    """Return the lowercase hoster names premiumize supports ([] on failure)."""
    try:
        user, password = getCredentials()
        # BUGFIX: the query string had been HTML-entity mangled -- '&para'
        # collapsed into the pilcrow character, yielding '...hosterlist¶ms[...]'.
        # Restore the intended '&params' separators.
        url = 'http://api.premiumize.me/pm-api/v1.php?method=hosterlist&params[login]=%s&params[pass]=%s' % (user, password)
        result = cache.get(client.request, 24, url)
        hosts = json.loads(result)['result']['hosterlist']
        hosts = [i.rsplit('.' ,1)[0].lower() for i in hosts]
        return hosts
    except:
        return []
def adDict():
    """Return alldebrid's supported hosts (lowercase), or [] when unconfigured."""
    try:
        # Bail out early when the alldebrid account has blank credentials.
        if '' in debridCredentials()['alldebrid'].values(): raise Exception()
        raw = cache.get(client.request, 24, 'http://alldebrid.com/api.php?action=get_host')
        # The response is a bare comma-separated list; bracket it so it
        # parses as a JSON array.
        return [host.lower() for host in json.loads('[%s]' % raw)]
    except:
        return []
def rdDict():
    """Return real-debrid's supported host domains in lowercase ([] on error)."""
    try:
        if '' in debridCredentials()['realdebrid'].values(): raise Exception()
        raw = cache.get(client.request, 24, 'http://api.real-debrid.com/rest/1.0/hosts/domains')
        return [domain.lower() for domain in json.loads(raw)]
    except:
        return []
def get_show(self, imdb, tvdb, tvshowtitle, year):
    """Find the show's URL in the cached dayt index by cleaned title."""
    try:
        # Remember the raw title for later use by sibling methods.
        self.mytitle = tvshowtitle
        index = cache.get(self.dayt_tvcache, 120)
        wanted = cleantitle.get(tvshowtitle)
        match = [entry[0] for entry in index if wanted == entry[1]][0]
        return match.encode('utf-8')
    except:
        return
def genres(self):
    """Render the TMDb TV-genre directory."""
    try:
        # These language codes are not supported by the endpoint; drop them.
        url = re.sub('language=(fi|hr|no)', '', self.genres_link)
        self.list = cache.get(self.tmdb_genre_list, 24, url)
        for entry in self.list:
            entry.update({'image': 'tvGenres.jpg', 'action': 'tvshows'})
        self.addDirectory(self.list)
        return self.list
    except:
        return
def pzDict():
    """Return premiumize's supported hosts (lowercase), [] when unusable."""
    try:
        if '' in debridCredentials()['premiumize'].values(): raise Exception()
        user, password = debridCredentials()['premiumize']['user'], debridCredentials()['premiumize']['pass']
        # BUGFIX: '&params' had been HTML-entity mangled into the pilcrow
        # character ('¶ms'); restore the intended query separators.
        url = 'http://api.premiumize.me/pm-api/v1.php?method=hosterlist&params[login]=%s&params[pass]=%s' % (user, password)
        result = cache.get(client.request, 24, url)
        hosts = json.loads(result)['result']['hosterlist']
        hosts = [i.lower() for i in hosts]
        return hosts
    except:
        return []
def rpDict():
    """Return rpnet's supported hosts (lowercase), or [] when not configured."""
    try:
        if '' in debridCredentials()['rpnet'].values(): raise Exception()
        raw = cache.get(client.request, 24, 'http://premium.rpnet.biz/hoster2.json')
        payload = json.loads(raw)
        return [host.lower() for host in payload['supported']]
    except:
        return []
def get_show(self, imdb, tvdb, tvshowtitle, year):
    """Resolve the show's site path from the cached sezonlukdizi index."""
    try:
        index = cache.get(self.sezonlukdizi_tvcache, 120)
        wanted = cleantitle.get(tvshowtitle)
        href = [entry[0] for entry in index if wanted == entry[1]][0]
        absolute = urlparse.urljoin(self.base_link, href)
        path = urlparse.urlparse(absolute).path
        path = client.replaceHTMLCodes(path)
        return path.encode('utf-8')
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    """Look the show up in the cached dizigold index and return its path."""
    try:
        index = cache.get(self.dizigold_shows, 72)
        wanted = cleantitle.tv(tvshowtitle)
        match = [entry[0] for entry in index if wanted == entry[1]][0]
        # Strip a scheme-relative host prefix when present; otherwise the
        # stored value is used as-is.
        try:
            path = re.compile('//.+?(/.+)').findall(match)[0]
        except:
            path = match
        path = client.replaceHTMLCodes(path)
        return path.encode('utf-8')
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    # Search the site for '<show> - Season <n>' entries and return the
    # matching season path with the episode appended as a query string.
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        t = cleantitle.get(data['tvshowtitle'])
        print('###', t, data['tvshowtitle'])
        year = re.findall('(\d{4})', date)[0]
        # Accept the air year plus/minus one.
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        # The endpoint expects an md5 of the keyword, trailing space included.
        # NOTE(review): 'hash' shadows the builtin; left as-is.
        hash = hashlib.md5(data['tvshowtitle'] + ' ').hexdigest()
        query = urllib.urlencode({ 'keyword': '%s ' % (data['tvshowtitle']), 'hash': hash })
        print query
        url = urlparse.urljoin(self.base_link, self.search_link)
        r = client.request(url, post=query, headers=self.headers)
        r = json.loads(r)['content']
        print('>>>', r)
        r = zip( client.parseDOM(r, 'a', ret='href', attrs={'class': 'ss-title'}), client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
        # Split each result into (href, title, season-number).
        r = [(i[0], re.findall('(.+?) - season (\d+)$', i[1].lower())) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i for i in r if t == cleantitle.get(i[1])]
        r = [i[0] for i in r if season == '%01d' % int(i[2])][:2]
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        print('>>>', r)
        for i in r:
            try:
                y, q = cache.get(self.myesmovies_info, 9000, i[1])
                if not y in years: raise Exception()
                return urlparse.urlparse( i[0]).path + '?episode=%01d' % int(episode)
            except:
                pass
    except:
        return
def sources(self, url, hostDict, hostprDict):
    # Collect hoster links from a movie page: first a base64-packed iframe
    # embed (when present), then each 'server_line' row.
    try:
        sources = []
        r = cache.get(client.request, 1, url)
        try:
            # Pages hide the primary embed as document.write(Base64.decode(...)).
            v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
            b64 = base64.b64decode(v)
            url = client.parseDOM(b64, 'iframe', ret='src')[0]
            try:
                # Reduce the netloc to its final 'name.tld' pair.
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({ 'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': False, 'debridonly': False })
            except:
                pass
        except:
            pass
        r = client.parseDOM(r, 'div', {'class': 'server_line'})
        r = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
        if r:
            for i in r:
                try:
                    # The server label doubles as the hoster name once the
                    # 'Server'/'Link N' prefix is stripped.
                    host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                    url = i[0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    if 'other' in host: continue
                    sources.append({ 'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': False, 'debridonly': False })
                except:
                    pass
        return sources
    except Exception:
        return
def anime(self, url):
    """List anime entries for *url*; prompt for a query on the search link."""
    try:
        if url == self.animesearch_link:
            # Ask the user for a search phrase via the on-screen keyboard.
            keyboard = control.keyboard('', '')
            keyboard.setHeading(control.infoLabel('ListItem.Label'))
            keyboard.doModal()
            if keyboard.getText() == '' or not keyboard.isConfirmed():
                return
            url = self.animesearch_link % urllib.quote_plus(keyboard.getText())
        self.list = cache.get(self.anime_list, 0, url)
        for entry in self.list:
            entry.update({'action': 'phtoons.animestreams'})
            entry.update({'fanart': self.anime_fanart})
        self.addDirectory(self.list)
        return self.list
    except:
        pass
def movie(self, imdb, title, localtitle, aliases, year):
    """Search the site for *title*/*year* and return the first matching URL."""
    try:
        slug = cleantitle.geturl(title)
        search_url = urlparse.urljoin(self.base_link, self.search_link % slug.replace('-', '+'))
        html = cache.get(client.request, 1, search_url)
        cards = client.parseDOM(html, 'div', {'id': 'movie-featured'})
        # For each card: (hrefs, release years, italicized titles).
        candidates = [
            (client.parseDOM(card, 'a', ret='href'),
             re.findall('.+?elease:\s*(\d{4})</', card),
             re.findall('<b><i>(.+?)</i>', card))
            for card in cards
        ]
        matches = [
            (href[0], yr[0], name[0])
            for href, yr, name in candidates
            if (cleantitle.get(name[0]) == cleantitle.get(title) and yr[0] == year)
        ]
        return matches[0][0]
    except Exception:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Locate the show/season via site search; encode it with the episode."""
    try:
        params = urlparse.parse_qs(url)
        params = dict([(key, params[key][0]) if params[key] else (key, '') for key in params])
        if 'tvshowtitle' not in params:
            return
        found = self.searchShow(params['tvshowtitle'], season)
        if found == None:
            # Retry with the canonical IMDb title when the local title misses.
            imdb_title = cache.get(self.getImdbTitle, 900, imdb)
            if imdb_title != params['tvshowtitle']:
                found = self.searchShow(imdb_title, season)
        return urllib.urlencode({'url': found, 'episode': episode})
    except:
        return
def get_movie(self, imdb, title, year):
    # Look the movie up in the cached per-letter filmxy index, matching on
    # cleaned title and a +/- one year window (years appear as '(YYYY)').
    try:
        # The cache is sharded by the title's first character.
        leter = title[0]
        result = cache.get(self.filmxy_cache,9000,leter)
        print "r1",result
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [i for i in result if cleantitle.movie(title) == cleantitle.movie(i[2])]
        print "r2",result
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        print "r3",result
        url = client.replaceHTMLCodes(result)
        url = url.encode('utf-8')
        return url
    except Exception as e:
        control.log('Filmxy ERROR %s' % e)
        return
def get_movie(self, imdb, title, year):
    # Query the movieshd search API (putlocker-style signed request) and
    # return the relative URL of the movie matching title and year.
    try:
        tk = cache.get(self.movieshd_token, 8)
        # NOTE(review): 'set' shadows the builtin; left as-is.
        set = self.movieshd_set()
        rt = self.movieshd_rt(tk + set)
        sl = self.movieshd_sl()
        tm = int(time.time() * 1000)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        url = urlparse.urljoin(self.base_link, self.search_link)
        post = { 'q': title.lower(), 'limit': '100', 'timestamp': tm, 'verifiedCheck': tk, 'set': set, 'rt': rt, 'sl': sl }
        post = urllib.urlencode(post)
        # NOTE(review): output='cookie2' looks odd for a JSON API -- confirm
        # client.request still yields the response body in this mode.
        r = client.request(url, post=post, headers=headers, output='cookie2')
        r = json.loads(r)
        t = cleantitle.get(title)
        r = [i for i in r if 'year' in i and 'meta' in i]
        r = [(i['permalink'], i['title'], str(i['year']), i['meta'].lower()) for i in r]
        r = [i for i in r if 'movie' in i[3]]
        r = [ i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2] ][0]
        # Strip an optional scheme-relative host prefix; keep only the path.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_movie(self, imdb, title, year):
    # onemovies lookup: try the AJAX search first; when its JSON payload is
    # unusable, fall back to the HTML search page.
    try:
        t = cleantitle.get(title)
        q = self.search_link_2 % (urllib.quote_plus( cleantitle.query(title)))
        q = urlparse.urljoin(self.base_link, q)
        u = urlparse.urljoin(self.base_link, self.search_link)
        p = urllib.urlencode({'keyword': title})
        r = self.request(u, post=p, XHR=True)[0]
        try: r = json.loads(r)['content']
        except: r = None
        if r == None:
            # HTML fallback: parse the grid of ml-item cards.
            r = self.request(q)[0]
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
        else:
            r = zip( client.parseDOM(r, 'a', ret='href', attrs={'class': 'ss-title'}), client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
        # NOTE(review): t is already cleaned, so it is cleaned twice here.
        r = [i[0] for i in r if cleantitle.get(t) == cleantitle.get(i[1])][:2]
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        for i in r:
            try:
                y, q = cache.get(self.onemovies_info, 9000, i[1])
                if not y == year: raise Exception()
                return urlparse.urlparse(i[0]).path
            except:
                pass
    except:
        return
def get_movie(self, imdb, title, year, proxy_options=None, key=None):
    # Proxy-aware ymovies movie lookup. Tries the raw title and an
    # '&' -> 'and' variant; returns a (path, '') tuple on success.
    try:
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_movie', 'Provider Disabled by User')
            return None
        variations = [title, title.replace('&', 'and')]
        for title in variations:
            try:
                t = cleantitle.get(title)
                q = '/search/%s.html' % (urllib.quote_plus( cleantitle.query(title)))
                q = urlparse.urljoin(self.base_link, q)
                # Flaky site: retry the search up to three times.
                for i in range(3):
                    #r = client.request(q, IPv4=True)
                    r = proxies.request(q, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
                    if not r == None: break
                r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
                r = [i[0] for i in r if t == cleantitle.get(i[1])][:2]
                # Pair each candidate URL with its trailing numeric site id.
                r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
                for i in r:
                    try:
                        y, q = cache.get(self.ymovies_info, 9000, i[1], proxy_options=proxy_options)
                        if not y == year: raise Exception()
                        return urlparse.urlparse(i[0]).path, ''
                    except:
                        pass
            except:
                pass
    except Exception as e:
        log('ERROR', 'get_movie', '%s: %s' % (title, e), dolog=self.init)
        return
def cartoons(self, url):
    # List cartoon entries; on the search link, prompt for a query first.
    try:
        if url == self.cartoonsearch_link:
            k = control.keyboard('', '') ; k.setHeading(control.infoLabel('ListItem.Label')) ; k.doModal()
            if k.getText() == '' or not k.isConfirmed(): return
            # NOTE(review): only the first word of the query is used.
            url = self.cartoonsearch_link % urllib.quote_plus(k.getText().split()[0])
        self.list = cache.get(self.cartoon_list, 0, url)
        # A random icon index (1-10) keeps the 'next page' tile varied.
        for i in self.list: i.update({'nextaction': 'phtoons.cartoons', 'nexticon': self.cartoons_image % (random.randint(1,10)), 'nextfanart': self.cartoons_fanart})
        for i in self.list: i.update({'action': 'phtoons.cartoonstreams'})
        for i in self.list: i.update({'fanart': self.cartoons_fanart})
        self.addDirectory(self.list)
        return self.list
    except:
        pass
def pubfilm_tvcache(self):
    # NOTE(review): this body appears mis-pasted from a get_episode method --
    # it references url/premiered/season/episode, none of which exist in this
    # scope, so the first statement raises immediately and the bare except
    # always returns None. Callers (cache.get(self.pubfilm_tvcache, 120))
    # therefore always receive None; the real implementation needs restoring.
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        year = re.findall('(\d{4})', premiered)[0]
        season = '%01d' % int(season) ; episode = '%01d' % int(episode)
        tvshowtitle = '%s %s: Season %s' % (data['tvshowtitle'], year, season)
        url = cache.get(self.pidtv_tvcache, 120, tvshowtitle)
        if url == None: raise Exception()
        url += '?episode=%01d' % int(episode)
        url = url.encode('utf-8')
        return url
    except:
        return
def person(self, query=None):
    """Search TMDb for a person and render the results directory."""
    try:
        if query == None:
            # No query supplied: prompt with the localized search heading.
            heading = control.lang(30231).encode('utf-8')
            keyboard = control.keyboard('', heading)
            keyboard.doModal()
            self.query = keyboard.getText() if keyboard.isConfirmed() else None
        else:
            self.query = query
        if (self.query == None or self.query == ''):
            return
        url = self.persons_link % urllib.quote_plus(self.query)
        self.list = cache.get(self.tmdb_person_list, 0, url)
        for entry in self.list:
            entry.update({'action': 'tvshows'})
        self.addDirectory(self.list)
        return self.list
    except:
        return
def search(self, query=None, lang=None):
    """Search TMDb for titles matching the user's query and render them."""
    try:
        if query == None:
            heading = control.lang(30201).encode('utf-8')
            keyboard = control.keyboard('', heading)
            keyboard.doModal()
            self.query = keyboard.getText() if keyboard.isConfirmed() else None
        else:
            self.query = query
        if (self.query == None or self.query == ''):
            return
        # The first placeholder ('%s') is deliberately left unfilled for a
        # later substitution pass.
        url = self.search_link % ('%s', urllib.quote_plus(self.query))
        self.list = cache.get(self.tmdb_list, 0, url)
        self.worker()
        self.movieDirectory(self.list)
        return self.list
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    # Search '<Show>-Season-<n>' on the HTML search page and return the
    # matching season path plus an '?episode=N' query.
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        t = cleantitle.get(data['tvshowtitle'])
        year = re.findall('(\d{4})', date)[0]
        # Accept the air year plus/minus one.
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        q = self.search_link_2 % (urllib.quote_plus( '%s-Season-%s' % (data['tvshowtitle'], season)))
        q = q.replace('+', '-')
        q = urlparse.urljoin(self.base_link, q)
        r = self.request(q)[0]
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a', ret='data-url')) for i in r]
        # (href, [title, season-suffix], numeric id from data-url).
        r = [(i[0][0], i[1][0].split('- Season'), re.findall('(\d+)', i[2][0])[-1]) for i in r if i[0] and i[1]]
        r = [(i[0], i[1][0], i[1][1], i[2]) for i in r]
        r = [i for i in r if t == cleantitle.get(i[1])]
        r = [(i[0], i[3]) for i in r if season == '%01d' % int(i[2])][:2]
        for i in r:
            try:
                y, q = cache.get(self.muchmovies_info, 9000, i[1])
                if not y in years: raise Exception()
                return urlparse.urlparse( i[0]).path + '?episode=%01d' % int(episode)
            except:
                pass
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    # Resolve the show's relative URL via the putlocker search API.
    try:
        # Dead code parked in a string literal (an older pass-through
        # implementation); it has no runtime effect.
        """
        url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
        url = urllib.urlencode(url)
        return url
        except:
        return None
        """
        # tk/set/rt/sl form the request-signing set the API expects.
        tk = cache.get(self.putlocker_token, 8)
        set = self.putlocker_set()
        rt = self.putlocker_rt(tk + set)
        sl = self.putlocker_sl()
        tm = int(time.time() * 1000)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        url = self.search_link
        post = {'q': tvshowtitle.lower(), 'limit': '100', 'timestamp': tm, 'verifiedCheck': tk, 'set': set, 'rt': rt, 'sl': sl}
        post = urllib.urlencode(post)
        r = client.request(url, post=post, headers=headers)
        print(">>>",r)
        r = json.loads(r)
        t = cleantitle.get(tvshowtitle)
        r = [i for i in r if 'year' in i and 'meta' in i]
        r = [(i['permalink'], i['title'], str(i['year']), i['meta'].lower()) for i in r]
        r = [i for i in r if 'tv' in i[3]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        # Strip an optional scheme-relative host prefix; keep only the path.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        print(">>>",url)
        return url
    except:
        return
def request(self, endpoint, query=None):
    """Call the JSON API *endpoint* and return the decoded dict ({} on error)."""
    try:
        # Serialize the query string when one is supplied.
        qs = '?' + urllib.urlencode(query) if query != None else ''
        target = self.api_url % (endpoint, qs)
        # Responses are cached for a day to spare the API.
        body = cache.get(client.request, 24, target)
        return json.loads(body)
    except:
        pass
    return {}
def getSearch():
    # Build the search directory: a 'Search...' entry, a 'Clear History'
    # entry, then one entry per previously stored query.
    addDirectoryItem('Search...', '0', 'searchDirectory', '0', '0', '0', '0', '0', {})
    addDirectoryItem('Clear History', '0', 'clearSearch', '0', '0', '0', '0', '0', {})
    try:
        # The inner function is only a cache key; the huge duration means
        # "read whatever is stored in the rel_srch table".
        def search(): return
        result = cache.get(search, 600000000, table='rel_srch')
        for q in result:
            try: addDirectoryItem('%s...' % q, q, 'searchDirectory2', '0', '0', '0', '0', '0', {})
            except: pass
    except:
        pass
    control.directory(int(sys.argv[1]), cacheToDisc=True)
def getSearch():
    # Localized variant of the search directory: 'Search' (string 30702) and
    # 'Clear History' (30703) entries plus one entry per stored query.
    addDirectoryItem('%s...' % control.lang(30702).encode('utf-8'), '0', 'searchDirectory', '0', '0', '0', '0', '0', {})
    addDirectoryItem( control.lang(30703).encode('utf-8'), '0', 'clearSearch', '0', '0', '0', '0', '0', {})
    try:
        # The inner function is only a cache key; the huge duration means
        # "read whatever is stored in the rel_srch table".
        def search(): return
        result = cache.get(search, 600000000, table='rel_srch')
        for q in result:
            try: addDirectoryItem('%s...' % q, q, 'searchDirectory2', '0', '0', '0', '0', '0', {})
            except: pass
    except:
        pass
    control.directory(int(sys.argv[1]), cacheToDisc=True)
def get(self):
    """Fetch every available source (no filters) and render as channels."""
    try:
        # All thirteen filter fields are intentionally None -> "everything":
        # name, title, year, imdb, tmdb, tvdb, tvrage, season, episode,
        # tvshowtitle, alter, date, meta.
        filters = [None] * 13
        sourceList = cache.get(sources().getSources, 2, *filters)
        self.list.extend(sourceList)
        # Alphabetical by channel name.
        self.list = sorted(self.list, key=lambda k: k['name'])
        self.channelDirectory(self.list)
    except:
        pass
def get_episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Return the provider path for one episode, or None.

    Looks the show up in the cached TV index by cleaned title, season
    number and premiere year, then appends the episode number as a
    query string on the matched show path.
    """
    try:
        params = urlparse.parse_qs(url)
        params = {k: params[k][0] if params[k] else '' for k in params}

        wanted = cleantitle.get(params['tvshowtitle'])
        aired_year = re.findall('(\d{4})', premiered)[0]
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)

        # Cached index rows look like (link, clean_title, season_no).
        rows = cache.get(self.pubfilm_tvcache, 120)
        rows = [row for row in rows if wanted == row[1]]
        links = [row[0] for row in rows if season == '%01d' % int(row[2])]
        # Pair each link with the 4-digit year embedded in its final
        # non-empty path segment, then keep the link whose year matches.
        dated = [(link, re.findall('(\d{4})', [seg for seg in link.split('/') if not seg == ''][-1])[0]) for link in links]
        match = [link for link, yr in dated if yr == aired_year][0]

        show_url = urlparse.urljoin(self.base_link, match)
        show_url = urlparse.urlparse(show_url).path
        show_url += '?episode=%01d' % int(episode)
        return show_url.encode('utf-8')
    except:
        return
def run(self):
    """Worker loop: drain the 'rel_dl' download queue one item at a time.

    For each queued item this derives a destination path from the item
    name (Movies / TVShows / Uncategorised), streams the file in 1 MB
    chunks while publishing progress through window properties and
    notification dialogs, and resumes via HTTP Range requests when the
    server advertises 'Accept-Ranges: bytes'. Stops early and clears
    state when the '<property>.status' window property is set to 'stop'.
    NOTE(review): 'property' here is a module-level name defined outside
    this view — presumably the addon's window-property prefix; confirm.
    """
    # Seed returning [] so a cold cache yields an empty queue.
    def download(): return []
    result = cache.get(download, 600000000, table='rel_dl')
    for item in result:
        self.name = item['name']
        self.image = item['image']
        self.url = item['url']
        # Strip characters illegal in filenames (Py2 str.translate deletion form).
        sysname = self.name.translate(None, '\/:*?"<>|').strip('.')
        # The URL may carry extra request headers after a '|' separator.
        url = self.url.split('|')[0]
        try: headers = dict(urlparse.parse_qsl(self.url.rsplit('|', 1)[1]))
        except: headers = dict('')
        ext = os.path.splitext(urlparse.urlparse(url).path)[1][1:].lower()
        if not ext in ['mp4', 'mkv', 'flv', 'avi', 'mpg']: ext = 'mp4'
        # Classify by name suffix: '(YYYY)' -> movie, 'SxxExx' -> TV episode.
        hdlr = re.compile('.+? ([(]\d{4}[)]|S\d*E\d*)$').findall(self.name)
        if len(hdlr) == 0: self.content = 'Uncategorised'
        hdlr = re.compile('.+? (S\d*E\d*)$').findall(self.name)
        if len(hdlr) > 0: self.content = 'TVShows'
        hdlr = re.compile('.+? [(](\d{4})[)]$').findall(self.name)
        if len(hdlr) > 0: self.content = 'Movies'
        # Build the destination folder tree for the detected content type.
        if self.content == 'Movies':
            dest = os.path.join(downloadPath, 'Movies')
            control.makeFile(dest)
            dest = os.path.join(dest, sysname)
            control.makeFile(dest)
        elif self.content == 'TVShows':
            d = re.compile('(.+?) S(\d*)E(\d*)$').findall(sysname)[0]
            dest = os.path.join(downloadPath, 'TVShows')
            control.makeFile(dest)
            dest = os.path.join(dest, d[0])
            control.makeFile(dest)
            dest = os.path.join(dest, 'Season %01d' % int(d[1]))
            control.makeFile(dest)
        else:
            dest = os.path.join(downloadPath, 'Uncategorised')
            control.makeFile(dest)
        dest = os.path.join(dest, sysname + '.' + ext)
        control.infoDialog(self.name + ' Is Downloading', 'Downloads Started', self.image, time=7000)
        # Open the remote file; on failure drop the item from the queue.
        try:
            req = urllib2.Request(url, headers=headers)
            resp = urllib2.urlopen(req, timeout=30)
        except Exception, e:
            removeDownload(self.url)
            print '%s ERROR - File Failed To Open' % (dest)
            continue
        try: self.size = int(resp.headers['Content-Length'])
        except: self.size = 0
        # An unknown/zero size makes progress math impossible; skip the item.
        if self.size < 1:
            removeDownload(self.url)
            print '%s Unknown filesize - Unable to download' % (dest)
            continue
        try: resumable = 'bytes' in resp.headers['Accept-Ranges'].lower()
        except: resumable = False
        # Chunk size: 1 MB, capped to the file size for tiny files.
        size = 1024 * 1024
        if self.size < size: size = self.size
        gb = '%.2f GB' % (float(self.size) / 1073741824)
        # NOTE(review): time.clock() is wall-clock only on Windows in Py2;
        # elapsed-time math here assumes that platform behaviour — confirm.
        start = time.clock()
        total = 0
        notify = 0
        errors = 0
        count = 0
        resume = 0
        sleep = 0
        self.clear()
        control.window.setProperty(property + '.status', 'downloading')
        control.window.setProperty(property + '.name', str(self.name))
        control.window.setProperty(property + '.image', str(self.image))
        control.window.setProperty(property + '.size', str(gb))
        f = control.openFile(dest, 'wb')
        chunk = None
        chunks = []
        while True:
            # Progress = bytes flushed to disk plus bytes buffered in 'chunks'.
            downloaded = total
            for c in chunks: downloaded += len(c)
            percent = min(100 * downloaded / self.size, 100)
            self.speed = str(int((downloaded / 1024) / (time.clock() - start))) + ' KB/s'
            self.percent = str(percent) + '%'
            control.window.setProperty(property + '.percent', str(self.percent))
            control.window.setProperty(property + '.speed', str(self.speed))
            # Notify every 10% of progress.
            if percent >= notify:
                control.infoDialog('Downloaded %s' % self.percent, self.name, self.image, time=5000)
                notify += 10
            chunk = None
            error = False
            try:
                chunk = resp.read(size)
                if not chunk:
                    # NOTE(review): self.percent is a string like '42%', so in
                    # Py2 'self.percent < 99' is always False (int < str) — a
                    # premature EOF is treated as completion; confirm intent.
                    if self.percent < 99:
                        error = True
                    else:
                        # EOF at (near) 100%: flush buffered chunks and finish.
                        while len(chunks) > 0:
                            c = chunks.pop(0)
                            f.write(c)
                            del c
                        f.close()
                        print '%s download complete' % (dest)
                        break
            except Exception, e:
                print str(e)
                error = True
                sleep = 10
                errno = 0
                if hasattr(e, 'errno'): errno = e.errno
                if errno == 10035:
                    # 'A non-blocking socket operation could not be completed immediately'
                    pass
                if errno == 10054:
                    #'An existing connection was forcibly closed by the remote host'
                    errors = 10 #force resume
                    sleep = 30
                if errno == 11001:
                    # 'getaddrinfo failed'
                    errors = 10 #force resume
                    sleep = 30
            if chunk:
                errors = 0
                chunks.append(chunk)
                # Keep at most 5 chunks buffered; flush the oldest to disk.
                if len(chunks) > 5:
                    c = chunks.pop(0)
                    f.write(c)
                    total += len(c)
                    del c
            if error:
                errors += 1
                count += 1
                print '%d Error(s) whilst downloading %s' % (count, dest)
                control.sleep(sleep * 1000)
            # Any error on a resumable stream (or 10 consecutive errors
            # otherwise) triggers a resume attempt, up to a retry budget.
            if (resumable and errors > 0) or errors >= 10:
                if (not resumable and resume >= 50) or resume >= 500:
                    #Give up!
                    print '%s download canceled - too many error whilst downloading' % (dest)
                    break
                resume += 1
                errors = 0
                if resumable:
                    # Discard buffered chunks; re-request from byte offset
                    # 'total' (the bytes already flushed to disk).
                    chunks = []
                    #create new response
                    print 'Download resumed (%d) %s' % (resume, dest)
                    h = headers
                    h['Range'] = 'bytes=%d-' % int(total)
                    try: resp = urllib2.urlopen(urllib2.Request(url, headers=h), timeout=10)
                    except: resp = None
                else:
                    #use existing response
                    pass
            # External stop request: abort everything and clear state.
            if control.window.getProperty(property + '.status') == 'stop':
                control.infoDialog('Process Complete', 'Downloads', time=5000)
                return self.clear()
def downloader():
    """Render the Downloads directory: the downloads folder link, a
    start/stop toggle, an optional status entry while downloading, and
    one row per queued item (with a 'Remove from Queue' context menu)."""
    thumb = control.addonThumb()
    fanart = control.addonFanart()
    status = control.window.getProperty(property + '.status')

    def _menu_entry(label, target, fanart_key='fanart_image'):
        # One folder-style menu row with no context menu of its own.
        entry = control.item(label, iconImage=thumb, thumbnailImage=thumb)
        entry.addContextMenuItems([], replaceItems=True)
        entry.setProperty(fanart_key, fanart)
        control.addItem(handle=int(sys.argv[1]), url=target, listitem=entry, isFolder=True)

    if not downloadPath == '':
        _menu_entry('[COLOR FF00b8ff]Downloads[/COLOR]', downloadPath)

    if status == 'downloading':
        _menu_entry('[COLOR red]Stop Downloads[/COLOR]', sys.argv[0] + '?action=stopDownload')
    else:
        _menu_entry('[COLOR FF00b8ff]Start Downloads[/COLOR]', sys.argv[0] + '?action=startDownload')

    if status == 'downloading':
        _menu_entry('[COLOR gold]Download Status[/COLOR]', sys.argv[0] + '?action=statusDownload', fanart_key='Fanart_Image')

    # Queue contents, served from the 'rel_dl' cache table.
    def download():
        return []
    result = cache.get(download, 600000000, table='rel_dl')
    for i in result:
        try:
            cm = [('Remove from Queue', 'RunPlugin(%s?action=removeDownload&url=%s)' % (sys.argv[0], urllib.quote_plus(i['url'])))]
            item = control.item(i['name'], iconImage=i['image'], thumbnailImage=i['image'])
            item.addContextMenuItems(cm, replaceItems=True)
            item.setProperty('fanart_image', fanart)
            item.setProperty('Video', 'true')
            item.setProperty('IsPlayable', 'true')
            control.addItem(handle=int(sys.argv[1]), url=i['url'], listitem=item)
        except:
            pass
    control.directory(int(sys.argv[1]), cacheToDisc=True)
def addDownload(name, url, image, provider=None):
    """Resolve *url* (optionally through *provider*), confirm the size
    with the user, and append the item to the 'rel_dl' download queue.

    Shows an info dialog and returns early when the item is already
    queued, the stream cannot be resolved, or the download cannot be
    probed/confirmed.

    Fix: 'result' is now pre-seeded with [], so a failure inside the
    first try block no longer leaves it unbound (the original raised an
    uncaught NameError at the duplicate check in that case).
    """
    # Seed so the duplicate check below is safe even if the cache read fails.
    result = []
    try:
        def download(): return []
        result = cache.get(download, 600000000, table='rel_dl')
        result = [i['name'] for i in result]
    except:
        pass

    if name in result: return control.infoDialog('Item Already In Your Queue', name)

    try:
        # Resolve through the named provider first, when one is given.
        if not provider == None:
            from resources.lib.sources import sources
            url = sources().sourcesResolve(url, provider)
            if url == None: raise Exception()

        #legacy issue, will be removed later
        if 'afdah.org' in url and not '</source>' in url: url += '<source>afdah</source>'

        if '</source>' in url:
            # Split the '<source>name</source>' tag off the URL and load the
            # matching provider module (first suffix that imports wins).
            source = re.compile('<source>(.+?)</source>').findall(url)[0]
            url = re.compile('(.+?)<source>').findall(url)[0]
            for i in ['_mv', '_tv', '_mv_tv']:
                try: call = __import__('resources.lib.sources.%s%s' % (source, i), globals(), locals(), ['object'], -1).source()
                except: pass
            from resources.lib import sources
            d = sources.sources()
            url = call.get_sources(url, d.hosthdfullDict, d.hostsdfullDict, d.hostlocDict)
            if type(url) == list:
                url = sorted(url, key=lambda k: k['quality'])
                url = url[0]['url']
            url = call.resolve(url)

        from resources.lib import resolvers
        url = resolvers.request(url)
        if type(url) == list:
            url = sorted(url, key=lambda k: k['quality'])
            url = url[0]['url']
        if url == None: raise Exception()
    except:
        return control.infoDialog('Unplayable stream')

    try:
        # Probe the stream: headers may ride after a '|' separator.
        u = url.split('|')[0]
        try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
        except: headers = dict('')

        ext = os.path.splitext(urlparse.urlparse(u).path)[1][1:].lower()
        # HLS playlists are not downloadable as a single file.
        if ext == 'm3u8': raise Exception()
        #if not ext in ['mp4', 'mkv', 'flv', 'avi', 'mpg']: ext = 'mp4'
        dest = name + '.' + ext

        req = urllib2.Request(u, headers=headers)
        resp = urllib2.urlopen(req, timeout=30)

        size = int(resp.headers['Content-Length'])
        size = ' %.2f GB' % (float(size) / 1073741824)

        no = control.yesnoDialog(dest, 'Complete file is' + size, 'Continue with download?', name + ' - ' + 'Confirm Download', 'Confirm', 'Cancel')
        if no: return
    except:
        return control.infoDialog('Unable to download')

    # Re-read the queue, drop any stale entry for this url, and append.
    def download(): return [{'name': name, 'url': url, 'image': image}]
    result = cache.get(download, 600000000, table='rel_dl')
    result = [i for i in result if not i['url'] == url]
    def download(): return result + [{'name': name, 'url': url, 'image': image}]
    result = cache.get(download, 0, table='rel_dl')

    control.infoDialog('Item Added to Queue', name)
def cachesyncTVShows(timeout=0):
    """Return watched indicators for the configured trakt user, served
    from the 'trakt' cache table and refreshed after *timeout* hours."""
    user = control.setting('trakt.user').strip()
    return cache.get(syncTVShows, timeout, user, table='trakt')
def get_episode(self, url=None, imdb=None, tvdb=None, title=None, year=None, season=None, episode=None, proxy_options=None, key=None):
    """Find the site path for one episode of a show on ymovies.

    Tries several title spellings (with/without apostrophes), looks up
    season listings from cache, and validates candidate entries against
    the expected year — then retries with the year shifted by +/- the
    season number, and finally ignores the year altogether. Returns a
    (path, episode) tuple or None.
    """
    try:
        # Provider toggle from addon settings.
        # NOTE(review): compares the setting to False with '=='; settings are
        # often strings ('true'/'false'), so this may never disable — confirm.
        if control.setting('Provider-%s' % name) == False: return None

        if url != None:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        else:
            data = {}
            data['tvshowtitle'] = title

        t1 = data['tvshowtitle']
        #t2 = cleantitle.get(data['tvshowtitle'])
        # Title variants: original, apostrophe-stripped, and the first
        # word of the stripped form.
        titles = [t1]
        if '\'s' in t1:
            t0 = t1.replace('\'s', '')
            titles.append(t0)
            if ' ' in t0:
                t0 = t0.split(' ')
                t0 = t0[0]
                titles.append(t0)
        elif '\'' in t1:
            t0 = t1.replace('\'', '')
            titles.append(t0)
            if ' ' in t0:
                t0 = t0.split(' ')
                t0 = t0[0]
                titles.append(t0)

        for title in titles:
            #print title
            try:
                season = '%01d' % int(season)
                episode = '%01d' % int(episode)

                # Season search results, cached for 720 minutes.
                r = cache.get(self.ymovies_info_season, 720, title, season, proxy_options=proxy_options)
                if r == None or len(r) == 0: raise Exception()
                #print r
                # Parse 'Title - Season N' out of each result title.
                r = [(i[0], re.findall('(.+?)\s+(?:-|)\s+season\s+(\d+)$', i[1].lower())) for i in r]
                #print r
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
                #print r
                # Prefer exact cleaned-title matches; fall back to any
                # entry with the right season number. Keep at most two.
                r1 = []
                try:
                    r1 = [i[0] for i in r if title == cleantitle.get(i[1]) and int(season) == int(i[2])][:2]
                except:
                    pass
                if len(r1) == 0:
                    r = [i[0] for i in r if int(season) == int(i[2])][:2]
                else:
                    r = r1
                #print r
                # Pair each candidate link with its trailing numeric id.
                r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
                #print r

                # Pass 1: accept a candidate whose listed year is within
                # one year of the expected year.
                for i in r:
                    try:
                        y, q = cache.get(self.ymovies_info, 9000, i[1], proxy_options=proxy_options)
                        mychk = False
                        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
                        for x in years:
                            if str(y) == x: mychk = True
                        if mychk == False: raise Exception()
                        return urlparse.urlparse(i[0]).path, (episode)
                    except:
                        pass

                # yr variation for shows
                # Pass 2: shift the expected year forward by the season
                # number (sites often list a season's own air year).
                try:
                    year = int(year) + int(season)
                    for i in r:
                        try:
                            y, q = cache.get(self.ymovies_info, 9000, i[1], proxy_options=proxy_options)
                            mychk = False
                            years = [str(year), str(int(year) + 1), str(int(year) - 1)]
                            for x in years:
                                if str(y) == x: mychk = True
                            if mychk == False: raise Exception()
                            return urlparse.urlparse(i[0]).path, (episode)
                        except:
                            pass
                except:
                    pass

                # yr variation for shows
                # Pass 3: shift back by the season number.
                # NOTE(review): 'year' was already incremented in pass 2, so
                # this subtraction lands back on the original year rather than
                # year-season — confirm whether that is intended.
                try:
                    year = int(year) - int(season)
                    for i in r:
                        try:
                            y, q = cache.get(self.ymovies_info, 9000, i[1], proxy_options=proxy_options)
                            mychk = False
                            years = [str(year), str(int(year) + 1), str(int(year) - 1)]
                            for x in years:
                                if str(y) == x: mychk = True
                            if mychk == False: raise Exception()
                            return urlparse.urlparse(i[0]).path, (episode)
                        except:
                            pass
                except:
                    pass

                # yr ignore for shows
                # Last resort: take the first candidate regardless of year.
                for i in r:
                    return urlparse.urlparse(i[0]).path, (episode)
            except:
                pass
        return
    except Exception as e:
        log('ERROR', 'get_episode', '%s: %s' % (title, e), dolog=self.init)
        return
def request(url, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, limit=None, referer=None, cookie=None, output='', timeout='30'):
    """HTTP client wrapper around urllib2 with Cloudflare handling.

    Builds an opener (optional proxy, cookie jar, unverified SSL
    context), fills in default headers (User-Agent, Referer,
    Accept-Language, Cookie), and fetches *url*. A 503 with a
    Cloudflare browser-verification page triggers a cached cfcookie
    retry; a 307 is followed manually with the jar's cookies. The
    *output* mode selects what is returned ('cookie', 'response',
    'chunk', 'extended', 'geturl', 'headers', or the body by default);
    *limit* caps the read size in KB. Returns None on failure.

    Fix: the error log used one '%s' placeholder with a two-element
    tuple, which raised TypeError inside the except handler and escaped
    to the caller instead of returning None.
    """
    try:
        #control.log('@@@@@@@@@@@@@@ - URL:%s' % url)
        handlers = []

        if not proxy == None:
            handlers += [urllib2.ProxyHandler({'http': '%s' % (proxy)}), urllib2.HTTPHandler]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        # Cookie jar is only needed when the caller asked for cookies or
        # wants the connection kept open.
        if output == 'cookie2' or output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        # On 2.7.9+ certificate verification is on by default; disable it
        # since many scraped hosts have broken certs.
        try:
            if sys.version_info < (2, 7, 9): raise Exception()
            import ssl
            ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            handlers += [urllib2.HTTPSHandler(context=ssl_context)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        except:
            pass

        # headers may be None; this normalizes it to a dict.
        try: headers.update(headers)
        except: headers = {}

        if 'User-Agent' in headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'

        if 'Referer' in headers:
            pass
        elif referer == None:
            headers['Referer'] = '%s://%s/' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        else:
            headers['Referer'] = referer

        if not 'Accept-Language' in headers:
            headers['Accept-Language'] = 'en-US'

        if 'Cookie' in headers:
            pass
        elif not cookie == None:
            headers['Cookie'] = cookie

        if redirect == False:
            # Swallow 3xx so the caller sees the redirect response itself.
            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response): return response
            opener = urllib2.build_opener(NoRedirection)
            opener = urllib2.install_opener(opener)
            try: del headers['Referer']
            except: pass

        request = urllib2.Request(url, data=post, headers=headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:
            control.log("AAAA- CODE %s|%s " % (url, response.code))
            if response.code == 503:
                # Cloudflare challenge: fetch a solved cookie (cached for
                # 168 hours) and retry once.
                if 'cf-browser-verification' in response.read(5242880):
                    control.log("CF-OK")
                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
                    cf = cache.get(cfcookie, 168, netloc, headers['User-Agent'], timeout)
                    headers['Cookie'] = cf
                    request = urllib2.Request(url, data=post, headers=headers)
                    response = urllib2.urlopen(request, timeout=int(timeout))
                elif error == False:
                    return
            elif response.code == 307:
                # Manual redirect: carry the jar's cookies to the Location.
                control.log("AAAA- Response read: %s" % response.read(5242880))
                control.log("AAAA- Location: %s" % (response.headers['Location'].rstrip()))
                cookie = ''
                try: cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
                except: pass
                headers['Cookie'] = cookie
                request = urllib2.Request(response.headers['Location'], data=post, headers=headers)
                response = urllib2.urlopen(request, timeout=int(timeout))
                #control.log("AAAA- BBBBBBB %s" % response.code)
            elif error == False:
                print("Response code", response.code, response.msg, url)
                return

        if output == 'cookie':
            try: result = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: result = cf
            except: pass
        elif output == 'response':
            if limit == '0':
                result = (str(response.code), response.read(224 * 1024))
            elif not limit == None:
                result = (str(response.code), response.read(int(limit) * 1024))
            else:
                result = (str(response.code), response.read(5242880))
        elif output == 'chunk':
            # Only sample large bodies; small ones are not worth a chunk.
            try: content = int(response.headers['Content-Length'])
            except: content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
        elif output == 'extended':
            try: cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: cookie = cf
            except: pass
            content = response.headers
            result = response.read(5242880)
            return (result, headers, content, cookie)
        elif output == 'geturl':
            result = response.geturl()
        elif output == 'headers':
            content = response.headers
            return content
        else:
            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

        if close == True: response.close()
        return result
    except Exception as e:
        # Fixed format string: was 'Client ERR %s, url:' % (e, url), which
        # itself raised TypeError and escaped this handler.
        control.log('Client ERR %s, url: %s' % (e, url))
        return
def agent():
    """Return a random User-Agent string, memoized for 24 hours."""
    ua = cache.get(randomagent, 24)
    return ua
def request(url, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, limit=None, referer=None, cookie=None, output='', timeout='30', XHR=False):
    """HTTP client wrapper (gzip/Sucuri-aware variant).

    Builds a urllib2 opener (optional proxy, cookie jar, unverified SSL
    context), fills in default headers (User-Agent, Referer,
    Accept-Language, optional X-Requested-With, Cookie) and fetches
    *url*. A 503 carrying a Cloudflare browser-verification page
    triggers a cached cfcookie retry; bodies are gunzipped when
    Content-Encoding says so; a 'sucuri_cloudproxy_js' body triggers one
    retry with the solved Sucuri cookie. *output* selects the return
    value ('cookie', 'geturl', 'headers', 'chunk', 'extended', or the
    body by default); *limit* caps the read size in KB. Returns None on
    any failure.
    """
    try:
        #control.log('@@@@@@@@@@@@@@ - URL:%s POST:%s' % (url, post))
        handlers = []

        if not proxy == None:
            handlers += [urllib2.ProxyHandler({'http':'%s' % (proxy)}), urllib2.HTTPHandler]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        # Cookie jar only when cookies are requested or the connection is
        # to be kept open.
        if output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        # Disable certificate verification on 2.7.9+ (many scraped hosts
        # have broken certs).
        try:
            if sys.version_info < (2, 7, 9): raise Exception()
            import ssl; ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            handlers += [urllib2.HTTPSHandler(context=ssl_context)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        except:
            pass

        # headers may be None; this normalizes it to a dict.
        try: headers.update(headers)
        except: headers = {}

        if 'User-Agent' in headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'

        if 'Referer' in headers:
            pass
        elif referer == None:
            headers['Referer'] = '%s://%s/' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        else:
            headers['Referer'] = referer

        if not 'Accept-Language' in headers:
            headers['Accept-Language'] = 'en-US'

        if 'X-Requested-With' in headers:
            pass
        elif XHR == True:
            headers['X-Requested-With'] = 'XMLHttpRequest'

        if 'Cookie' in headers:
            pass
        elif not cookie == None:
            headers['Cookie'] = cookie

        if redirect == False:
            # Swallow 3xx so the caller sees the redirect response itself.
            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response): return response
            opener = urllib2.build_opener(NoRedirection)
            opener = urllib2.install_opener(opener)
            try: del headers['Referer']
            except: pass

        request = urllib2.Request(url, data=post, headers=headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:
            if response.code == 503:
                # Body may be gzipped even on the error page.
                cf_result = response.read(5242880)
                try: encoding = response.info().getheader('Content-Encoding')
                except: encoding = None
                if encoding == 'gzip':
                    cf_result = gzip.GzipFile(fileobj=StringIO.StringIO(cf_result)).read()
                if 'cf-browser-verification' in cf_result:
                    # Cloudflare challenge: solved cookie cached 168 hours.
                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
                    ua = headers['User-Agent']
                    cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)
                    headers['Cookie'] = cf
                    request = urllib2.Request(url, data=post, headers=headers)
                    response = urllib2.urlopen(request, timeout=int(timeout))
                elif error == False:
                    return
            elif error == False:
                return

        if output == 'cookie':
            try: result = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: result = cf
            except: pass
            if close == True: response.close()
            return result
        elif output == 'geturl':
            result = response.geturl()
            if close == True: response.close()
            return result
        elif output == 'headers':
            result = response.headers
            if close == True: response.close()
            return result
        elif output == 'chunk':
            # Only sample large bodies; small ones are not worth a chunk.
            try: content = int(response.headers['Content-Length'])
            except: content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close == True: response.close()
            return result

        if limit == '0':
            result = response.read(224 * 1024)
        elif not limit == None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)

        try: encoding = response.info().getheader('Content-Encoding')
        except: encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()

        if 'sucuri_cloudproxy_js' in result:
            # Sucuri anti-bot wall: solve for the cookie and retry once.
            su = sucuri().get(result)
            headers['Cookie'] = su
            request = urllib2.Request(url, data=post, headers=headers)
            try:
                response = urllib2.urlopen(request, timeout=int(timeout))
            except Exception as e:
                control.log('Sucuri url: %s Error: %s' % (url,e))
                # NOTE(review): this ValueError is constructed but never
                # raised, so a failed retry falls through and re-reads the
                # old (already exhausted) response — likely 'raise' was
                # intended; confirm before changing behavior.
                ValueError('Sucuri url: %s Error: %s' % (url,e))
            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)
            try: encoding = response.info().getheader('Content-Encoding')
            except: encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()

        if output == 'extended':
            response_headers = response.headers
            response_code = str(response.code)
            try: cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: cookie = cf
            except: pass
            if close == True: response.close()
            return (result, response_code, response_headers, headers, cookie)
        else:
            if close == True: response.close()
            return result
    except:
        return