def resolve(url):
    """Resolve a filmon.com VOD/TV page into a direct .m3u8 stream URL.

    Returns the playable URL, or None on any failure (errors swallowed).
    """
    try:
        # Normalise the three supported URL shapes to an API endpoint.
        if '/vod/' in url:
            url = re.compile('/(\d+)').findall(url)[-1]
            url = 'http://www.filmon.com/vod/info/%s' % url
        elif '/tv/' in url:
            url = url.replace('/tv/', '/channel/')
        elif not '/channel/' in url:
            raise Exception()
        # The endpoint only serves JSON to AJAX-style requests.
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        result = client.request(url, headers=headers)
        result = json.loads(result)
        # Stream list lives under 'streams' or 'data.streams' depending on endpoint.
        try:
            result = result['streams']
        except:
            result = result['data']['streams']
        result = [i[1] for i in result.iteritems()]  # Python 2 dict iteration
        strm = [(i['url'], int(i['watch-timeout'])) for i in result]
        strm = [i for i in strm if '.m3u8' in i[0]]
        strm.sort()
        # NOTE(review): tuples are (url, watch-timeout), so sort() orders by
        # URL string, not by timeout — confirm the [-1] pick is intentional.
        strm = strm[-1][0]
        # The picked m3u8 may be a master playlist; follow its first variant.
        url = client.request(strm).splitlines()
        url = [i for i in url if '.m3u8' in i]
        if len(url) == 0: return strm
        url = urlparse.urljoin(strm, url[0])
        return url
    except:
        return
def resolve(url):
    """Resolve a two-step free-download file host into its final URL.

    Replays the hidden-field form twice (second round with a solved
    captcha) and scrapes ``download_url`` from the final page.
    Returns the URL, or None on any failure (errors swallowed).
    """
    try:
        result = client.request(url)

        # Round 1: replay every hidden input of the empty-action form.
        post = {}
        f = client.parseDOM(result, 'Form', attrs = {'action': '' })[0]
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post.update({'method_free': ' '})
        result = client.request(url, post=post)

        # Round 2: same form again, this time with the captcha answer.
        post = {}
        f = client.parseDOM(result, 'Form', attrs = {'action': '' })[0]
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post.update({'method_free': ' '})
        post.update(captcha.request(result))
        # (removed a no-op `post = post` self-assignment here)
        result = client.request(url, post=post)

        url = re.compile("var\s+download_url *= *'(.+?)'").findall(result)[0]
        return url
    except:
        return
def resolve(url):
    """Resolve a two-step free-download host via its ``window.open`` link.

    Submits the hidden-field form twice (captcha solved best-effort on the
    second round) and extracts the URL opened by the download button.
    Returns the URL, or None on any failure (errors swallowed).
    """
    try:
        result = client.request(url)

        # Round 1: replay every hidden input of the empty-action form.
        post = {}
        f = client.parseDOM(result, 'Form', attrs = {'action': ''})
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post.update({'method_free': 'Free Download'})
        post = urllib.urlencode(post)
        result = client.request(url, post=post)

        # Round 2: same form; the captcha is optional (best-effort).
        post = {}
        f = client.parseDOM(result, 'Form', attrs = {'action': '' })
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post.update({'method_free': 'Free Download'})
        try: post.update(captcha.request(result))
        except: pass
        post = urllib.urlencode(post)
        result = client.request(url, post=post)

        # The download button opens the file via window.open('<url>').
        url = client.parseDOM(result, 'a', ret='onClick')
        url = [i for i in url if i.startswith('window.open')][0]
        url = re.compile('[\'|\"](.+?)[\'|\"]').findall(url)[0]
        return url
    except:
        return
def resolve(url):
    """Resolve an allmyvideos.net embed/file URL to a direct file URL.

    Returns the URL, or None on any failure (errors swallowed).
    """
    try:
        # Extract the file id and rebuild the canonical page URL.
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        page = 'http://allmyvideos.net/%s' % url
        control.log('AAA Page %s' % page)
        result = client.request(page, close=False)

        # Replay the hidden inputs of the empty-action form.
        post = {}
        f = client.parseDOM(result, 'form', attrs = {'action': ''})
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post = urllib.urlencode(post)
        result = client.request(page, post=post)

        # Take the last "file": "http..." entry from the player config.
        url = re.compile('"file" *: *"(http.+?)"').findall(result)
        #control.log('AAA Page %s' % url)
        url = url[-1]
        url += '&direct=false&ua=false'
        xbmc.sleep(2000)  # short grace period before playback
        #return url + '|' + urllib.urlencode({ 'User-Agent': client.IE_USER_AGENT })
        return url
    except:
        return
def request(url, post=None, headers=None, mobile=False, safe=False, timeout='30'):
    """Fetch a Cloudflare-protected URL using a cached challenge cookie.

    Uses a cached User-Agent and challenge cookie (valid 168h); on an HTTP
    503 both are force-refreshed once and the request retried.
    Returns the response body, or None on any failure (errors swallowed).
    """
    try:
        # Default and copy the headers dict so the caller's dict is not
        # mutated when a User-Agent is injected below.
        # (The original `try: headers.update(headers) except: headers = {}`
        # was a no-op used only as a None check.)
        headers = {} if headers is None else dict(headers)
        agent = cache.get(cloudflareAgent, 168)
        if not 'User-Agent' in headers: headers['User-Agent'] = agent
        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        # Cookie is keyed per site root; cached for 168h.
        cookie = cache.get(cloudflareCookie, 168, u, post, headers, mobile, safe, timeout)
        result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='response', error=True)
        if result[0] == '503':
            # Stale cookie/agent: force-refresh both and retry once.
            agent = cache.get(cloudflareAgent, 0)
            headers['User-Agent'] = agent
            cookie = cache.get(cloudflareCookie, 0, u, post, headers, mobile, safe, timeout)
            result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout)
        else:
            result = result[1]
        return result
    except:
        return
def cloudflareCookie(url, post, headers, mobile, safe, timeout):
    """Solve the Cloudflare IUAM JavaScript challenge and return the cookie.

    Scrapes the jschl challenge values, re-evaluates the obfuscated
    arithmetic, and submits the answer to /cdn-cgi/l/chk_jschl.
    Returns the clearance cookie string, or None on failure.
    """
    try:
        result = client.request(url, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, error=True)
        # Challenge token and the two JS fragments that build the answer.
        jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(result)[0]
        init = re.compile('setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(result)[0]
        builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(result)[0]
        decryptVal = parseJSString(init)
        # Each statement mutates the accumulator via +=, -=, *= etc.;
        # sections[0][-1] is the operator character.
        # HACK: uses eval() on remote page content — restricted to parsed
        # numeric fragments, but inherently trusting the challenge page.
        lines = builder.split(';')
        for line in lines:
            if len(line)>0 and '=' in line:
                sections=line.split('=')
                line_val = parseJSString(sections[1])
                decryptVal = int(eval(str(decryptVal)+sections[0][-1]+str(line_val)))
        # Cloudflare's answer is the computed value plus the hostname length.
        answer = decryptVal + len(urlparse.urlparse(url).netloc)
        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (url, jschl, answer)
        if 'type="hidden" name="pass"' in result:
            passval = re.compile('name="pass" value="(.*?)"').findall(result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (url, urllib.quote_plus(passval), jschl, answer)
        # Cloudflare rejects answers submitted too quickly.
        time.sleep(5)
        cookie = client.request(query, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='cookie', error=True)
        return cookie
    except:
        pass
def resolve(self, url):
    """Resolve a site page to its unquoted auengine.com video link.

    If the embedded player is not auengine, follows the first
    'auengine video' item link first. Returns the video URL or None.
    """
    try:
        result = client.request(url)
        result = result.decode('iso-8859-1').encode('utf-8')

        url = client.parseDOM(result, 'div', attrs = {'class': 'player-embed'})[0]
        url = client.parseDOM(url, 'iframe', ret='src')[0]

        if not 'auengine.com' in url:
            # Not an auengine embed: hop through the matching video item page.
            url = client.parseDOM(result, 'div', attrs = {'class': 'generic-video-item'})
            url = [i for i in url if 'auengine video' in i.lower()][0]
            url = client.parseDOM(url, 'a', ret='href')[0]
            url = urlparse.urljoin(self.base_link, url)
            result = client.request(url)
            result = result.decode('iso-8859-1').encode('utf-8')
            url = client.parseDOM(result, 'div', attrs = {'class': 'player-embed'})[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]

        result = client.request(url)
        # The player page stores the (URL-quoted) file link in video_link.
        url = re.compile("video_link *= *'(.+?)'").findall(result)[0]
        url = urllib.unquote_plus(url)
        return url
    except:
        return
def check(self, i):
    """Collect working HD hoster links for item *i* into ``self.sources``.

    Only go4up.com links resolving to uptobox / hugefiles / uploadrocket
    are kept, and each is validated via its hoster's check(). Appends to
    self.sources as a side effect; failures are silently skipped.
    """
    try:
        result = client.request(i['url'])
        result = client.parseDOM(result, 'td', attrs = {'class': 'td_cols'})[0]
        result = result.split('"td_heads"')
        result = client.parseDOM(result, 'a', ret='href')

        for url in result:
            try:
                if 'go4up.com' in url:
                    # Expand the go4up mirror page into its first real link.
                    url = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]
                    url = client.request(self.go4up_link_2 % url)
                    url = client.parseDOM(url, 'div', attrs = {'id': 'linklist'})[0]
                    url = client.parseDOM(url, 'a', ret='href')[0]

                # Normalise the host to its bare second-level name.
                host = urlparse.urlparse(url).netloc
                host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                host = host.strip().lower()
                if not host in ['uptobox', 'hugefiles', 'uploadrocket']: raise Exception()

                # Verify the file still exists on the hoster.
                if host == 'hugefiles': check = hugefiles.check(url)
                elif host == 'uploadrocket': check = uploadrocket.check(url)
                elif host == 'uptobox': check = uptobox.check(url)
                if check == False: raise Exception()

                self.sources.append({'source': host, 'quality': 'HD', 'provider': 'TVrelease', 'url': url, 'info': i['info']})
            except:
                pass
    except:
        pass
def resolve(url):
    """Resolve a nowvideo.sx link via its player API.

    Extracts the video id and file key, queries player.api.php, and
    returns the stream URL with User-Agent/Referer appended in Kodi's
    '|header=value' format. Returns None on failure.
    """
    try:
        headers = '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': url})

        # Id appears either as a path segment or as a ?v= parameter.
        id = re.compile('//.+?/.+?/([\w]+)').findall(url)
        id += re.compile('//.+?/.+?v=([\w]+)').findall(url)
        id = id[0]

        url = 'http://embed.nowvideo.sx/embed.php?v=%s' % id
        result = client.request(url)

        # filekey may be a literal or the name of a JS variable holding it.
        key = re.compile('flashvars.filekey=(.+?);').findall(result)[-1]
        try: key = re.compile('\s+%s="(.+?)"' % key).findall(result)[-1]
        except: pass

        url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (key, id)
        result = client.request(url)

        url = re.compile('url=(.+?)&').findall(result)[0]
        url += headers
        return url
    except:
        return
def resolve(url):
    """Resolve a player page into a direct link for the requested quality.

    The desired quality arrives as a ?quality= query parameter (default
    1080P). The page hides the source list behind nested window.atob
    (base64) wrapping. Returns the URL or None on failure.
    """
    try:
        # Requested quality from the query string; default to 1080P.
        try: quality = urlparse.parse_qs(urlparse.urlparse(url).query)['quality'][0]
        except: quality = '1080P'

        url = url.rsplit('?', 1)[0]

        result = client.request(url, close=False)
        url = client.parseDOM(result, 'div', attrs = {'class': 'player'})[0]
        url = client.parseDOM(url, 'iframe', ret='src')[0]
        result = client.request(url)

        # If the player nests another iframe, delegate to the resolver chain.
        url = client.parseDOM(result, 'iframe', ret='src')
        # NOTE(review): `debrid` is not defined in this function's scope —
        # this branch would raise NameError and fall into the except. Verify.
        if len(url) > 0: return resolvers.request(url[0], debrid)

        # Sources are base64-encoded as many times as window.atob appears.
        count = len(re.findall('window\.atob', result))
        result = re.compile("window\.atob\('([^']+)").findall(result)[0]
        for i in xrange(count): result = base64.decodestring(result)

        # Pairs of (quality label, url); fall back to the first entry.
        result = re.compile('(\d*p)="([^"]+)"').findall(result)
        url = [i for i in result if i[0].upper() == quality]
        if len(url) > 0: url = url[0][1]
        else: url = result[0][1]
        return url
    except:
        return
def resolve(url):
    """Resolve a grifthost.com embed URL to a direct file URL.

    Optionally replays the POST form (submit buttons coerced to hidden),
    then unpacks the obfuscated player JS and extracts the file link,
    skipping subtitle (.srt) entries. Returns the URL or None.
    """
    try:
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://grifthost.com/embed-%s.html' % url

        result = client.request(url)

        # Some pages gate the player behind a POST form; replay it if present.
        try:
            post = {}
            f = client.parseDOM(result, 'Form', attrs = {'method': 'POST'})[0]
            f = f.replace('"submit"', '"hidden"')
            k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
            for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
            # (removed a no-op `post = post` self-assignment here)
            result = client.request(url, post=post)
        except:
            pass

        # Unpack the last p.a.c.k.e.d eval() blob on the page.
        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack.unpack(result)

        url = client.parseDOM(result, 'embed', ret='src')
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        url = [i for i in url if not i.endswith('.srt')]
        url = 'http://' + url[0].split('://', 1)[-1]
        return url
    except:
        return
def resolve(url):
    """Resolve a p2pcast.tv stream id (packed in the query string) to a URL.

    Decodes the base64 stream URL from the player page and appends
    User-Agent/Referer in Kodi's '|header=value' format (the SWF player
    URL doubles as the Referer). Returns None on failure.
    """
    try:
        # Both the referer and the channel id travel in the query string.
        referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        page = urlparse.parse_qs(urlparse.urlparse(url).query)['id'][0]
        page = 'http://p2pcast.tv/stream.php?id=%s&live=0&p2p=0&stretching=uniform' % page

        result = client.request(page, referer=referer)

        # Locate the flash player URL via the player JS; fall back to the CDN copy.
        try:
            swf = re.compile('src\s*=[\'|\"](.+?player.+?\.js)[\'|\"]').findall(result)[0]
            swf = client.request(swf)
            swf = re.compile('flashplayer\s*:\s*[\'|\"](.+?)[\'|\"]').findall(swf)[0]
        except:
            swf = 'http://cdn.p2pcast.tv/jwplayer.flash.swf'

        # Stream URL is base64-encoded in the page.
        url = re.compile('url\s*=\s*[\'|\"](.+?)[\'|\"]').findall(result)[0]
        url = base64.b64decode(url)

        url = '%s|User-Agent=%s&Referer=%s' % (url, urllib.quote_plus(client.agent()), urllib.quote_plus(swf))
        return url
    except:
        return
def resolve(url): try: url = url.replace("/embed-", "/") url = re.compile("//.+?/([\w]+)").findall(url)[0] page = "http://allmyvideos.net/%s" % url control.log("AAA Page %s" % page) result = client.request(page, close=False) post = {} f = client.parseDOM(result, "form", attrs={"action": ""}) k = client.parseDOM(f, "input", ret="name", attrs={"type": "hidden"}) for i in k: post.update({i: client.parseDOM(f, "input", ret="value", attrs={"name": i})[0]}) post = urllib.urlencode(post) result = client.request(page, post=post) url = re.compile('"file" *: *"(http.+?)"').findall(result) # control.log('AAA Page %s' % url) url = url[-1] url += "&direct=false&ua=false" xbmc.sleep(2000) # return url + '|' + urllib.urlencode({ 'User-Agent': client.IE_USER_AGENT }) return url except: return
def resolve(url):
    """Resolve a vidzi-style mobile page, preferring non-HLS links.

    Replays the POST form (if any) and retries up to 10 times, unpacking
    the obfuscated player JS each attempt. Non-m3u8 links are returned
    with the flash-player Referer appended; otherwise the first m3u8.
    Returns None on failure.
    """
    try:
        result = client.request(url, mobile=True, close=False)

        # Build the replay POST from the form's hidden inputs (submit
        # buttons are coerced to hidden so they are captured too).
        try:
            post = {}
            f = client.parseDOM(result, 'Form', attrs = {'method': 'POST'})[0]
            f = f.replace('"submit"', '"hidden"')
            k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
            for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        except:
            post=None

        # The host is flaky: retry up to 10 times, pausing on failure.
        for i in range(0, 10):
            try:
                result = client.request(url, post=post, mobile=True, close=False)
                result = result.replace('\n','')
                # Unpack the last p.a.c.k.e.d eval() blob and read sources[].
                result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
                result = jsunpack.unpack(result)
                result = re.compile('sources *: *\[.+?\]').findall(result)[-1]
                result = re.compile('file *: *"(http.+?)"').findall(result)

                # Prefer direct (non-HLS) files, with the SWF as Referer.
                url = [i for i in result if not '.m3u8' in i]
                if len(url) > 0: return '%s|Referer=%s' % (url[0], urllib.quote_plus('http://vidzi.tv/nplayer/jwplayer.flash.swf'))
                url = [i for i in result if '.m3u8' in i]
                if len(url) > 0: return url[0]
            except:
                time.sleep(1)
    except:
        return
def tvrageEpisode(self, tvrage, title, date, season, episode):
    """Look up an episode's (season, episode) numbers on TVRage/epguides.

    Matches first by air date (dd/Mon/yyyy), then by cleaned title, and
    only accepts an unambiguous single match. Tries the TVRage listing
    first, then epguides (which uses a 2-digit year).
    Returns ('S', 'E') as zero-trimmed strings, or None if unresolved.
    """
    # Map ISO month numbers to the 3-letter names used by both sites.
    monthMap = {'01':'Jan', '02':'Feb', '03':'Mar', '04':'Apr', '05':'May', '06':'Jun', '07':'Jul', '08':'Aug', '09':'Sep', '10':'Oct', '11':'Nov', '12':'Dec'}

    title = cleantitle.tv(title)

    try:
        # Attempt 1: TVRage episode list.
        url = self.tvrage_link % tvrage
        result = client.request(url, timeout='5')
        search = re.compile('<td.+?><a.+?title=.+?season.+?episode.+?>(\d+?)x(\d+?)<.+?<td.+?>(\d+?/.+?/\d+?)<.+?<td.+?>.+?href=.+?>(.+?)<').findall(result.replace('\n',''))

        # date arrives as yyyy-mm-dd; TVRage lists dd/Mon/yyyy.
        d = '%02d/%s/%s' % (int(date.split('-')[2]), monthMap[date.split('-')[1]], date.split('-')[0])

        match = [i for i in search if d == i[2]]
        if len(match) == 1: return (str('%01d' % int(match[0][0])), str('%01d' % int(match[0][1])))
        match = [i for i in search if title == cleantitle.tv(i[3])]
        if len(match) == 1: return (str('%01d' % int(match[0][0])), str('%01d' % int(match[0][1])))
    except:
        pass

    try:
        # Attempt 2: epguides CSV-ish listing (2-digit year in dates).
        url = self.epguides_link % tvrage
        result = client.request(url, timeout='5')
        search = re.compile('\d+?,(\d+?),(\d+?),.+?,(\d+?/.+?/\d+?),"(.+?)",.+?,".+?"').findall(result)

        d = '%02d/%s/%s' % (int(date.split('-')[2]), monthMap[date.split('-')[1]], date.split('-')[0][-2:])

        match = [i for i in search if d == i[2]]
        if len(match) == 1: return (str('%01d' % int(match[0][0])), str('%01d' % int(match[0][1])))
        match = [i for i in search if title == cleantitle.tv(i[3])]
        if len(match) == 1: return (str('%01d' % int(match[0][0])), str('%01d' % int(match[0][1])))
    except:
        pass
def resolve(url):
    """Resolve a sawlive.tv embed into an RTMP command line for Kodi.

    Unpacks any obfuscated JS on the embed page, follows the innermost
    iframe, then assembles 'streamer playpath swfUrl pageUrl live=1'.
    Returns the rtmp string, or None on failure.
    """
    try:
        page = re.compile('//.+?/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(url)[0]
        page = 'http://sawlive.tv/embed/%s' % page

        # Referer may be supplied in the query string; default to the page itself.
        try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        except: referer = page

        result = client.request(page, referer=referer)

        # Unpack every packed JS line and append it so regexes below see
        # both the raw page and the deobfuscated content.
        unpacked = ''
        packed = result.split('\n')
        for i in packed:
            try: unpacked += jsunpack.unpack(i)
            except: pass
        result += unpacked
        result = urllib.unquote_plus(result)

        # Innermost iframe src is split into two quoted halves; rejoin them.
        result = re.compile('<iframe(.+?)</iframe>').findall(result)[-1]
        url = re.compile('src\s*=\s*[\'|\"](.+?)[\'|\"].+?[\'|\"](.+?)[\'|\"]').findall(result)[0]
        url = '/'.join(url)

        result = client.request(url, referer=referer)

        strm = re.compile("'streamer'.+?'(.+?)'").findall(result)[0]
        file = re.compile("'file'.+?'(.+?)'").findall(result)[0]
        swf = re.compile("SWFObject\('(.+?)'").findall(result)[0]

        url = '%s playpath=%s swfUrl=%s pageUrl=%s live=1 timeout=30' % (strm, file, swf, url)
        return url
    except:
        return
def sky_list(self, num, channel, id):
    """Append the programme airing now on a Sky channel to ``self.items``.

    Looks up the current listing for channel *id*, then re-finds the same
    entry in the day's programme guide (split into four 6-hour slots) to
    read its title and year. Appends (title, year, channel, num); failures
    are silently ignored.
    """
    try:
        url = self.sky_now_link % id
        result = client.request(url, timeout='10')
        result = json.loads(result)
        # URL of the programme currently on air — used to match below.
        match = result['listings'][id][0]['url']

        dt1 = (self.uk_datetime).strftime('%Y-%m-%d')
        dt2 = int((self.uk_datetime).strftime('%H'))
        # Guide slots: 0 = 00-06h, 1 = 06-12h, 2 = 12-18h, 3 = 18-24h.
        if (dt2 < 6): dt2 = 0
        elif (dt2 >= 6 and dt2 < 12): dt2 = 1
        elif (dt2 >= 12 and dt2 < 18): dt2 = 2
        elif (dt2 >= 18): dt2 = 3

        url = self.sky_programme_link % (id, str(dt1), str(dt2))
        result = client.request(url, timeout='10')
        result = json.loads(result)
        result = result['listings'][id]
        result = [i for i in result if i['url'] == match][0]

        # Year is embedded as "(yyyy)" in the description field.
        year = result['d']
        year = re.findall('[(](\d{4})[)]', year)[0].strip()
        year = year.encode('utf-8')

        title = result['t']
        title = title.replace('(%s)' % year, '').strip()
        title = client.replaceHTMLCodes(title)
        title = title.encode('utf-8')

        self.items.append((title, year, channel, num))
    except:
        pass
def resolve(url):
    """Resolve a two-step download host, returning the redirect target URL.

    Submits the hidden-field form twice (captcha on the first round) and
    reads the final location via output='geturl'. If the host bounced us
    back to the same page the resolve failed and None is returned. The
    result carries User-Agent/Referer in Kodi's '|header' format.
    """
    try:
        headers = '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': url})

        u = url  # remember the page we started on to detect a bounce-back

        result = client.request(url)

        # Round 1: hidden inputs from either 'Form' or 'form', plus captcha.
        post = {}
        f = client.parseDOM(result, 'Form', attrs = {'action': '' })
        f += client.parseDOM(result, 'form', attrs = {'action': '' })
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post.update({'method_free': 'Free Download'})
        post.update(captcha.request(result))
        post = urllib.urlencode(post)
        result = client.request(url, post=post, close=False)

        # Round 2: replay the follow-up form and capture the redirect URL.
        post = {}
        f = client.parseDOM(result, 'Form', attrs = {'action': '' })
        f += client.parseDOM(result, 'form', attrs = {'action': '' })
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post = urllib.urlencode(post)
        url = client.request(url, post=post, output='geturl')

        # Landed back on (or within) the original page -> resolve failed.
        if u in url or url in u : return
        return url + headers
    except:
        return
def resolve(url):
    """Resolve a free/premium download host via its two named forms.

    Submits the 'freeorpremium' form (free branch), then the 'F1' form
    with a solved captcha, and extracts the download anchor gated by an
    onclick='DL...' handler. The result carries User-Agent/Referer in
    Kodi's '|header' format. Returns None on failure.
    """
    try:
        headers = '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': url})

        result = client.request(url)
        result = result.decode('iso-8859-1').encode('utf-8')

        # Round 1: choose the free-download branch of the first form.
        post = {}
        f = client.parseDOM(result, 'Form', attrs = {'name': 'freeorpremium'})[0]
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post.update({'method_isfree': 'Click for Free Download'})
        post = urllib.urlencode(post)
        result = client.request(url, post=post)
        result = result.decode('iso-8859-1').encode('utf-8')

        # Round 2: form 'F1' requires the captcha answer.
        post = {}
        f = client.parseDOM(result, 'Form', attrs = {'name': 'F1'})[0]
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post.update(captcha.request(result))
        post = urllib.urlencode(post)
        result = client.request(url, post=post)
        result = result.decode('iso-8859-1').encode('utf-8')

        url = client.parseDOM(result, 'a', ret='href', attrs = {'onclick': 'DL.+?'})[0]
        url += headers
        return url
    except:
        return
def resolve(self, url):
    """Resolve a site link: JSON embed endpoint or direct file scrape.

    load_embed URLs are resolved through the resolver chain; otherwise
    the highest-labelled file/label pair is followed, and its scheme is
    flipped to match the host's 'requiressl' hint. Returns None on failure.
    """
    try:
        headers = {'Referer': url}

        url = urlparse.urljoin(self.base_link, url)
        result = client.request(url, headers=headers)

        # JSON endpoint: hand the embed URL to the generic resolver chain.
        if 'load_embed' in url:
            result = json.loads(result)
            result = resolvers.request(result['embed_url'])
            return result
    except:
        pass

    try:
        # Scrape file/label pairs and take the highest numeric label.
        url = re.compile('"?file"?\s*=\s*"(.+?)"\s+"?label"?\s*=\s*"(\d+)p?"').findall(result)
        url = [(int(i[1]), i[0]) for i in url]
        url = sorted(url, key=lambda k: k[0])
        url = url[-1][1]

        # Follow redirects; honour the CDN's requiressl flag.
        url = client.request(url, output='geturl')
        if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
        else: url = url.replace('https://', 'http://')

        return url
    except:
        pass
def resolve(url):
    """Resolve a cloudtime.to link via its player API.

    Replays the page form, extracts the file key, queries
    player.api.php and returns the stream URL. Returns None on failure.
    """
    try:
        # Id appears either as a path segment or as a ?v= parameter.
        id = re.compile('//.+?/.+?/([\w]+)').findall(url)
        id += re.compile('//.+?/.+?v=([\w]+)').findall(url)
        id = id[0]

        url = 'http://www.cloudtime.to/video/%s' % id
        result = client.request(url)

        # Replay the hidden inputs of the empty-action form.
        post = {}
        f = client.parseDOM(result, 'form', attrs = {'action': ''})
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post = urllib.urlencode(post)
        result = client.request(url, post=post)

        # filekey may be a literal or the name of a JS variable holding it.
        key = re.compile('flashvars.filekey=(.+?);').findall(result)[-1]
        try: key = re.compile('\s+%s="(.+?)"' % key).findall(result)[-1]
        except: pass

        url = 'http://www.cloudtime.to/api/player.api.php?key=%s&file=%s' % (key, id)
        result = client.request(url)

        url = re.compile('url=(.+?)&').findall(result)[0]
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Find the site-relative URL of a TV episode by probing URL patterns.

    Tries /episode/<cat>-sXXeYY, then the dated /yyyy/mm/ variants with
    sXXeYY and NxM episode formats, stopping at the first non-404.
    Shows from 2008 or earlier are rejected. Returns the relative URL
    (utf-8), or None.
    """
    try:
        if url == None: return

        year, month = re.compile('(\d{4})-(\d{2})').findall(date)[-1]
        if int(year) <= 2008: raise Exception()

        # Category slug comes from the show's /category/<slug>/ URL.
        cat = urlparse.urljoin(self.base_link, url)
        cat = cat.split('category/', 1)[-1].rsplit('/')[0]

        # Probe 1: /episode/<cat>-sXXeYY
        url = urlparse.urljoin(self.base_link, '/episode/%s-s%02de%02d' % (cat, int(season), int(episode)))
        result = client.request(url, output='response', error=True)

        # Probe 2: dated path with sXXeYY.
        if '404' in result[0]:
            url = urlparse.urljoin(self.base_link, '/%s/%s/%s-s%02de%02d' % (year, month, cat, int(season), int(episode)))
            result = client.request(url, output='response', error=True)

        # Probe 3: dated path with NxM format.
        if '404' in result[0]:
            url = urlparse.urljoin(self.base_link, '/%s/%s/%s-%01dx%01d' % (year, month, cat, int(season), int(episode)))
            result = client.request(url, output='response', error=True)

        if '404' in result[0]: raise Exception()

        # Strip the scheme+host; fall back to the raw response on no match.
        try: url = re.compile('//.+?(/.+)').findall(url)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def resolve(url): try: headers = "|%s" % urllib.urlencode({"User-Agent": client.agent(), "Referer": url}) id = re.compile("//.+?/.+?/([\w]+)").findall(url) id += re.compile("//.+?/.+?v=([\w]+)").findall(url) id = id[0] url = "http://embed.videoweed.es/embed.php?v=%s" % id result = client.request(url) key = re.compile("flashvars.filekey=(.+?);").findall(result)[-1] try: key = re.compile('\s+%s="(.+?)"' % key).findall(result)[-1] except: pass url = "http://www.videoweed.es/api/player.api.php?key=%s&file=%s" % (key, id) result = client.request(url) url = re.compile("url=(.+?)&").findall(result)[0] url += headers return url except: return
def resolve(url):
    """Resolve a p2pcast.tv stream id, fetching a session token first.

    Decodes the base64 stream URL, appends a token obtained from the
    player JS's token endpoint (best-effort), and attaches
    User-Agent/Referer headers in Kodi's '|header' format.
    Returns None on failure.
    """
    try:
        # Both the referer and the channel id travel in the query string.
        referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        page = urlparse.parse_qs(urlparse.urlparse(url).query)['id'][0]
        page = 'http://p2pcast.tv/stream.php?id=%s&live=0&p2p=0&stretching=uniform' % page

        result = client.request(page, referer=referer)

        js = re.compile('src\s*=\s*[\'|\"](.+?player.+?\.js)[\'|\"]').findall(result)[-1]
        js = client.request(js)

        # Token endpoint is the last .php URL referenced by the player JS;
        # missing token degrades gracefully to an empty suffix.
        try:
            token = re.findall('[\'|\"](.+?\.php)[\'|\"]',js)[-1]
            token = urlparse.urljoin('http://p2pcast.tv', token)
            token = client.request(token, referer=page, headers={'User-Agent': client.agent(), 'X-Requested-With': 'XMLHttpRequest'})
            token = re.compile('[\'|\"]token[\'|\"]\s*:\s*[\'|\"](.+?)[\'|\"]').findall(token)[0]
        except:
            token = ''

        # Flash player URL doubles as the Referer; fall back to the CDN copy.
        try: swf = re.compile('flashplayer\s*:\s*[\'|\"](.+?)[\'|\"]').findall(js)[-1]
        except: swf = 'http://cdn.p2pcast.tv/jwplayer.flash.swf'

        url = re.compile('url\s*=\s*[\'|\"](.+?)[\'|\"]').findall(result)[0]
        url = base64.b64decode(url) + token
        url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': swf})
        return url
    except:
        return
def resolve(url): try: result = client.request(url, mobile=True, close=False) try: post = {} f = client.parseDOM(result, "Form", attrs={"method": "POST"})[0] f = f.replace('"submit"', '"hidden"') k = client.parseDOM(f, "input", ret="name", attrs={"type": "hidden"}) for i in k: post.update({i: client.parseDOM(f, "input", ret="value", attrs={"name": i})[0]}) post = urllib.urlencode(post) except: post = None for i in range(0, 10): try: result = client.request(url, post=post, mobile=True, close=False) result = result.replace("\n", "") result = re.compile("sources *: *\[.+?\]").findall(result)[-1] result = re.compile('file *: *"(http.+?)"').findall(result) url = [i for i in result if ".m3u8" in i] if len(url) > 0: return url[0] url = [i for i in result if not ".m3u8" in i] if len(url) > 0: return url[0] except: time.sleep(1) except: return
def solvemedia(data):
    """Solve a SolveMedia captcha embedded in *data* interactively.

    Shows the puzzle image to the user via keyboard(), verifies the
    answer against api.solvemedia.com, and returns the challenge/response
    pair expected by the target form. Returns None if no SolveMedia
    iframe is present or on failure.
    """
    try:
        # Locate the SolveMedia iframe; bail out quietly if absent.
        url = client.parseDOM(data, 'iframe', ret='src')
        url = [i for i in url if 'api.solvemedia.com' in i]
        if not len(url) > 0: return

        result = client.request(url[0], referer='')

        # Puzzle media may be an iframe or an img under /papi/media.
        response = client.parseDOM(result, 'iframe', ret='src')
        response += client.parseDOM(result, 'img', ret='src')
        response = [i for i in response if '/papi/media' in i][0]
        response = 'http://api.solvemedia.com' + response

        # Ask the user to type the answer for the shown puzzle.
        response = keyboard(response)

        # Replay the noscript verify form with the user's answer.
        post = {}
        f = client.parseDOM(result, 'form', attrs = {'action': 'verify.noscript'})[0]
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post.update({'adcopy_response': response})

        client.request('http://api.solvemedia.com/papi/verify.noscript', post=post)

        # Target forms expect the challenge id plus 'manual_challenge'.
        return {'adcopy_challenge': post['adcopy_challenge'], 'adcopy_response': 'manual_challenge'}
    except:
        pass
def resolve(url):
    """Resolve a 180upload.com link to a direct file URL.

    Replays the captcha form, unpacks the obfuscated player JS, and
    extracts the file link, skipping subtitle (.srt) entries.
    Returns the URL (scheme normalised to http), or None.
    """
    try:
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://180upload.com/embed-%s.html' % url

        result = client.request(url)

        # Replay the hidden inputs of the captcha form.
        post = {}
        f = client.parseDOM(result, 'form', attrs = {'id': 'captchaForm'})[0]
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post = urllib.urlencode(post)
        result = client.request(url, post=post)

        # Unpack the last p.a.c.k.e.d eval() blob on the page.
        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack.unpack(result)

        url = client.parseDOM(result, 'embed', ret='src')
        url += re.compile("'file' *, *'(.+?)'").findall(result)
        url = [i for i in url if not i.endswith('.srt')]
        url = 'http://' + url[0].split('://', 1)[-1]
        return url
    except:
        return
def resolve(url):
    """Resolve a movdivx.com link to a direct file URL.

    Submits the free-download form, unpacks the obfuscated player JS,
    and extracts the file link, skipping subtitle (.srt) entries.
    Returns the URL (scheme normalised to http), or None.
    """
    try:
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://www.movdivx.com/%s' % url

        result = client.request(url)

        # Replay the hidden inputs of the empty-action form.
        post = {}
        f = client.parseDOM(result, 'Form', attrs = {'action': '' })[0]
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post.update({'method_free': 'Free Download'})
        result = client.request(url, post=post)

        # Unpack the last p.a.c.k.e.d eval() blob on the page.
        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack.unpack(result)

        url = client.parseDOM(result, 'embed', ret='src')
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        url = [i for i in url if not i.endswith('.srt')]
        url = 'http://' + url[0].split('://', 1)[-1]
        return url
    except:
        return
def resolve(url): try: url = url.replace("/embed-", "/") url = re.compile("//.+?/([\w]+)").findall(url)[0] url = "http://grifthost.com/embed-%s.html" % url result = client.request(url) try: post = {} f = client.parseDOM(result, "Form", attrs={"method": "POST"})[0] f = f.replace('"submit"', '"hidden"') k = client.parseDOM(f, "input", ret="name", attrs={"type": "hidden"}) for i in k: post.update({i: client.parseDOM(f, "input", ret="value", attrs={"name": i})[0]}) post = post result = client.request(url, post=post) except: pass result = re.compile("(eval.*?\)\)\))").findall(result)[-1] result = jsunpack.unpack(result) url = client.parseDOM(result, "embed", ret="src") url += re.compile("file *: *['|\"](.+?)['|\"]").findall(result) url = [i for i in url if not i.endswith(".srt")] url = "http://" + url[0].split("://", 1)[-1] return url except: return
def resolve(url): try: url = re.compile("//.+?/([\w]+)").findall(url)[0] url = "http://www.movdivx.com/%s" % url result = client.request(url) post = {} f = client.parseDOM(result, "Form", attrs={"action": ""})[0] k = client.parseDOM(f, "input", ret="name", attrs={"type": "hidden"}) for i in k: post.update({i: client.parseDOM(f, "input", ret="value", attrs={"name": i})[0]}) post.update({"method_free": "Free Download"}) result = client.request(url, post=post) result = re.compile("(eval.*?\)\)\))").findall(result)[-1] result = jsunpack.unpack(result) url = client.parseDOM(result, "embed", ret="src") url += re.compile("file *: *['|\"](.+?)['|\"]").findall(result) url = [i for i in url if not i.endswith(".srt")] url = "http://" + url[0].split("://", 1)[-1] return url except: return
def resolve(url):
    """Resolve a Google/Picasa-style feed URL to quality-ranked streams.

    The target entry id follows a '#' fragment. Parses the feedPreload
    JSON blob, collects video content URLs, expands them via tag(), and
    returns at most one entry per quality in 1080p > HD > SD order.
    Returns the list, or None if nothing matched / on failure.
    """
    try:
        id = re.compile('#(\d*)').findall(url)[0]

        result = client.request(url, headers={'User-Agent': client.agent()})

        # Player config is embedded as 'feedPreload: {...},'.
        result = re.search('feedPreload:\s*(.*}]}})},', result, re.DOTALL).group(1)
        result = json.loads(result)['feed']['entry']

        # Pick the entry matching the fragment id (or the only entry).
        if len(result) > 1: result = [i for i in result if str(id) in i['link'][0]['href']][0]
        elif len(result) == 1: result = result[0]

        result = result['media']['content']
        result = [i['url'] for i in result if 'video' in i['type']]
        # tag() expands each content URL into {'quality': ..., ...} dicts.
        result = sum([tag(i) for i in result], [])

        url = []
        try: url += [[i for i in result if i['quality'] == '1080p'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'HD'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'SD'][0]]
        except: pass

        if url == []: return
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Search the site for 'show sNNeNN' posts and collect their links.

    *url* carries 'Title (yyyy)'. Returns a list of {'url', 'title'}
    dicts (utf-8), or None on failure.
    """
    try:
        control.log('wrzcraft episode begin')
        urls = []

        # url is formatted as 'Show Title (yyyy)'.
        tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall( url)[0]
        season, episode = season.zfill(2), episode.zfill(2)
        control.log('got before query')

        query = '%s s%se%s' % (tvshowtitle, season, episode)
        query = self.search_link % (urllib.quote_plus(query))
        control.log('wrzcraft query')
        control.log(query)

        r = urlparse.urljoin(self.base_link, query)
        r = client.replaceHTMLCodes(r)
        r = r.encode('utf-8')
        r = client.request(r)

        posts = client.parseDOM(r, 'div', attrs={'class': 'post'})
        for post in posts:
            # Post titles appear under 'posttitle' or 'expandposttitle'.
            containerDiv = client.parseDOM(post, 'div', attrs={'class': 'posttitle'})
            if not containerDiv:
                containerDiv = client.parseDOM( post, 'div', attrs={'class': 'expandposttitle'})
            href = client.parseDOM(containerDiv, 'a', ret='href')[0]
            title = client.parseDOM(containerDiv, 'a', ret='title')[0]
            href = href.encode('utf-8')
            title = title.encode('utf-8')
            urls.append({'url': href, 'title': title})
        return urls
    except Exception as e:
        control.log('wrzcraft error')
        control.log(e)
        return
def sources(self, url, hostDict, hostprDict):
    """Scrape external-link search results into a list of source dicts.

    Quality is inferred from resolution hints in the link text; Google
    hosts are relabelled 'GDrive'. Returns the list of source dicts, or
    None on request/parse failure (matching the original silent-fail
    behaviour).
    """
    try:
        sources = []
        r = client.request(url)
        try:
            match = re.compile(
                'class="search-title may-blank" >(.+?)</a>.+?<span class="search-result-icon search-result-icon-external"></span><a href="(.+?)://(.+?)/(.+?)" class="search-link may-blank" >').findall(
                r)
            for info, http, host, ext in match:
                # Map resolution hints to a quality label; '480' and
                # anything unrecognised both resolve to 'SD', so the
                # redundant '480' branch was folded into the default.
                if '2160' in info:
                    quality = '4K'
                elif '1080' in info:
                    quality = '1080p'
                elif '720' in info:
                    quality = 'HD'
                else:
                    quality = 'SD'
                url = '%s://%s/%s' % (http, host, ext)
                # Case-insensitive Google detection (replaces three
                # separate checks for 'google'/'Google'/'GOOGLE').
                if 'google' in host.lower(): host = 'GDrive'
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
        except:
            return
    except Exception:
        return
    return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect SD hoster links from a wsonline episode/movie page.

    Only hosts present in *hostDict* are kept; each yields a source dict
    with provider 'wsonline'. Always returns the (possibly empty) list.
    """
    sources = []
    try:
        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)
        result = client.request(url)

        # Host cells alternate between 'even' and 'odd' row classes.
        links = client.parseDOM(result, 'td', attrs = {'class': 'even tdhost'})
        links += client.parseDOM(result, 'td', attrs = {'class': 'odd tdhost'})

        for i in links:
            try:
                host = client.parseDOM(i, 'a')[0].strip()
                #control.log('#host# %s' % host)
                #host = host.split('<', 1)[0]
                #host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                #host = host.strip().lower()
                if host in hostDict:
                    # Normalise to the bare second-level host name.
                    host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                    host = host.strip().lower()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    url = client.parseDOM(i, 'a', ret='href')[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    print("Url",url)
                    sources.append({'source': host, 'quality': 'SD', 'provider': 'wsonline', 'url': url})
            except:
                pass
        return sources
    except Exception as e:
        control.log('ERROR WSO %s' % e)
        return sources
def resolve(url):
    """Resolve an allvid.ch embed URL to its best-quality file URL.

    Scrapes file/label pairs and buckets them as 1080p (>=1080),
    HD (720-1079) or SD (<720), returning the first (highest) bucket
    found. Returns None on failure.
    """
    try:
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://allvid.ch/embed-%s.html' % url

        result = client.request(url)

        # Pairs of (file URL, numeric label).
        result = re.compile( 'file\s*:\s*"(.+?)".+?label\s*:\s*"(\d+)"').findall(result)

        url = []
        try: url.append({ 'quality': '1080p', 'url': [i[0] for i in result if int(i[1]) >= 1080][0] })
        except: pass
        try: url.append({ 'quality': 'HD', 'url': [i[0] for i in result if 720 <= int(i[1]) < 1080][0] })
        except: pass
        try: url.append({ 'quality': 'SD', 'url': [i[0] for i in result if int(i[1]) < 720][0] })
        except: pass

        # Buckets were appended best-first, so [0] is the highest available.
        return url[0]['url']
    except:
        return
def get_movie(self, imdb, title, year):
    """Search the site for a movie by title and year.

    Matches on cleaned title and accepts the copyright year +/- 1.
    Returns the site-relative URL (utf-8), or None.
    """
    try:
        query = self.moviesearch_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.request(query)
        result = client.parseDOM(result, 'div', attrs={'class': 'searchResult'})

        title = cleantitle.movie(title)
        # Release year is fuzzy; accept the year either side too.
        years = [ '%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1) ]

        # (href, title, [copyright year]) per search result.
        result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'h2', ret='title')[0], client.parseDOM(i, 'span', attrs={'itemprop': 'copyrightYear'})) for i in result]
        result = [i for i in result if len(i[2]) > 0]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [ i[0] for i in result if any(x in i[2][0] for x in years) ][0]

        # Strip the scheme+host; fall back to the raw match on no slash path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def getEpisodeList(self, show):
    """Scrape the episode listing table of a show page.

    show: dict with at least 'url' (site-relative) and 'title'.
    Returns a list of {'episode': float, 'url': str} sorted by episode
    number, or None when no episodes were found or the page failed.
    """
    try:
        q = urlparse.urljoin(self.base_link, show['url'])
        r = client.request(q)
        r = client.parseDOM(r, 'table', attrs={'class': 'listing'})[0]
        result = []
        # Captures the trailing episode number, incl. fractional specials (e.g. 6.5).
        episode_re = re.compile(r'[^\d]+(\d+(\.\d+)?)')
        for x in self.getTableRows(r):
            # Rows without an anchor are headers/spacers — skip them.
            try:
                x.index('<a ')
            except ValueError:
                continue
            try:
                url = str(client.parseDOM(x, 'a', ret='href')[0])
                episode = client.parseDOM(x, 'a')[0]
                try:
                    episode = episode.encode('ascii')
                except:
                    pass  # keep unicode text if it is not pure ASCII
                # Remove the show title so only the episode designation remains.
                episode = episode.replace(show['title'], '')
                episode = float(episode_re.search(episode).group(1))
                result.append({'episode': episode, 'url': url})
            except:
                continue  # unparsable row — skip
        if len(result):
            return sorted(result, key=lambda k: k['episode'])
    except:
        pass
    return
def resolve(url, online=None, page_url=None, **kwargs):
    """Resolve a page to its direct .mp4 stream URLs.

    Returns a 3-tuple (video_urls, error_message, b64_params):
    on success video_urls is a list of URLs and b64_params carries the
    request headers (incl. session cookie) base64-encoded as JSON;
    on failure returns (None, error_message, None).
    """
    try:
        if online is None:
            if check(url) == False:
                raise Exception('Video not available')
        video_url = None
        headersx = {'Referer': url, 'User-Agent': client.agent()}
        page_data, head, ret, cookie = client.request(url, output='extended', headers=headersx)
        # Prefer the cookie from the raw response headers if present.
        try:
            cookie = re.findall(r'Set-Cookie:(.*)', str(ret), re.MULTILINE)[0].strip()
        except:
            pass
        headersx['Cookie'] = cookie
        mp4_vids = re.findall(r'\"(http.*?.mp4.*?)\"', page_data)
        items = []
        for u in mp4_vids:
            # Percent-encode spaces and unescape HTML-entity ampersands.
            # (Original code did .replace('&', '&') — a no-op; '&amp;' -> '&'
            # is the evident intent for HTML-escaped URLs.)
            u = u.strip().replace(' ', '%20').replace('&amp;', '&')
            items.append(u)
        if len(items) > 0:
            video_url = items
        else:
            raise Exception('Video not available')
        paramsx = {'headers': headersx}
        params = client.b64encode(json.dumps(paramsx, encoding='utf-8'))
        return (video_url, '', params)
    except Exception as e:
        e = '{}'.format(e)
        return (None, e, None)
def sources(self, url, hostDict, hostprDict):
    """Collect Openload hosters for the requested episode(s).

    url: provider payload containing one or more url="..."&episode="..." pairs.
    Returns a list of source dicts, or None when the payload itself
    cannot be processed.
    """
    try:
        sources = []
        pairs = re.compile('url="(.+?)"&episode="(.+?)"').findall(url)
        for url, episode in pairs:
            r = client.request(url)
            try:
                links = re.compile('<a title="Episode ' + episode + '.+?" data-openload="(.+?)"').findall(r)
                for url in links:
                    # Infer quality from the resolution embedded in the link;
                    # anything unrecognised falls back to SD.
                    if '2160' in url:
                        quality = '4K'
                    elif '1080' in url:
                        quality = '1080p'
                    elif '720' in url:
                        quality = 'HD'
                    else:
                        quality = 'SD'
                    sources.append({
                        'source': 'Openload.co',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            except:
                # Previously returned None here, discarding sources already
                # gathered; move on to the next url/episode pair instead.
                continue
        return sources
    except Exception:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    """Resolve an anime show to its site-relative URL.

    Uses Trakt to confirm the show's genres include anime/animation
    (returns '0' otherwise), resolves the canonical title via TVMaze,
    then searches the site and matches on cleaned title + year.
    Returns a utf-8 encoded path, or None on failure.
    """
    try:
        r = 'search/tvdb/%s?type=show&extended=full' % tvdb
        r = json.loads(trakt.getTrakt(r))
        if not r: return '0'
        d = r[0]['show']['genres']
        if not ('anime' in d or 'animation' in d): return '0'
        tv_maze = tvmaze.tvMaze()
        tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
        tvshowtitle = tvshowtitle['name']
        t = cleantitle.get(tvshowtitle)
        q = urlparse.urljoin(self.base_link, self.search_link)
        q = q % urllib.quote_plus(tvshowtitle)
        r = client.request(q)
        # (Removed stray Python-2-only `print r` debug statement.)
        r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
        r = client.parseDOM(r, 'li')
        # (hrefs, titles, 4-digit years) per list item.
        r = [(client.parseDOM(i, 'a', ret='href'),
              client.parseDOM(i, 'a', ret='title'),
              re.findall(r'\d{4}', i)) for i in r]
        r = [(i[0][0], i[1][0], i[2][-1]) for i in r if i[0] and i[1] and i[2]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        r = r[0][0]
        # Keep only the path component of the matched link.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def img_parser(self, image, referer):
    """Render an interstitial image page in a Kodi window dialog.

    Fetches `image`, draws each embedded <img> centred on a 1280-wide
    canvas (zero-sized images are fetched invisibly, presumably tracking
    pixels — confirm), shows a 'Continue to Video' dialog, and with ~5%
    probability follows the page's first link (looks like an ad/metrics
    ping — TODO confirm). Returns None in all cases.
    """
    try:
        if not image.startswith('http:'):
            image = 'http:' + image
        d = control.windowDialog
        result = client.request(image, referer=referer, close=False)
        for match in re.finditer(
                "<img\s+src='([^']+)'\s+width='(\d+)'\s+height='(\d+)'",
                result):
            img_url, width, height = match.groups()
            img_url = client.replaceHTMLCodes(img_url)
            width = int(width)
            height = int(height)
            if width > 0 and height > 0:
                # Centre horizontally on a 1280px canvas.
                left = (1280 - width) / 2
                f = control.image(left, 0, width, height, img_url)
                d.addControl(f)
            else:
                # Zero-sized image: fetch but do not display.
                client.request(img_url, referer=image, close=False)
        d.show()
        control.dialog.ok(control.addonInfo('name'), str('Continue to Video'), '')
        match = re.search("href='([^']+)", result)
        if match and random.randint(0, 100) < 5:
            result = client.request(match.group(1), close=False)
            match = re.search("location=decode\('([^']+)", result)
            client.request(match.group(1))
        # Best-effort teardown; `f` may be unbound if no image was drawn.
        try:
            d.removeControl(f)
            d.close()
        except:
            return
    except:
        try:
            d.removeControl(f)
            d.close()
        except:
            return
def get_movie(self, imdb, title, year):
    """Search the site's result list for a movie and return its URL path.

    Matches on substring of the cleaned title and accepts year +/- 1.
    Returns a utf-8 encoded site-relative path, or None on failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(
            cleantitle.query2(title)))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        # Page is served with a UTF-8 BOM.
        result = result.decode('utf-8-sig')
        result = client.parseDOM(result, 'ul', attrs={'id': 'resultList2'})[0]
        result = client.parseDOM(result, 'li')
        # (title div, info div, href) per list item.
        result = [(client.parseDOM(i, 'div', attrs={'class': 'title'}),
                   client.parseDOM(i, 'div', attrs={'class': 'info'}),
                   client.parseDOM(i, 'a', ret='href')[0]) for i in result]
        # Reduce to (title, 4-digit year from info, href).
        result = [(i[0][0], re.findall(r"(\d{4})", i[1][0])[0], i[2])
                  for i in result]
        years = [
            '%s' % str(year),
            '%s' % str(int(year) + 1),
            '%s' % str(int(year) - 1)
        ]
        result = [
            i for i in result
            if cleantitle.movie(title) in cleantitle.movie(i[0])
        ]
        result = [i[2] for i in result if any(x in i[1] for x in years)][0]
        # Strip scheme+host if an absolute URL came back.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except:
            url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def filmxy_cache(self, leter=''):
    """Fetch the site's AJAX title index for one initial letter.

    leter: single letter to query (upper-cased before posting).
    Returns a list of (relative_href, '(year)', clean_title) tuples,
    or None on failure.
    """
    try:
        url = urlparse.urljoin(self.base_link, self.search_link)
        headers = {'X-Requested-With': "XMLHttpRequest"}
        params = {"action": "ajax_process2", "query": leter.upper()}
        params = urllib.urlencode(params)
        result = client.request(url, post=params, headers=headers)
        result = client.parseDOM(result, 'p')
        # (href, anchor text, anchor text) per entry; the text is reused
        # for both the year and title extraction below.
        result = [(client.parseDOM(i, 'a', ret='href')[0],
                   client.parseDOM(i, 'a')[0],
                   client.parseDOM(i, 'a')[0]) for i in result]
        # Make the href site-relative, pull "(year)", and keep the title
        # part before the opening parenthesis.
        result = [(re.sub('http.+?//.+?/', '/', i[0]),
                   re.findall("\(\d+\)", i[1]),
                   i[2].split('(')[0]) for i in result]
        # Drop entries without a year.
        result = [(i[0], i[1][0], i[2].strip())
                  for i in result if len(i[1]) > 0]
        return result
    except Exception as e:
        control.log('Filmxy Cache ERROR %s' % e)
        return
def nhlDirectory(season='20152016'):
    """Build the NHL games directory for today's date (US/Eastern).

    season: NHL season id used in the schedule URL, e.g. '20152016'.
            Parameterized (was hard-coded) — the default preserves the
            previous behavior for existing callers.

    Adds static scoreboard/archive entries plus one entry per game
    scheduled for today, then closes the directory.
    """
    # "Today" in US/Eastern (UTC-5), matching the schedule's 'est' times.
    dt = procTimezone(5)
    datex = int(dt.strftime('%Y%m%d'))
    url = 'http://live.nhl.com/GameData/SeasonSchedule-%s.json' % season
    result = client.request(url)
    items = json.loads(result)
    items = sorted(items, key=lambda k: k['est'])
    addDirectoryItem(
        control.lang(30751).encode('utf-8'), 'Scoreboard', 'nhlScoreboard',
        '0', '0')
    addDirectoryItem(
        control.lang(30752).encode('utf-8'), 'Archived', 'nhlArchives', '0',
        '0')
    addDirectoryItem(control.lang(30753).encode('utf-8'), '0', '0', '0', '0')
    addDirectoryItem(control.lang(30754).encode('utf-8'), '0', '0', '0', '0')
    for item in items:
        try:
            est = datetime.datetime.strptime(item['est'], '%Y%m%d %H:%M:%S')
            date = int(est.strftime('%Y%m%d'))
            # Only list games scheduled for today.
            if not date == datex: raise Exception()
            est = procTimezone(5, est)
            name = '%s at %s [COLOR gold](%s)[/COLOR] [COLOR red](%s)[/COLOR]' % (
                item['a'], item['h'], est.strftime('%H:%M'),
                est.strftime('%Y-%m-%d'))
            url = str(item['id'])
            addDirectoryItem(name, url, 'nhlStreams', '0', '0')
        except:
            pass  # skip games on other dates / malformed entries
    endDirectory()
def pidtv_tvcache(self, tvshowtitle):
    """Look up a TV show via the site's Ajax Search Pro endpoint.

    Returns the show's site-relative URL path, or None on failure.
    Requires an exact match between tvshowtitle and the "...: Season N"
    result label prefix.
    """
    try:
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        post = urllib.urlencode({'aspp': tvshowtitle, 'action': 'ajaxsearchpro_search', 'options': 'qtranslate_lang=0&set_exactonly=checked&set_intitle=None&customset%5B%5D=post', 'asid': '1', 'asp_inst_id': '1_1'})
        url = urlparse.urljoin(self.base_link, self.tvsearch_link)
        url = client.request(url, post=post, headers=headers)
        # Pair each result href with its anchor text.
        url = zip(client.parseDOM(url, 'a', ret='href', attrs={'class': 'asp_res_url'}), client.parseDOM(url, 'a', attrs={'class': 'asp_res_url'}))
        # Keep only "<title>: Season N" style labels and require an exact
        # title match (NOTE: compares against the full captured label,
        # including ': Season N' — presumably tvshowtitle is passed in that
        # form; confirm against callers).
        url = [(i[0], re.findall('(.+?: Season \d+)', i[1].strip())) for i in url]
        url = [i[0] for i in url if len(i[1]) > 0 and tvshowtitle == i[1][0]][0]
        # Dead fallback path kept for reference (was an alternate search
        # endpoint); it is a bare string expression and never executes.
        '''
        url = urlparse.urljoin(self.base_link, self.tvsearch_link_2)
        url = url % urllib.quote_plus(tvshowtitle)
        url = client.request(url)
        url = zip(client.parseDOM(url, 'a', ret='href', attrs={'rel': '.+?'}), client.parseDOM(url, 'a', attrs={'rel': '.+?'}))
        url = [i[0] for i in url if i[1] == tvshowtitle][0]
        '''
        url = urlparse.urljoin(self.base_link, url)
        # Reduce to the path component.
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Collect hoster iframes embedded in the given page.

    Only iframes whose registrable domain (last two labels of the
    hostname) appears in hostDict are kept. Returns a list of source
    dicts, or None when the page could not be processed.
    """
    try:
        sources = []
        info_response = client.request(url)
        iframes = re.findall('''<iframe\s*src=['"]([^'"]+)''', info_response)
        for url in iframes:
            # e.g. "player.example.com" -> "example.com"
            host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
            if host in hostDict:
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'en',
                    'url': url.replace('\/', '/'),  # unescape JSON-style slashes
                    'direct': False,
                    'debridonly': False
                })
        return sources
    except Exception:
        return
def resolve(url):
    """Resolve an embed page to a playable URL with request headers.

    Unpacks any packed JS on the page, repeatedly inlines simple
    `var x = '...'` string variables, then pulls the first `file:` entry
    out of the player's `sources: [...]` config. The resolved URL has
    '|User-Agent=...&Referer=...' headers appended (Kodi URL convention).
    Returns None when no http URL could be recovered.
    """
    try:
        headers = '|%s' % urllib.urlencode({
            'User-Agent': client.agent(),
            'Referer': url
        })
        url = url.replace('/video/', '/embed/')
        result = client.request(url)
        # Unpack every p.a.c.k.e.d line and append the plaintext to the page.
        unpacked = ''
        packed = result.split('\n')
        for i in packed:
            try:
                unpacked += jsunpack.unpack(i)
            except:
                pass
        result += unpacked
        result = re.sub('\s\s+', ' ', result)
        # Inline simple string vars; 100 passes handles chained references.
        var = re.compile('var\s(.+?)\s*=\s*\'(.+?)\'').findall(result)
        for i in range(100):
            for v in var:
                result = result.replace("' %s '" % v[0],
                                        v[1]).replace("'%s'" % v[0], v[1])
        url = re.compile('sources\s*:\s*\[.+?file\s*:\s*(.+?)\s*\,').findall(
            result)[0]
        # The file entry may still be a variable name — look it up.
        var = re.compile('var\s+%s\s*=\s*\'(.+?)\'' % url).findall(result)
        if len(var) > 0: url = var[0].strip()
        url += headers
        if url.startswith('http'): return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Search the site for a TV show, matching title (or alias) and year.

    Retries the search request up to 4 times with a short timeout.
    Returns the show's utf-8 encoded URL, or None on failure.
    """
    try:
        query = self.search_link % urllib.quote_plus(
            cleantitle.query(tvshowtitle))
        # The site is flaky — retry a few times before giving up.
        for i in range(4):
            result = client.request(query, timeout=3)
            if not result == None: break
        # Candidate titles: main title plus all aliases, cleaned.
        t = [tvshowtitle] + source_utils.aliases_to_array(aliases)
        t = [cleantitle.get(i) for i in set(t) if i]
        result = re.compile(
            'itemprop="url"\s+href="([^"]+).*?itemprop="name"\s+class="serie-title">([^<]+)',
            re.DOTALL).findall(result)
        for i in result:
            # NOTE(review): `year in i[1]` expects the year to appear in the
            # display title text itself — confirm against site markup.
            if cleantitle.get(cleantitle.normalize(
                    i[1])) in t and year in i[1]:
                url = i[0]
                url = url.encode('utf-8')
                return url
    except:
        return
def resolve(url):
    """Resolve a Google Drive preview link to its stream list.

    Rewrites the URL to docs.google.com, extracts the fmt_stream_map from
    the page, and returns at most one stream per quality tier in
    1080p/HD/SD preference order. Returns None when nothing resolves.
    """
    try:
        page_url = url.split('/preview', 1)[0]
        page_url = page_url.replace('drive.google.com', 'docs.google.com')
        page = client.request(page_url)
        # fmt_stream_map is embedded as a JSON string literal.
        stream_map = re.compile('"fmt_stream_map",(".+?")').findall(page)[0]
        entries = json.loads(stream_map).split(',')
        # Each entry is "itag|url"; tag() maps the url to quality dicts.
        streams = sum([tag(entry.split('|')[-1]) for entry in entries], [])
        picked = []
        for quality in ('1080p', 'HD', 'SD'):
            try:
                picked += [[s for s in streams if s['quality'] == quality][0]]
            except:
                pass  # nothing in this tier
        if picked == []: return
        return picked
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Collect sources from a page's translation-grouped link lists.

    Detects movie vs. episode from the URL, then walks the #links div:
    each <ul data-type=...> carries a translation type whose links are
    handed to self.extract_sources. Returns the accumulated list (always
    a list, possibly empty).
    """
    try:
        sources = []
        typ = ''
        if url == None: return sources
        if "serial" in url: typ = "episode"
        if "film" in url: typ = "movie"
        url = urlparse.urljoin(self.base_link, url)
        result = client.request(url)
        html = result
        result = client.parseDOM(result, 'div', attrs={'id': 'links'})
        attr = client.parseDOM(result, 'ul', ret='data-type')
        result = client.parseDOM(result, 'ul')
        for x in range(0, len(attr)):
            transl_type = attr[x]
            # NOTE(review): links taken from result[x + 1] — presumably the
            # first parsed <ul> is a header/tab list so content lists are
            # offset by one; confirm against the site's markup.
            links = result[x + 1]
            sources += self.extract_sources(transl_type, links, typ, html)
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Fetch Genvideos stream variants for a video id.

    url: page URL whose 'v' query parameter identifies the video.
    Posts the id to self.post and maps each returned key to a quality
    tier. Always returns the (possibly empty) sources list.
    """
    sources = []
    try:
        if url == None: return sources
        post = urlparse.parse_qs(urlparse.urlparse(url).query)['v'][0]
        post = urllib.urlencode({'v': post})
        result = client.request(self.post, post=post)
        result = json.loads(result)
        # (Removed two leftover debug print(...) calls.)
        for i in result:
            # Quality from the response key; '72' (720) deliberately checked
            # after '1080' — a key matching both ends up HD. TODO confirm
            # keys never contain both markers.
            mq = 'SD'
            if '1080' in i: mq = '1080p'
            if '72' in i: mq = 'HD'
            sources.append({
                'source': 'gvideo',
                'quality': mq,
                'provider': 'Genvideos',
                'url': result[i]
            })
        return sources
    except Exception as e:
        control.log('ERROR gen %s' % e)
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Find an episode's href within a show page's season lists.

    Scans each <ul class="episodios"> block; the 'numerando' div holds
    "<season> x <episode>". Returns the matching href or None.
    """
    try:
        if url == None: return
        result = client.request(url)
        # cant user dom parser here because HTML is bugged div is not closed
        result = re.findall('<ul class="episodios">(.*?)</ul>', result,
                            re.MULTILINE | re.DOTALL)
        for item in result:
            season_episodes = re.findall('<li>(.*?)</li>', item,
                                         re.MULTILINE | re.DOTALL)
            for row in season_episodes:
                s = client.parseDOM(row, 'div', attrs={'class':
                                                       'numerando'})[0].split('x')
                season_found = s[0].strip()
                episode_found = s[1].strip()
                # Wrong season: abandon this <ul> and try the next one.
                # NOTE(review): string comparison — '1' vs '01' formatting
                # differences would fail to match; confirm both sides use
                # the same formatting.
                if (season_found != season):
                    break
                if episode_found == episode:
                    return client.parseDOM(row, 'a', ret='href')[0]
    except:
        return
def get_episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Build an episode URL after validating the show's year.

    url: query-string payload with at least 'tvshowtitle' and 'year'.
    Fetches the season page and only returns the episode path when the
    page's 'dat' span matches the expected year; otherwise None.
    """
    try:
        if url == None: return
        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        # (Removed Python-2-only `print url` debug statement.)
        tvshowtitle = cleantitle.query10(url['tvshowtitle'])
        r = '/tv-show/%s/season/%01d/episode/%01d' % (
            tvshowtitle, int(season), int(episode))
        y = '/tv-show/%s/season/%01d' % (tvshowtitle, int(season))
        control.log('>>>>>> %s' % y)
        result = client.request(urlparse.urljoin(self.base_link, y))
        result = client.parseDOM(result, 'span', attrs={'class': 'dat'})[0]
        # Year check guards against title collisions on the site.
        if url['year'] == str(result.strip()):
            url = r.encode('utf-8')
            control.log('>>>>>> Putlocker URL %s' % url)
            return url
        return
    except:
        return
def resolve(url):
    # NOTE(review): this resolver is deliberately disabled — the bare
    # `return` below makes the entire body unreachable (mightyupload is
    # presumably dead/defunct; confirm before re-enabling by removing it).
    return
    try:
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://www.mightyupload.com/embed-%s.html' % url
        result = client.request(url, mobile=True)
        # Direct file reference in the embed page, if present.
        url = re.compile("file *: *'(.+?)'").findall(result)
        if len(url) > 0: return url[0]
        # Otherwise unpack the packed JS and look for embed/file URLs.
        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack.unpack(result)
        url = client.parseDOM(result, 'embed', ret='src')
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        url = [i for i in url if not i.endswith('.srt')]  # drop subtitles
        url = 'http://' + url[0].split('://', 1)[-1]
        return url
    except:
        return
def search(self, title, localtitle, year):
    """Search the site for a movie by title/local title and year.

    Tries several title variants (normalized and raw, each truncated at
    the first '.') and returns the first result link whose description
    contains all query words plus the year. Returns None when nothing
    matches.
    """
    try:
        base_title = title.split('.')[0]
        base_local = localtitle.split('.')[0]
        candidates = [
            cleantitle.normalize(cleantitle.getsearch(base_title)),
            cleantitle.normalize(cleantitle.getsearch(base_local)),
            base_title,
            base_local,
        ]
        for candidate in candidates:
            query = candidate.replace(" ", "+")
            page = client.request(self.search_link % query)
            cells = client.parseDOM(page, 'div', attrs={'class': 'col-xs-4'})
            for cell in cells:
                try:
                    info = client.parseDOM(cell, 'div',
                                           attrs={'class': 'col-sm-8'})
                    label = client.parseDOM(info, 'p')[0].lower()
                    href = self.base_link + client.parseDOM(cell, 'a', ret='href')[0]
                    # Split happens after the space->'+' substitution, so the
                    # whole plus-joined query is matched as a single "word"
                    # (faithful to the original behavior).
                    words = query.lower().split(" ")
                    if self.contains_all_words(label, words) and year in label:
                        return href
                except:
                    continue
        return
    except Exception as e:
        print(e)
        return
def get_movie(self, imdb, title, year):
    """Search the site for a movie and return its URL path (segos).

    Matches on substring of the cleaned title and accepts year +/- 1.
    Returns a utf-8 encoded path, or None on failure.
    """
    try:
        query = self.search_link % (urllib.quote_plus(
            cleantitle.query2(title)))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)
        title = cleantitle.movie(title)
        result = client.parseDOM(
            result,
            'div',
            attrs={'style': 'overflow: hidden; margin-top: 15px;'})
        # (href, second anchor text, first 4-digit year in that text).
        result = [
            (client.parseDOM(i, 'a', ret='href')[0],
             client.parseDOM(i, 'a')[1],
             str(re.findall(r"(\d{4})", client.parseDOM(i, 'a')[1])[0]))
            for i in result
        ]
        years = [
            '%s' % str(year),
            '%s' % str(int(year) + 1),
            '%s' % str(int(year) - 1)
        ]
        result = [i for i in result if title in cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        # Strip scheme+host if an absolute URL came back.
        try:
            url = re.compile('//.+?(/.+)').findall(result)[0]
        except:
            url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        control.log('Segos URL %s' % url)
        return url
    except:
        return
def resolve(url):
    """Resolve a media page to its stream list by scraping inline JSON.

    Slices the flattened page between the media id and the closing ']]',
    extracts escaped stream URLs, and returns at most one stream per
    quality tier in 1080p/HD/SD preference order (via tag()).
    Returns None when nothing resolves.
    """
    try:
        # Media id is the last path segment.
        id = (urlparse.urlparse(url).path).split('/')[-1]
        result = client.request(url)
        result = result.replace('\r','').replace('\n','').replace('\t','')
        # Keep only the array following the id's occurrence in the page.
        result = result.split('"%s"' % id)[-1].split(']]')[0]
        result = re.compile('\d*,\d*,\d*,"(.+?)"').findall(result)
        # Unescape \u003d / \u0026 and reverse (best quality appears last).
        result = [i.replace('\\u003d','=').replace('\\u0026','&') for i in result][::-1]
        result = sum([tag(i) for i in result], [])
        url = []
        try: url += [[i for i in result if i['quality'] == '1080p'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'HD'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'SD'][0]]
        except: pass
        if url == []: return
        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve a TV episode to a site path with an episode query.

    url: query-string payload carrying 'tvshowtitle'. Searches the site's
    AJAX endpoint for "<title> - Season <n>", filters candidates by clean
    title and season, validates the first two by year via the cached
    muchmovies_info lookup, and returns '<path>?episode=<n>' or None.
    """
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        t = cleantitle.get(data['tvshowtitle'])
        # Air-date year, with +/- 1 tolerance for listing discrepancies.
        year = re.findall('(\d{4})', date)[0]
        years = [str(year), str(int(year)+1), str(int(year)-1)]
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        query = urllib.urlencode({'keyword': '%s - Season %s' % (data['tvshowtitle'], season)})
        url = urlparse.urljoin(self.base_link, self.search_link)
        r = client.request(url, post=query, headers=headers)
        r = json.loads(r)['content']
        # (Removed three leftover debug print(...) calls.)
        r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'ss-title'}), client.parseDOM(r, 'a', attrs = {'class': 'ss-title'}))
        r = [(i[0], re.findall('(.+?) - season (\d+)$', i[1].lower())) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i for i in r if t == cleantitle.get(i[1])]
        # Best two candidates; pair each href with its trailing numeric id.
        r = [i[0] for i in r if season == '%01d' % int(i[2])][:2]
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        for i in r:
            try:
                # Cached (year, quality) lookup for the candidate id.
                y, q = cache.get(self.muchmovies_info, 9000, i[1])
                if not y in years: raise Exception()
                return urlparse.urlparse(i[0]).path + '?episode=%01d' % int(episode)
            except:
                pass  # wrong year / lookup failed — try next candidate
    except:
        return
def SolveCaptcha(captcha_response, fid, dlticket):
    """Redeem a solved captcha against the Openload download API.

    captcha_response: solved captcha token.
    fid: Openload file id, or a full URL from which the id is extracted.
    dlticket: download ticket from the preceding dlticket API call.
    Returns the direct download URL (downgraded to http), or None on
    any failure (logged).
    """
    burl = 'https://api.openload.co/1/file/'
    try:
        # Accept either a bare file id or a full URL.
        if 'http' in fid:
            fid = match_id(fid)
        url = None
        url = burl + 'dl?file=' + fid + '&ticket=' + dlticket + '&captcha_response=' + captcha_response
        data = client.request(url)
        data = json.loads(data)
        if data["status"] == 200:
            # NOTE(review): https is deliberately downgraded to http here —
            # presumably for player compatibility; confirm.
            return data['result']['url'].replace("https", "http")
        else:
            log(type='FAIL', method='SolveCaptcha', err='cannot handle 2nd api link >>> %s' % data['msg'])
    except:
        log(type='ERROR', method='SolveCaptcha', err='cannot handle 2nd api link %s' % url)
    return None
def getVideoMetaData(url, httpsskip=False):
    """Fetch Google Drive video metadata for a drive file URL.

    url: any URL; only 'google.com/file' URLs are queried, others get
    the empty default. httpsskip is forwarded to client.request.
    Returns the 4-tuple (result, headers, content, cookie) from the
    extended request, or ('', '', '', '') when not a drive-file URL or
    on error.
    """
    res = ('', '', '', '')
    try:
        if 'google.com/file' in url:
            r_split = url.split('/')
            # File id is the second-to-last path segment (.../file/d/<id>/view).
            meta_url = 'https://docs.google.com/get_video_info?docid=%s' % r_split[-2]
            headers = {}
            headers['User-Agent'] = client.USER_AGENT
            result, headers, content, cookie = client.request(
                meta_url,
                output='extended',
                headers=headers,
                IPv4=True,
                httpsskip=httpsskip)
            return (result, headers, content, cookie)
        return res
    except Exception as e:
        print('ERROR: %s' % e)
        return res