def check(self, i, hostDict):
    try:
        url = client.replaceHTMLCodes(i[0])
        url = url.encode('utf-8')

        result = ''
        result = client.request(urlparse.urljoin(self.base_link, url), headers=self.headers)

        url = re.compile('class=[\'|\"]*myButton.+?href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(result)[0]
        #print("URL2",url,i[1])
        #control.log("WATCHSERIES CHECK %s | url: %s" % (url,i[0]))
        url = client.replaceHTMLCodes(url)

        host = urlparse.urlparse(url).netloc
        host = host.replace('www.', '').replace('embed.', '')
        host = host.lower()
        if not host in hostDict:
            #control.log("WATCHSERIES HOST %s" % host)
            raise Exception()
        host = host.rsplit('.', 1)[0]
        host = client.replaceHTMLCodes(host)
        host = host.encode('utf-8')

        self.sources.append({'source': host, 'quality': i[1], 'provider': 'Watchseries', 'url': url})
    except:
        pass
def get_sources(self, url, hosthdDict, hostDict, locDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = client.request(url)

        links = client.parseDOM(result, 'td', attrs = {'class': 'even tdhost'})
        links += client.parseDOM(result, 'td', attrs = {'class': 'odd tdhost'})

        for i in links:
            try:
                host = client.parseDOM(i, 'a')[0]
                host = host.split('<', 1)[0]
                host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                host = host.strip().lower()
                if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                url = client.parseDOM(i, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                sources.append({'source': host, 'quality': 'SD', 'provider': 'WSOnline', 'url': url})
            except:
                pass

        return sources
    except:
        return sources
def CartoonCrazy(image, fanart):
    addDirectoryItem('[B]SEARCH[/B]', 'CCsearch', '0', mediaPath+'CCsearch.png', fanart, '')

    try:
        url = 'http://kisscartoon.me/CartoonList/'
        result = cloudflare.request(url)
        items = client.parseDOM(result, 'div', attrs={'id': 'container'})
        items = client.parseDOM(items, 'div', attrs={'id': 'rightside'})
        items = client.parseDOM(items, 'div', attrs={'class': 'barContent'})[1]
        items = client.parseDOM(items, 'a', ret='href')
    except:
        return

    for item in items:
        try:
            name = '[B]'+ item[7:] +'[/B]'
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = item
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            addDirectoryItem(name, 'CCcat', image, image, fanart, url)
        except:
            pass

    endDirectory()
def radiotunes(image, fanart):
    try:
        url = 'http://radiotunes.com/channels'
        result = client.request(url)
        result = client.parseDOM(result, 'ul', attrs={'id': 'channel-nav'})[0]
        items = client.parseDOM(result, 'li')
    except:
        return

    for item in items:
        try:
            name = client.parseDOM(item, 'span')[0]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = client.parseDOM(item, 'a', ret='href')[0]
            url = url.replace('/', '')
            url = 'http://pub7.radiotunes.com:80/radiotunes_%s_aac' % url
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            thumb = client.parseDOM(item, 'img', ret='src')[0]
            thumb = thumb.rsplit('?', 1)[0]
            if thumb.startswith('//'): thumb = 'http:%s' % thumb
            thumb = client.replaceHTMLCodes(thumb)
            thumb = thumb.encode('utf-8')

            addDirectoryItem(name, url, thumb, image, fanart)
        except:
            pass

    endDirectory()
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return

        url = urlparse.urljoin(self.base_link, url)

        result = client.request(url, 'tv_episode_item')
        result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

        title = cleantitle.get(title)

        # build a "Month D YYYY" string from the episode air date
        premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(date)[0]
        premiered = '%s %01d %s' % (premiered[1].replace('01','January').replace('02','February').replace('03','March').replace('04','April').replace('05','May').replace('06','June').replace('07','July').replace('08','August').replace('09','September').replace('10','October').replace('11','November').replace('12','December'), int(premiered[2]), premiered[0])

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs = {'class': 'tv_num_versions'})) for i in result]
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

        url = client.replaceHTMLCodes(url[0][0])
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        except: pass
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def get_movie(self, imdb, title, year):
    try:
        query = self.search_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)

        result = self.request(query, 'movie_table')
        result = client.parseDOM(result, 'div', attrs = {'class': 'movie_table'})

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'img', ret='alt')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [i[0] for i in result if title == cleantitle.movie(i[1])][0]

        url = client.replaceHTMLCodes(result)
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        except: pass
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def AnimeCrazy(image, fanart):
    thumb = mediaPath+'ACsearch.png'

    try:
        url = 'http://cartoons8.co/list/'
        result = client.request(url)
        items = client.parseDOM(result, 'div', attrs={'class': 'r_Content'})[0]
        items = client.parseDOM(items, 'li')
    except:
        return

    addDirectoryItem('[B]SEARCH[/B]', 'ACsearch', thumb, image, fanart, '')

    for item in items:
        url = client.parseDOM(item, 'a', ret='href')[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        name = client.parseDOM(item, 'a')[0]
        name = name.replace('\n','').replace(' ','').upper()
        name = client.replaceHTMLCodes(name)
        name = name.encode('utf-8')

        addDirectoryItem(name, 'ACcat', image, image, fanart, url+'/?filter=newest&req=anime')

    endDirectory()
def imdb_user_list(self, url):
    try:
        result = client.request(url)
        result = result.decode('iso-8859-1').encode('utf-8')
        items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'})
    except:
        return self.list

    for item in items:
        try:
            name = client.parseDOM(item, 'a')[0]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = client.parseDOM(item, 'a', ret='href')[0]
            url = url.split('/list/', 1)[-1].replace('/', '')
            url = self.imdblist_link % url
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            self.list.append({'name': name, 'url': url, 'context': url})
        except:
            pass

    return self.list
def ACpart(url, image, fanart):
    try:
        result = client.request(url)
        items = client.parseDOM(result, 'table', attrs={'class': 'listing'})
        items = client.parseDOM(items, 'td')
        items = zip(client.parseDOM(items, 'a', ret='href'), client.parseDOM(items, 'a'))
        if len(items) == 1: return ACstream(items[0][0])
    except:
        return

    for item in items[::-1]:
        try:
            name = item[1]
            name = name.replace('\n', '').replace(' ','')
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = item[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            print url
            addDirectoryItem(name, 'ACstream', image, image, fanart, url)
        except:
            pass

    episodeCategory()
def radio1fm(image, fanart):
    try:
        url = 'http://1.fm/home/showstations?stations=showall'
        result = client.request(url)
        a = client.parseDOM(result, 'div', attrs={'class': 'staionitemcont'})
        b = client.parseDOM(result, 'div', attrs={'class': 'contbtnrgt'})
        items = zip(a, b)
    except:
        return

    for item in items:
        try:
            name = client.parseDOM(item[1], 'a', ret='rel')[0]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = client.parseDOM(item[1], 'a', ret='data-scsrv')[0]
            if not url.startswith('http'): url = 'http://%s' % url
            url += ':%s' % client.parseDOM(item[1], 'a', ret='data-hiaac')[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            thumb = client.parseDOM(item[0], 'img', ret='src')[0]
            thumb = thumb.rsplit('?', 1)[0]
            thumb = client.replaceHTMLCodes(thumb)
            thumb = thumb.encode('utf-8')

            addDirectoryItem(name, url, thumb, image, fanart)
        except:
            pass

    endDirectory()
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'main_body')
        result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

        title = cleantitle.get(title)
        premiered = date  # air date in YYYY-MM-DD form, matched against the listing below

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

        url = client.replaceHTMLCodes(url[0][0])
        url = proxy.parse(url)
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = client.source(url)

        links = re.compile("/watch\.php\?q=(.+?)'").findall(result)

        for i in links:
            try:
                url = base64.urlsafe_b64decode(i.encode('utf-8'))
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                host = base64.urlsafe_b64decode(i.encode('utf-8'))
                host = urlparse.urlparse(host).netloc
                host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                host = host.strip().lower()
                if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': 'SD', 'provider': 'Filmikz', 'url': url})
            except:
                pass

        return sources
    except:
        return sources
def anime_list_3(self, url):
    try:
        url = urlparse.urljoin(self.anime_link, url)

        result = client.request(url)
        items = client.parseDOM(result, 'div', attrs={'id': 'left_content'})[0]
        items = client.parseDOM(items, 'li')
    except:
        return

    for item in items:
        try:
            title = client.parseDOM(item, 'a')[0]
            if '>Movie<' in title: raise Exception()
            title = re.sub('<.+?>|</.+?>|\\\\|\n', '', title).strip()
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            url = client.parseDOM(item, 'a', ret='href')[0]
            url = urlparse.urljoin(self.anime_link, url)
            url = url.replace(' ','%20')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            image = client.parseDOM(item, 'img', ret='src')[0]
            image = urlparse.urljoin(self.anime_link, image)
            image = image.encode('utf-8')

            self.list.append({'title': title, 'url': url, 'image': image})
        except:
            pass

    return self.list
def radio181fm(image, fanart):
    try:
        url = 'http://www.181.fm/index.php?p=mp3links'
        result = client.request(url)
        index = []
        items = client.parseDOM(result, 'td', attrs={'id': 'rightlinks'})
    except:
        return

    for item in items:
        try:
            if not item.startswith('http://'): raise Exception()

            name = items[:items.index(item)]
            name = [i for i in name if not 'http://' in i][-1]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = item
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            index.append({'name': name, 'url': url, 'thumb': '0', 'image': image, 'fanart': fanart})
        except:
            pass

    index = [i for x, i in enumerate(index) if i not in index[x+1:]]
    index = sorted(index, key=lambda k: k['name'])

    for i in index: addDirectoryItem(i['name'], i['url'], i['thumb'], i['image'], i['fanart'])

    endDirectory()
def anime_list_2(self, url, image, fanart):
    try:
        url = urlparse.urljoin(self.anime_link, url)

        result = client.request(url)
        items = client.parseDOM(result, 'ul', attrs={'class': 'cat_page_box'})[-1]
        items = client.parseDOM(items, 'li')
        items = items[::-1]
    except:
        return

    for item in items:
        try:
            title = client.parseDOM(item, 'a')[0]
            title = re.sub('<.+?>|</.+?>|\\\\|\n', ' ', title).strip()
            title = re.sub('Watch$', '', title).strip()
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            url = client.parseDOM(item, 'a', ret='href')[0]
            url = urlparse.urljoin(self.anime_link, url)
            url = url.replace(' ','%20')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            self.list.append({'title': title, 'url': url, 'image': image})
        except:
            pass

    return self.list
def resolve(self, url):
    control.log('RESSS %s' % url)
    try:
        if 'openload.co' in url:
            url = resolvers.request(url)
            return url

        if 'movieshd' in url:
            r = self.request(url)[0]
            r = re.findall("file: '([^']+)',label: '(\d+)", r)
            r1 = sorted(r, key=lambda k: k[1])
            r2 = client.replaceHTMLCodes(r1[-1][0])
            #r2 = client.googlepass(url)
            return r2

        if 'seriesonline' in url:
            r = self.request(url)[0]
            r = [client.parseDOM(r, 'source', ret='src'), client.parseDOM(r, 'source', ret='label')]
            r = zip(r[0], r[1])
            r1 = sorted(r, key=lambda k: k[1])
            r2 = client.replaceHTMLCodes(r1[-2][0])
            r2 = client.googlepass(url)
            return r2

        return False
    except Exception as e:
        control.log('RESSS %s' % e)
        pass
def cartoon_list_2(self, url, image, fanart):
    try:
        url = urlparse.urljoin(self.cartoons_link, url)

        result = client.request(url)
        items = client.parseDOM(result, 'ul', attrs = {'id': 'episode_related'})[0]
        items = client.parseDOM(items, 'li')
    except:
        return

    for item in items:
        try:
            title = client.parseDOM(item, 'a')[0]
            title = title.strip()
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            url = client.parseDOM(item, 'a', ret='href')[0]
            url = urlparse.urljoin(self.cartoons_link, url)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            self.list.append({'title': title, 'url': url, 'image': image})
        except:
            pass

    return self.list
def get_sources(self, url, hosthdDict, hostDict, locDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = client.source(url)

        links = client.parseDOM(result, "td", attrs={"class": "even tdhost"})
        links += client.parseDOM(result, "td", attrs={"class": "odd tdhost"})

        for i in links:
            try:
                host = client.parseDOM(i, "a")[0]
                host = host.split("<", 1)[0]
                host = host.rsplit(".", 1)[0].split(".", 1)[-1]
                host = host.strip().lower()
                if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode("utf-8")

                url = client.parseDOM(i, "a", ret="href")[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode("utf-8")

                sources.append({"source": host, "quality": "SD", "provider": "WSOnline", "url": url})
            except:
                pass

        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    try:
        self.sources = []
        mylinks = []

        hostDict = sorted(hostDict)
        for i in hostDict: control.log("WA HO %s" % i)

        if url == None: return self.sources

        url = url.replace('/json/', '/')

        result = ''
        result, headers, content, cookie = client.request(urlparse.urljoin(self.base_link, url), output='extended')
        #result, headers, content, cookie = client.request(url, limit='0', output='extended')

        self.headers['Referer'] = urlparse.urljoin(self.base_link, url)
        self.headers['Cookie'] = cookie

        result = result.replace('\n','')
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'div', attrs = {'id': 'lang_1'})[0]

        links = re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>].+?title=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(result)
        links = [x for y,x in enumerate(links) if x not in links[:y]]

        for i in links:
            try:
                host = i[1]
                host = host.split('.', 1)[0]
                host = host.strip().lower()
                #if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                url = i[0]
                url = client.replaceHTMLCodes(url)
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except: pass
                if not url.startswith('http'): url = urlparse.urljoin(self.base_link, url)
                if not '/cale/' in url: raise Exception()
                url = url.encode('utf-8')
                url = url.replace('/json/', '/')
                url = urlparse.urlparse(url).path

                mylinks.append([url, 'SD'])
            except:
                pass

        threads = []
        for i in mylinks: threads.append(workers.Thread(self.check, i, hostDict))
        [i.start() for i in threads]

        for i in range(0, 10 * 2):
            is_alive = [x.is_alive() for x in threads]
            if all(x == False for x in is_alive): break
            time.sleep(1)

        return self.sources
    except:
        return self.sources
def get_movie(self, imdb, title, year):
    try:
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = client.parseDOM(result, 'div', attrs = {'class': 'home_post_cont.+?'})

        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'img', ret='title')[0]) for i in result]
        result = [(i[0], client.replaceHTMLCodes(i[1])) for i in result]
        result = [(i[0], client.parseDOM(i[1], 'a')) for i in result]
        result = [(i[0], i[1][0]) for i in result if len(i[1]) > 0]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def check(self, i):
    try:
        control.log(">>>>>>>>>>>>>>> ONE CHECK %s" % (i[0]))
        url = client.replaceHTMLCodes(i[0])
        url = url.encode('utf-8')

        host = urlparse.urlparse(url).netloc
        host = host.replace('www.', '').replace('embed.', '')
        host = host.rsplit('.', 1)[0]
        host = host.lower()
        host = client.replaceHTMLCodes(host)
        host = host.encode('utf-8')
        #control.log("##OneClickWatch %s - url %s" % (host, i[0]))

        #if host in i[2]: check = url = resolvers.request(url)
        if host == 'openload': check = openload.check(url)
        elif host == 'uptobox': check = uptobox.check(url)
        elif host == 'cloudzilla': check = cloudzilla.check(url)
        elif host == 'zstream': check = zstream.check(url)
        elif host == 'videomega': check = videomega.check(url)
        else: raise Exception()

        if check == None or check == False: raise Exception()

        self.sources.append({'source': host, 'quality': i[1], 'provider': 'Oneclickwatch', 'url': url})
    except:
        pass
def CCpart(url, image, fanart):
    try:
        url = urlparse.urljoin('http://kisscartoon.me', url)
        result = cloudflare.request(url)
        items = client.parseDOM(result, 'table', attrs={'class': 'listing'})
        items = client.parseDOM(items, 'td')
        items = zip(client.parseDOM(items, 'a', ret='href'), client.parseDOM(items, 'a'))
        if len(items) == 1: return CCstream(items[0][0])
    except:
        return

    for item in items[::-1]:
        try:
            name = item[1]
            name = name.replace('\n', '')
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            url = item[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            addDirectoryItem(name, 'CCstream', image, image, fanart, url)
        except:
            pass

    episodeCategory()
def get_movie(self, imdb, title, year):
    try:
        t = cleantitle.get(title)

        query = '%s %s' % (title, year)
        query = base64.b64decode(self.search_link) % urllib.quote_plus(query)

        result = client.request(query)
        result = json.loads(result)['results']
        result = [(i['url'], i['titleNoFormatting']) for i in result]
        result = [(i[0], re.findall('(?:^Ver |)(.+?)(?: HD |)\((\d{4})', i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]

        r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]

        if len(r) == 0:
            t = 'http://www.imdb.com/title/%s' % imdb
            t = client.request(t, headers={'Accept-Language':'es-ES'})
            t = client.parseDOM(t, 'title')[0]
            t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
            t = cleantitle.get(t)
            r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]

        try: url = re.findall('//.+?(/.+)', r[0][0])[0]
        except: url = r[0][0]
        try: url = re.findall('(/.+?/.+?/)', url)[0]
        except: pass
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        pass

    try:
        t = cleantitle.get(title)

        query = self.search3_link % urllib.quote_plus(cleantitle.query(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.request(query)
        result = re.sub(r'[^\x00-\x7F]+','', result)

        r = result.split('<li class=')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'), re.findall('\((\d{4})\)', i)) for i in r]
        r = [(i[0][0], re.sub('\(|\)','', i[1][0]), i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

        try: url = re.findall('//.+?(/.+)', r)[0]
        except: url = r
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        pass
def get_sources(self, url, hosthdDict, hostDict, locDict):
    try:
        self.sources = []
        mylinks = []

        if url == None: return self.sources

        url = url.replace('/json/', '/')

        result = ''
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.request(urlparse.urljoin(base_link, url), headers=self.headers)
            if 'lang_1' in str(result): break

        result = result.replace('\n','')
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'div', attrs = {'id': 'lang_1'})[0]

        links = re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>].+?title=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(result)
        links = [x for y,x in enumerate(links) if x not in links[:y]]

        for i in links:
            try:
                host = i[1]
                host = host.split('.', 1)[0]
                host = host.strip().lower()
                if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                url = i[0]
                url = client.replaceHTMLCodes(url)
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except: pass
                if not url.startswith('http'): url = urlparse.urljoin(self.base_link, url)
                if not '/cale/' in url: raise Exception()
                url = url.encode('utf-8')
                url = url.replace('/json/', '/')
                url = urlparse.urlparse(url).path

                #sources.append({'source': host, 'quality': 'SD', 'provider': 'Watchseries', 'url': url})
                mylinks.append([url, 'SD'])
            except:
                pass

        threads = []
        for i in mylinks: threads.append(workers.Thread(self.check, i))
        [i.start() for i in threads]

        for i in range(0, 10 * 2):
            is_alive = [x.is_alive() for x in threads]
            if all(x == False for x in is_alive): break
            time.sleep(1)

        return self.sources
    except:
        return self.sources
def scn_list(self, url, lang=None):
    try:
        links = [self.base_link_1, self.base_link_1, self.base_link_1]
        for base_link in links:
            try: result = client.source(base_link + url)
            except: result = ''
            if 'nag cf' in result: break

        if result == '': return result

        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "div", attrs={"class":"nag cf"})[0]

        movies = client.parseDOM(result, "div", attrs={"class":"thumb"})

        for movie in movies:
            try:
                title = client.parseDOM(movie, "a", ret="title")[0]
                title = re.compile('(.+?) [(]\d{4}[)]').findall(title)[0]
                title = client.replaceHTMLCodes(title)
                try: title = title.encode('utf-8')
                except: pass

                year = client.parseDOM(movie, "a", ret="title")[0]
                year = re.compile('.+? [(](\d{4})[)]').findall(year)[0]
                year = year.encode('utf-8')

                name = '%s (%s)' % (title, year)
                try: name = name.encode('utf-8')
                except: pass

                url = client.parseDOM(movie, "a", ret="href")[0]
                url = client.replaceHTMLCodes(url)
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except: pass

                poster = '0'
                try: poster = client.parseDOM(movie, "img", ret="src")[0]
                except: pass
                poster = client.replaceHTMLCodes(poster)
                try: poster = urlparse.parse_qs(urlparse.urlparse(poster).query)['u'][0]
                except: pass
                poster = poster.encode('utf-8')

                duration = '0' ; tvdb = '0' ; genre = '0'

                self.list.append({'title': title, 'originaltitle': title, 'duration': duration, 'year': year, 'genre': genre, 'name': name, 'tvdb': tvdb, 'poster': poster, 'banner': '0', 'fanart': '0', 'lang': lang})
            except:
                pass

        return self.list
    except:
        pass

    return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = client.request(url)
        #print result
        result = result.replace('\n','')
        print result

        quality = re.compile('>Links - Quality(.+?)<').findall(result)[0]
        quality = quality.strip()
        print("Q", quality)

        if quality == 'CAM' or quality == 'TS': quality = 'CAM'
        elif quality == 'SCREENER': quality = 'SCR'
        else: quality = 'SD'

        links = client.parseDOM(result, 'div', attrs = {'id': 'links'})[0]
        links = links.split('link_name')

        for i in links:
            try:
                url = client.parseDOM(i, 'a', ret='href')[0]
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except: pass
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
                except: pass
                url = urlparse.urlparse(url).query
                url = base64.b64decode(url)
                url = re.findall('((?:http|https)://.+?/.+?)(?:&|$)', url)[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                print("URL1", url)

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                #if not host in hostDict: raise Exception()
                print(host.split('.')[0], hostDict)
                if not host.split('.')[0] in hostDict:
                    if not host.split('.')[0] in hosthdDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                print("URL4", host)

                sources.append({'source': host.split('.')[0], 'quality': 'SD', 'provider': 'Movie25', 'url': url})
            except:
                pass

        return sources
    except:
        return sources
def check(self, i):
    try:
        url = client.replaceHTMLCodes(i[0])
        url = url.encode('utf-8')

        result = ''
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            try:
                opener = urllib2.build_opener(NoRedirection)
                opener.addheaders = [('User-Agent', 'Apple-iPhone')]
                opener.addheaders = [('Referer', base_link + url)]
                response = opener.open(base_link + url)
                result = response.read()
                response.close()
            except:
                result = ''
            if 'myButton' in result: break

        url = re.compile('class=[\'|\"]*myButton.+?href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(result)[0]
        url = client.replaceHTMLCodes(url)
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
        except: pass

        host = urlparse.urlparse(url).netloc
        host = host.replace('www.', '').replace('embed.', '')
        host = host.rsplit('.', 1)[0]
        host = host.lower()
        host = client.replaceHTMLCodes(host)
        host = host.encode('utf-8')
        #control.log('WWWW WATCHSERIES RESOLVE-2 host: %s url: %s ' % (host,url))

        #if host == 'openload': check = openload.check(url)
        #elif host == 'streamin': check = streamin.check(url)
        #elif host == 'cloudzilla': check = cloudzilla.check(url)
        #elif host == 'zstream': check = zstream.check(url)
        #elif host == 'vidspot': check = vidspot.check(url)
        if host == 'up2stream': raise Exception()
        if host == 'mightyupload': raise Exception()

        self.sources.append({'source': host, 'quality': i[1], 'provider': 'Watchseries', 'url': url})
    except:
        pass
def get_sources(self, url, hosthdDict, hostDict, locDict):
    try:
        sources = []

        if url == None: return sources

        url = url.replace("/json/", "/")

        result = ""
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, url), headers=self.headers)
            if "lang_1" in str(result): break

        result = result.replace("\n", "")
        result = result.decode("iso-8859-1").encode("utf-8")
        result = client.parseDOM(result, "div", attrs={"id": "lang_1"})[0]

        links = re.compile("href=['|\"|\s|\<]*(.+?)['|\"|\s|\>].+?title=['|\"|\s|\<]*(.+?)['|\"|\s|\>]").findall(result)
        links = [x for y, x in enumerate(links) if x not in links[:y]]

        for i in links:
            try:
                host = i[1]
                host = host.split(".", 1)[0]
                host = host.strip().lower()
                if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode("utf-8")

                url = i[0]
                url = client.replaceHTMLCodes(url)
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)["u"][0]
                except: pass
                if not url.startswith("http"): url = urlparse.urljoin(self.base_link, url)
                if not "/cale/" in url: raise Exception()
                url = url.encode("utf-8")

                sources.append({"source": host, "quality": "SD", "provider": "Watchseries", "url": url})
            except:
                pass

        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    try:
        sources = []

        if url == None: return sources

        result = ''
        links = [self.link_1, self.link_2, self.link_3]
        for base_link in links:
            result = client.source(urlparse.urljoin(base_link, url), headers=self.headers)
            if 'original-title' in str(result): break

        links = client.parseDOM(result, 'tr', attrs = {'id': 'pt.+?'})

        for i in links:
            try:
                lang = re.compile('<img src=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[1]
                if not 'English' in lang: raise Exception()

                host = re.compile('<img src=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[0]
                host = host.split('/')[-1]
                host = host.split('.')[-3]
                host = host.strip().lower()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                if '>Cam<' in i or '>TS<' in i: quality = 'CAM'
                elif '>HD<' in i and host in hosthdDict: quality = 'HD'
                else: quality = 'SD'

                if quality == 'HD' and not host in hosthdDict: raise Exception()
                if quality == 'SD' and not host in hostDict: raise Exception()

                if '>3D<' in i: info = '3D'
                else: info = ''

                url = re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(i)[0]
                url = client.replaceHTMLCodes(url)
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except: pass
                if url.startswith('http'): url = urlparse.urlparse(url).path
                if not url.startswith('http'): url = urlparse.urljoin(self.base_link, url)
                url = url.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'provider': 'Iwatchonline', 'url': url, 'info': info})
            except:
                pass

        return sources
    except:
        return sources
def cartoon_list(self, url):
    try:
        url = urlparse.urljoin(self.cartoons_link, url)

        r = client.request(url, output='extended')
        result = r[0] ; headers = r[3]

        items = client.parseDOM(result, 'div', attrs = {'class': 'anime_movies_items'})
        try: items += client.parseDOM(result, 'ul', attrs = {'class': 'listin.+?'})[0].split('</li>')
        except: pass
    except:
        return

    try:
        next = client.parseDOM(result, 'li', attrs = {'class': 'page'})
        next = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in next]
        next = [(i[0][0], i[1][0]) for i in next if len(i[0]) > 0 and len(i[1]) > 0]
        next = [i[0] for i in next if 'raquo' in i[1]][0]
        next = urlparse.urljoin(self.cartoons_link, next)
        next = client.replaceHTMLCodes(next)
        next = next.encode('utf-8')
    except:
        next = ''

    for item in items:
        try:
            try: title = client.parseDOM(item, 'a')[0]
            except: pass
            try: title = client.parseDOM(item, 'a', ret='title')[0]
            except: pass
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            url = client.parseDOM(item, 'a', ret='href')[0]
            url = urlparse.urljoin(self.cartoons_link, url)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            image = client.parseDOM(item, 'img', ret='src')[0]
            image = urlparse.urljoin(self.cartoons_link, image)
            image += '|' + urllib.urlencode(headers)
            image = client.replaceHTMLCodes(image)
            image = image.encode('utf-8')

            self.list.append({'title': title, 'url': url, 'image': image, 'next': next})
        except:
            pass

    return self.list
def get_show(self, tvshowtitle, season, imdb=None, tvdb=None, year=None, proxy_options=None, key=None):
    try:
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_show', 'Provider Disabled by User')
            return None

        t = cleantitle.get(tvshowtitle)
        year = '%s' % year

        q = urlparse.urljoin(self.base_link, self.search_link)
        q = q % urllib.quote_plus(tvshowtitle)

        #r = client.request(q)
        r = proxies.request(q, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)

        r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
        r = client.parseDOM(r, 'li')

        if len(r) == 0: raise Exception('Could not find a matching show title: %s' % tvshowtitle)

        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i)) for i in r]
        r = [(i[0][0], i[1][0], i[2][-1]) for i in r if i[0] and i[1] and i[2]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        r = r[0][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except Exception as e:
        log('ERROR', 'get_show', '%s: %s' % (tvshowtitle, e), dolog=self.init)
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        data = os.path.join(control.dataPath, 'movietv.db')
        try: control.deleteFile(data)
        except: pass

        data = os.path.join(control.dataPath, 'movietv2.db')

        download = True
        try: download = abs(datetime.datetime.fromtimestamp(os.path.getmtime(data)) - (datetime.datetime.now())) > datetime.timedelta(days=7)
        except: pass

        if download == True:
            result = client.source(base64.b64decode(self.data_link))
            zip = zipfile.ZipFile(StringIO.StringIO(result))
            zip.extractall(control.dataPath)
            zip.close()

        dbcon = database.connect(data)
        dbcur = dbcon.cursor()
        dbcur.execute("SELECT * FROM tvshows WHERE year = '%s'" % year)
        result = dbcur.fetchone()
        result = eval(result[1].encode('utf-8'))

        tvshowtitle = cleantitle.tv(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

        result = [i for i in result if tvshowtitle == cleantitle.tv(i[2])]
        result = [i[0] for i in result if any(x in i[3] for x in years)][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def get_movie(self, imdb, title, year):
    try:
        query = urlparse.urljoin(self.base_link, self.search_link + urllib.quote_plus(title))

        result = cloudflare.source(query)
        #if result == None: result = client.source(self.__proxy() + urllib.quote_plus(query))

        r = client.parseDOM(result, 'li', attrs={'class': 'first element.+?'})
        r += client.parseDOM(result, 'li', attrs={'class': 'element.+?'})
        r += client.parseDOM(result, 'header', attrs={'class': 'entry-heade.+?'})

        title = cleantitle.movie(title)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], re.compile('(.+?)(\.|\(|\[|\s)(\d{4})').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][-1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]

        url = client.replaceHTMLCodes(result)
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')

        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    try:
        print "URL", url
        sources = []

        if url == None: return sources

        url1 = urlparse.urljoin(self.base_link, url)

        result = client.request(url1)
        url1 = client.parseDOM(result, 'a', attrs={'id': 'main-down'}, ret='href')[0]
        print "LINKS1", url1

        result = client.request(url1)
        print "LINKS2", result

        for quality in ['720p', '1080p']:
            links = client.parseDOM(result, 'div', attrs={'class': '.+?' + quality})[0]
            links = client.parseDOM(links, 'li')
            links = [(client.parseDOM(i, 'a', ret='href')[0]) for i in links]

            if '1080p' in quality: q = '1080p'
            elif '720p' in quality or 'hd' in quality: q = 'HD'
            else: q = 'SD'

            for j in links:
                print "j", j
                host = j.split('/')[2]
                host = host.strip().lower()
                host = client.replaceHTMLCodes(host)
                if not host in hostDict: raise Exception()
                host = host.encode('utf-8')
                print "HOST", host, j

                sources.append({'source': host, 'quality': q, 'provider': 'Filmxy', 'url': j})

            print "LINKS3", links

        return sources
    except Exception as e:
        control.log('Filmxy Source ERROR %s' % e)
        return sources
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        """
        url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
        url = urllib.urlencode(url)
        return url
    except:
        return None
        """
        tk = cache.get(self.putlocker_token, 8)
        set = self.putlocker_set()
        rt = self.putlocker_rt(tk + set)
        sl = self.putlocker_sl()
        tm = int(time.time() * 1000)

        headers = {'X-Requested-With': 'XMLHttpRequest'}

        url = urlparse.urljoin(self.base_link, self.search_link)

        post = {'q': tvshowtitle.lower(), 'limit': '100', 'timestamp': tm, 'verifiedCheck': tk, 'set': set, 'rt': rt, 'sl': sl}
        post = urllib.urlencode(post)

        r = client.request(url, post=post, headers=headers)
        print(">>>", r)
        r = json.loads(r)

        t = cleantitle.get(tvshowtitle)

        r = [i for i in r if 'year' in i and 'meta' in i]
        r = [(i['permalink'], i['title'], str(i['year']), i['meta'].lower()) for i in r]
        r = [i for i in r if 'tv' in i[3]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = cloudflare.source(url)

        links = client.parseDOM(result, 'div', attrs = {'class': 'server_line.+?'})

        for i in links:
            try:
                host = client.parseDOM(i, 'p', attrs = {'class': 'server_servername'})[0]
                host = re.compile('Server (.+?)$').findall(host)[0]
                host = host.strip().lower()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                url = client.parseDOM(i, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = urlparse.urljoin(self.base_link, url)
                url = url.encode('utf-8')

                if 'google' in host:
                    url = self.__resolve(client.source(url))
                    for u in url:
                        sources.append({'source': 'GVideo', 'quality': u['quality'], 'provider': 'Tunemovie', 'url': u['url']})
                elif host in hostDict:
                    sources.append({'source': host, 'quality': 'SD', 'provider': 'Tunemovie', 'url': url})
                else:
                    raise Exception()
            except:
                pass

        return sources
    except:
        return sources
def _info(self, url, year):
    try:
        url = urlparse.urljoin(self.base_link, url)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        u = urlparse.urljoin(self.base_link, self.info_link)
        u = u % re.findall('(\d+)', url)[-1]

        u = client.request(u)
        u = client.parseDOM(u, 'div', attrs={'class': 'jt-info'})[0]

        if year == u: return url
    except:
        return
def parse(url):
    try: url = client.replaceHTMLCodes(url)
    except: pass
    try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
    except: pass
    try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
    except: pass
    return url
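# A minimal sketch of how `parse` above unwraps a proxied link, assuming the proxy
# carries the real target in a 'u' (or 'q') query parameter; the URL below is a
# hypothetical example, not one taken from any provider in this module.
#
#   >>> parse('http://example-proxy.test/go?u=http%3A%2F%2Fhost.example%2Fwatch%2F123')
#   'http://host.example/watch/123'
#
# HTML entities are decoded first via client.replaceHTMLCodes; if neither parameter
# is present, the input URL is returned unchanged.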
def get_sources(self, url, hosthdDict, hostDict, locDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'main_body')

        links = client.parseDOM(result, 'tbody')

        for i in links:
            try:
                url = client.parseDOM(i, 'a', ret='href')[0]
                url = proxy.parse(url)
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
                url = base64.b64decode(url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = host.encode('utf-8')

                quality = client.parseDOM(i, 'span', ret='class')[0]
                if quality == 'quality_cam' or quality == 'quality_ts': quality = 'CAM'
                elif quality == 'quality_dvd': quality = 'SD'
                else: raise Exception()

                sources.append({'source': host, 'quality': quality, 'provider': 'PrimeWire', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return

        query = '%s S%02dE%02d' % (url, int(season), int(episode))
        query = urlparse.urljoin(self.tvbase_link, self.search_link + urllib.quote_plus(query))

        result = client.source(query)
        #if result == None: result = client.source(self.__proxy() + urllib.quote_plus(query))

        r = client.parseDOM(result, 'li', attrs={'class': 'first element.+?'})
        r += client.parseDOM(result, 'li', attrs={'class': 'element.+?'})
        r += client.parseDOM(result, 'header', attrs={'class': 'entry-heade.+?'})

        tvshowtitle = cleantitle.tv(url)
        hdlr = 'S%02dE%02d' % (int(season), int(episode))

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        result = [(i[0][0], i[1][0].upper()) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], re.compile('(.+?) (S\d+E\d+)').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][-1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i[0] for i in result if hdlr == i[2]][0]

        url = client.replaceHTMLCodes(result)
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')

        return url
    except:
        return
def get_movie(self, imdb, title, year):
    # Provider disabled: the early return below skips the URL construction entirely.
    return
    try:
        url = title.replace('\'', '')
        url = re.sub(r'[^a-zA-Z0-9\s]+', ' ', url).lower().strip()
        url = re.sub('\s\s+', ' ', url)
        url = url.replace(' ', '-')
        url = '/%s-%s/' % (url, year)
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        key = urlparse.urljoin(self.base_link, self.key_link)
        key = client.request(key, 'searchform')
        key = client.parseDOM(key, 'input', ret='value', attrs = {'name': 'key'})[0]

        query = self.tvsearch_link % (urllib.quote_plus(cleantitle.query(tvshowtitle)), key)
        query = urlparse.urljoin(self.base_link, query)

        result = str(client.request(query, 'index_item'))
        if 'page=2' in result or 'page%3D2' in result: result += str(client.request(query + '&page=2', 'index_item'))

        result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})

        tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]

        r = []
        for i in result:
            u = i[0]
            try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
            except: pass
            try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['q'][0]
            except: pass
            r += [(u, i[1])]

        match = [i[0] for i in r if tvshowtitle == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

        match2 = [i[0] for i in r]
        match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        for i in match2[:5]:
            try:
                if len(match) > 0: url = match[0] ; break
                r = client.request(urlparse.urljoin(self.base_link, i), 'tv_episode_item')
                if imdb in str(r): url = i ; break
            except:
                pass

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def check(self, i):
    try:
        url = client.replaceHTMLCodes(i[0])
        url = url.encode('utf-8')

        host = urlparse.urlparse(url).netloc
        host = host.replace('www.', '').replace('embed.', '')
        host = host.rsplit('.', 1)[0]
        host = host.lower()
        host = client.replaceHTMLCodes(host)
        host = host.encode('utf-8')

        if host in i[2]: check = url = resolvers.request(url)
        elif host == 'hugefiles': check = hugefiles.check(url)
        elif host == 'uploadrocket': check = uploadrocket.check(url)
        elif host == 'openload': check = openload.check(url)
        else: raise Exception()

        if check == None or check == False: raise Exception()

        self.sources.append({'source': host, 'quality': i[1], 'provider': 'Oneclickwatch', 'url': url})
    except:
        pass
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        result = cache.get(self.serialeonlinepl_cache, 120)

        tvshowtitle = cleantitle.get(tvshowtitle)
        print tvshowtitle

        result = [i[0] for i in result if tvshowtitle in i[1]][0]
        print result

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]

        query = self.search_link % urllib.quote(tvshowtitle)
        query = urlparse.urljoin(self.base_link, query)

        #result = client.source(query)
        result = client2.http_get(query)

        tvshowtitle = cleantitle.tv(tvshowtitle)
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

        result = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h2'), re.compile('class *= *[\'|\"]jt-info[\'|\"]>(\d{4})<').findall(i)) for i in result]
        result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        result = [(i[0], re.compile('(.+?) - Season (\d*)$').findall(i[1]), i[2]) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i for i in result if season == i[2]]
        result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]

        result += '?S%02dE%02d' % (int(season), int(episode))

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    sources = []
    try:
        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = client.request(url)

        links = client.parseDOM(result, 'td', attrs = {'class': 'even tdhost'})
        links += client.parseDOM(result, 'td', attrs = {'class': 'odd tdhost'})

        for i in links:
            try:
                host = client.parseDOM(i, 'a')[0].strip()
                #control.log('#host# %s' % host)
                #host = host.split('<', 1)[0]
                #host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                #host = host.strip().lower()
                if host in hostDict:
                    host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                    host = host.strip().lower()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    url = client.parseDOM(i, 'a', ret='href')[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    print("Url", url)

                    sources.append({'source': host, 'quality': 'SD', 'provider': 'wsonline', 'url': url})
            except:
                pass

        return sources
    except Exception as e:
        control.log('ERROR WSO %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = client.source(url)

        links = re.compile("/watch\.php\?q=(.+?)'").findall(result)

        for i in links:
            try:
                url = base64.urlsafe_b64decode(i.encode('utf-8'))
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                host = base64.urlsafe_b64decode(i.encode('utf-8'))
                host = urlparse.urlparse(host).netloc
                host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                host = host.strip().lower()
                if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': 'SD', 'provider': 'Filmikz', 'url': url})
            except:
                pass

        return sources
    except:
        return sources
def resolve(self, url, resolverList=None):
    logger.debug('%s ORIGINAL URL [%s]' % (__name__, url))
    try:
        url = urlparse.urljoin(self.base_link, url)
        result = client.request(url)
    except:
        pass

    try:
        url = re.compile('"?file"?\s*=\s*"(.+?)"\s+"?label"?\s*=\s*"(\d+)p?"').findall(result)
        url = [(int(i[1]), i[0]) for i in url]
        url = sorted(url, key=lambda k: k[0])
        url = url[-1][1]

        try: u = client.request(url, output='headers', redirect=False)['Location']
        except: u = client.request(url, output='geturl')

        q = directstream.googletag(u)[0]['quality']
        url = u

        if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
        else: url = url.replace('https://', 'http://')

        return url
    except:
        pass

    try:
        url = re.compile('file\s*=\s*"(.+?)"').findall(result)[0]
        if self.base_link in url: raise Exception()
        url = client.replaceHTMLCodes(url)
        return url
    except:
        pass

    try:
        url = json.loads(result)['embed_url']
        logger.debug('%s RESOLVED URL [%s]' % (__name__, url))
        return url
    except:
        pass
def searchShow(self, title, season, year):
    try:
        title = cleantitle.normalize(title)
        t = cleantitle.get(title)

        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s Season %01d' % (title.replace('\'', '-'), int(season)))))

        r = client.request(url, timeout='10')
        r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})

        if r:
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.findall('(.+?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(season) == int(i[2])][0]
        else:
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s %01d' % (title.replace('\'', '-'), int(year)))))

            r = client.request(url, timeout='10')
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        return url.encode('utf-8')
    except:
        return
def __search(self, imdb, titles, year):
    try:
        q = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        q = urlparse.urljoin(self.base_link, q)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(q)

        r = dom_parser.parse_dom(r, 'tr', attrs={'id': re.compile('coverPreview.+?')})
        r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'div', attrs={'style': re.compile('.+?')}), dom_parser.parse_dom(i, 'img', req='src')) for i in r]
        r = [(i[0][0].attrs['href'].strip(), i[0][0].content.strip(), i[1], i[2]) for i in r if i[0] and i[2]]
        r = [(i[0], i[1], [x.content for x in i[2] if x.content.isdigit() and len(x.content) == 4], i[3]) for i in r]
        r = [(i[0], i[1], i[2][0] if i[2] else '0', i[3]) for i in r]
        r = [i for i in r if any('us_flag' in x.attrs['src'] for x in i[3])]
        r = [(i[0], i[1], i[2], [re.findall('(\d+)', x.attrs['src']) for x in i[3] if 'smileys' in x.attrs['src']]) for i in r]
        r = [(i[0], i[1], i[2], [x[0] for x in i[3] if x]) for i in r]
        r = [(i[0], i[1], i[2], int(i[3][0]) if i[3] else 0) for i in r]
        r = sorted(r, key=lambda x: x[3])[::-1]
        r = [(i[0], i[1], i[2], re.findall('\((.+?)\)$', i[1])) for i in r]
        r = [(i[0], i[1], i[2]) for i in r if not i[3]]
        r = [i for i in r if i[2] in y]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [(client.replaceHTMLCodes(i[0]), i[1], i[2]) for i in r]

        match = [i[0] for i in r if cleantitle.get(i[1]) in t and year == i[2]]

        match2 = [i[0] for i in r]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        for i in match2[:5]:
            try:
                if match:
                    url = match[0]
                    break
                r = client.request(urlparse.urljoin(self.base_link, i))
                r = re.findall('(tt\d+)', r)
                if imdb in r:
                    url = i
                    break
            except:
                pass

        return source_utils.strip_domain(url)
    except:
        return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    try:
        if url == None: return

        season = '%01d' % int(season)
        episode = '%01d' % int(episode)

        query = '%s "Season %s" "Episode %s"' % (url, season, episode)
        query = urlparse.urljoin(self.base_link, self.tvsearch_link + urllib.quote_plus(query))

        result = cloudflare.source(query)

        r = client.parseDOM(result, 'li', attrs={'class': 'first element.+?'})
        r += client.parseDOM(result, 'li', attrs={'class': 'element.+?'})
        r += client.parseDOM(result, 'header', attrs={'class': 'entry-header'})

        tvshowtitle = cleantitle.tv(url)

        result = [(client.parseDOM(i, 'a', ret='href'), re.compile('(.+?): Season (\d*).+?Episode (\d*)').findall(i)) for i in r]
        result = [(i[0][0], i[1][-1]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [(i[0], i[1][0].split('>')[-1], i[1][1], i[1][2]) for i in result]
        result = [i for i in result if season == '%01d' % int(i[2]) and episode == '%01d' % int(i[3])]
        result = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        result = cache.get(self.dizimag_shows, 72)

        tvshowtitle = cleantitle.tv(tvshowtitle)

        result = [i[0] for i in result if tvshowtitle == i[1]][0]

        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        result = cache.get(self.dizigold_tvcache, 120)

        tvshowtitle = cleantitle.get(tvshowtitle)

        result = [i[0] for i in result if tvshowtitle == i[1]][0]

        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def get_movie(self, imdb, title, year):
    try:
        query = self.search_link % imdb
        query = urlparse.urljoin(self.base_link, query)

        result = client.source(query)
        result = json.loads(result)
        result = result['movies'][0]['slug']

        url = '/movies/%s' % result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def get_show(self, imdb, tvdb, tvshowtitle, year, season, proxy_options=None, key=None):
    try:
        url = '%s (%s)' % (tvshowtitle, year)
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        show = data['url'].split('/')[4]

        r = urlparse.urljoin(self.base_link, self.episode_link % (show, season, episode))

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        pass
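# A minimal sketch of the query-string contract `episode` above appears to expect,
# assuming the show-level `url` was produced with urllib.urlencode and that the wrapped
# 'url' value places the show slug at path segment 4; both values are hypothetical.
#
#   >>> show_url = urllib.urlencode({'url': 'http://site.example/tv/some-show/about', 'imdb': 'tt0000000'})
#   >>> urlparse.parse_qs(show_url)['url'][0].split('/')[4]
#   'some-show'
#
# episode() then formats self.episode_link with that slug plus the season and episode numbers.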
def get_show(self, imdb, tvdb, tvshowtitle, year):
    try:
        result = cache.get(self.pelispedia_tvcache, 120)

        tvshowtitle = cleantitle.get(tvshowtitle)

        result = [i[0] for i in result if tvshowtitle == i[1] and year == i[2]][0]

        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def get_movie(self, imdb, title, year):
    try:
        url = self.search_link % (cleantitle.geturl(title), year)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url, limit='1')
        r = client.parseDOM(r, 'title')[0]
        if r == '': raise Exception()

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        return
def cartoon_resolver(self, url):
    try:
        url = urlparse.urljoin(self.cartoons_link, url)

        result = client.request(url)
        result = client.parseDOM(result, 'div', attrs = {'id': 'divDownload'})[0]

        url = client.parseDOM(result, 'a', ret='href')[-1]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        pass
def resolve(self, url):
    try:
        h = urlparse.urlparse(url.strip().lower()).netloc

        r = client.request(url)
        r = r.rsplit('"underplayer"')[0].rsplit("'underplayer'")[0]

        u = re.findall('\'(.+?)\'', r) + re.findall('\"(.+?)\"', r)
        u = [client.replaceHTMLCodes(i) for i in u]
        u = [i for i in u if i.startswith('http') and not h in i]

        url = u[-1].encode('utf-8')

        return url
    except:
        return