def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from a 123putlocker server-list page.

    url         -- page URL to scrape (None yields an empty result)
    hostDict    -- known free hosters passed to is_host_valid
    hostprDict  -- premium hosters (unused by this scraper)
    Returns a list of source dicts; empty list on any failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        r = self.scraper.get(url).content
        match = re.compile(
            '<p class="server_version"><img src="http://123putlocker.io/themes/movies/img/icon/server/(.+?).png" width="16" height="16" /> <a href="(.+?)">'
        ).findall(r)
        for host, link in match:
            # NOTE(review): the original had a no-op `if host == 'internet':
            # pass` branch here; removed as dead code (it changed nothing).
            if source_utils.limit_hosts() is True and host in str(sources):
                continue
            valid, host = source_utils.is_host_valid(host, hostDict)
            if valid:
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'en',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })
        return sources
    except Exception:
        # Best-effort scraper: swallow network/parse errors, return what we have.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from a solarmovie server-list page.

    Bug fix: both except paths previously executed a bare `return`,
    handing None to callers that expect an iterable; they now return
    the (possibly empty) sources list.
    Returns a list of source dicts; empty list on any failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        r = self.scraper.get(url).content
        match = re.compile(
            '<p class="server_version"><img src="http://www1.solarmovie.net/themes/movies/img/icon/server/(.+?).png" width="16" height="16" /> <a href="(.+?)">'
        ).findall(r)
        for host, link in match:
            if source_utils.limit_hosts() is True and host in str(sources):
                continue
            quality, info = source_utils.get_release_quality(link, link)
            valid, host = source_utils.is_host_valid(host, hostDict)
            if valid:
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'info': info,
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })
        return sources
    except Exception:
        # Was `return` (None); callers iterate the result, so return the list.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect embed links from a playlist page and map them to hosters.

    Spaces in the incoming URL are converted to dashes before fetching;
    each playlist entry id becomes a `<base>/embed/<id>` link.
    """
    found = []
    try:
        if url is None:
            return found
        page = self.session.get(url.replace(' ', '-'),
                                headers=self.headers).content
        pattern = re.compile(
            '<li class="playlist_entry " id="(.+?)"><a><div class="list_number">.+?</div>(.+?)<span>></span></a></li>',
            re.DOTALL)
        for entry_id, raw_host in pattern.findall(page):
            embed = self.base_link + '/embed/' + entry_id
            valid, host = source_utils.is_host_valid(raw_host, hostDict)
            if source_utils.limit_hosts() is True and host in str(found):
                continue
            if not valid:
                continue
            found.append({'source': host, 'quality': 'SD', 'language': 'en',
                          'url': embed, 'direct': False, 'debridonly': False})
        return found
    except Exception:
        return found
def sources(self, url, hostDict, hostprDict):
    """Pull data-src-player embed links from a French index page.

    Free and premium hoster lists are merged before validation; results
    are tagged language 'fr'.
    """
    results = []
    try:
        all_hosts = hostDict + hostprDict
        if url is None:
            return results
        html = client.request(url)
        for link in re.findall('data-src-player="(.+?)"', html):
            valid, host = source_utils.is_host_valid(link, all_hosts)
            if not valid:
                continue
            if source_utils.limit_hosts() is True and host in str(results):
                continue
            quality, info = source_utils.get_release_quality(link, link)
            results.append({'source': host, 'quality': quality,
                            'language': 'fr', 'url': link, 'info': info,
                            'direct': False, 'debridonly': False})
        return results
    except Exception:
        return results
def sources(self, url, hostDict, hostprDict):
    """Decode base64 watch links from 'cale.html?r=' anchors.

    The r= query parameter is a base64-encoded target URL; the anchor
    title carries the hoster name.
    """
    entries = []
    try:
        if url is None:
            return entries
        html = self.scraper.get(url).content
        pairs = re.findall(
            'cale\.html\?r=(.+?)" class="watchlink" title="(.+?)"', html)
        for encoded, raw_host in pairs:
            link = base64.b64decode(encoded)
            valid, host = source_utils.is_host_valid(raw_host, hostDict)
            if source_utils.limit_hosts() is True and host in str(entries):
                continue
            if not valid:
                continue
            quality, info = source_utils.get_release_quality(link, link)
            entries.append({'source': host, 'quality': quality,
                            'language': 'en', 'info': info, 'url': link,
                            'direct': False, 'debridonly': False})
        return entries
    except Exception:
        return entries
def sources(self, url, hostDict, hostprDict):
    """Extract IFRAME embeds and rebuild them as http:// links.

    The embed URL keeps any leading 'www.'; only the host-name passed to
    validation has it stripped.
    """
    collected = []
    try:
        if url is None:
            return collected
        page = self.scraper.get(url).content
        for domain, path in re.findall('<IFRAME.+?SRC=.+?//(.+?)/(.+?)"', page):
            link = 'http://%s/%s' % (domain, path)
            valid, host = source_utils.is_host_valid(
                domain.replace('www.', ''), hostDict)
            if source_utils.limit_hosts() is True and host in str(collected):
                continue
            if not valid:
                continue
            collected.append({'source': host, 'quality': 'SD',
                              'language': 'en', 'url': link,
                              'direct': False, 'debridonly': False})
        return collected
    except Exception:
        return collected
def sources(self, url, hostDict, hostprDict):
    """Parse callvalue('...','...','scheme://host/path') script entries.

    De-duplicates on both the host name (when limit_hosts is on) and the
    exact rebuilt URL.
    """
    out = []
    try:
        if url is None:
            return out
        body = self.scraper.get(url).content
        triples = re.findall("callvalue\('.+?','.+?','(.+?)://(.+?)/(.+?)'\)",
                             body, re.DOTALL)
        for scheme, domain, path in triples:
            link = '%s://%s/%s' % (scheme, domain, path)
            if source_utils.limit_hosts() is True and domain in str(out):
                continue
            if link in str(out):
                continue
            valid, host = source_utils.is_host_valid(domain, hostDict)
            if not valid:
                continue
            out.append({'source': host, 'quality': 'SD', 'language': 'en',
                        'url': link, 'direct': False, 'debridonly': False})
        return out
    except Exception:
        return out
def sources(self, url, hostDict, hostprDict):
    """Scrape movie_version_link spans for data-href hoster links.

    Bug fix: `sources` is now initialised before the url check.  The
    original executed `return sources` before `sources = []`, so a None
    url raised NameError — which the bare except's own `return sources`
    then re-raised to the caller.
    """
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        headers = {'Referer': url}
        r = self.scraper.get(url, headers=headers).content
        spans = client.parseDOM(r, "span",
                                attrs={"class": "movie_version_link"})
        for span in spans:
            for link in client.parseDOM(span, 'a', ret='data-href'):
                if link in str(sources):
                    continue
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                quality, info = source_utils.get_release_quality(link, link)
                if source_utils.limit_hosts() is True and host in str(sources):
                    continue
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'info': info, 'url': link,
                                'direct': False, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Read linkplayer anchors and turn each href into a source entry."""
    picks = []
    try:
        if url is None:
            return picks
        page = self.scraper.get(url).content
        for href in re.findall('id="linkplayer.+?href="(.+?)"', page,
                               re.DOTALL):
            valid, host = source_utils.is_host_valid(href, hostDict)
            if source_utils.limit_hosts() is True and host in str(picks):
                continue
            if not valid:
                continue
            quality, info = source_utils.get_release_quality(href, href)
            picks.append({'source': host, 'quality': quality,
                          'language': 'en', 'info': info, 'url': href,
                          'direct': False, 'debridonly': False})
        return picks
    except Exception:
        return picks
def sources(self, url, hostDict, hostprDict):
    """Build urlencoded server descriptors from the servers-list block.

    Quality/info come from the LAST ">word</p" match on the page — the
    original looped over every match and kept only the final value;
    that behaviour (including the NameError-into-except path when no
    match exists) is preserved.
    """
    items = []
    try:
        combined = hostprDict + hostDict
        if url is None:
            return items
        html = self.scraper.get(url).content
        for tag in re.findall(">(\w+)<\/p", html):
            quality, info = source_utils.get_release_quality(tag, tag)
        blocks = dom_parser.parse_dom(html, 'div', {'id': 'servers-list'})
        anchors = [dom_parser.parse_dom(b, 'a', req=['href'])
                   for b in blocks if b]
        for a in anchors[0]:
            payload = urllib.urlencode({'url': a.attrs['href'],
                                        'data-film': a.attrs['data-film'],
                                        'data-server': a.attrs['data-server'],
                                        'data-name': a.attrs['data-name']})
            valid, host = source_utils.is_host_valid(a.content, combined)
            if source_utils.limit_hosts() is True and host in str(items):
                continue
            if not valid:
                continue
            items.append({'source': host, 'quality': quality,
                          'language': 'en', 'info': info, 'url': payload,
                          'direct': False, 'debridonly': False})
        return items
    except Exception:
        return items
def sources(self, url, hostDict, hostprDict):
    """Harvest nofollow anchor links via the getSum helper.

    Entries whose hoster text contains 'font14' are decoration rows and
    are skipped; hrefs are site-relative and prefixed with base_link.
    """
    found = []
    try:
        if url is None:
            return found
        merged = hostprDict + hostDict
        page = getSum.get(url)
        pairs = getSum.findEm(
            page, 'rel="nofollow" target="_blank" href="(.+?)">(.+?)</a>')
        for href, hoster in pairs:
            if 'font14' in hoster:
                continue
            full = self.base_link + href
            valid, host = source_utils.is_host_valid(hoster, merged)
            if source_utils.limit_hosts() is True and host in str(found):
                continue
            if not valid:
                continue
            found.append({'source': host, 'quality': 'SD', 'language': 'en',
                          'url': full, 'direct': False, 'debridonly': False})
        return found
    except Exception:
        return found
def sources(self, url, hostDict, hostprDict):
    """Scrape 'nofollow ugc' titled anchors into source entries.

    The anchor title carries the hoster name; hrefs are site-relative
    and prefixed with base_link.
    """
    results = []
    try:
        if url is None:
            return results
        merged = hostprDict + hostDict
        page = client.request(url, headers=self.headers)
        pattern = re.compile(
            'rel="nofollow ugc" title="(.+?)" target="_blank" href="(.+?)">',
            flags=re.DOTALL | re.IGNORECASE)
        for hoster, href in pattern.findall(page):
            link = self.base_link + href
            valid, host = source_utils.is_host_valid(hoster, merged)
            if source_utils.limit_hosts() is True and host in str(results):
                continue
            if not valid:
                continue
            results.append({'source': host, 'quality': 'SD',
                            'language': 'en', 'url': link,
                            'direct': False, 'debridonly': False})
        return results
    except Exception:
        return results
def sources(self, url, hostDict, hostprDict):
    """Scrape report('...') onclick links, resolving direct streams.

    Fetches the page up to three times, breaking as soon as a response
    body is obtained.  Each link is validated, then expanded through
    check_directstreams which may yield several quality variants.

    Idiom fix: `if not result is None` rewritten as the PEP 8 form
    `if result is not None` (behaviour unchanged).
    """
    sources = []
    try:
        if url is None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        result = None
        for _ in range(3):
            result = self.scraper.get(url).content
            if result is not None:
                break
        links = re.compile('onclick="report\(\'([^\']+)').findall(result)
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(
                    link, hoster)
                if source_utils.limit_hosts() is True and host in str(sources):
                    continue
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'],
                                    'language': 'en', 'url': x['url'],
                                    'direct': direct, 'debridonly': False})
            except Exception:
                # Per-link best effort: a bad entry must not kill the rest.
                pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Find IFRAME SRC embeds, normalising scheme-relative URLs.

    A src that does not start with 'http' (e.g. '//host/...') gets an
    'https:' prefix before validation.
    """
    gathered = []
    try:
        if url is None:
            return gathered
        merged = hostDict + hostprDict
        body = self.session.get(url, headers=self.headers).content
        frames = re.findall('<IFRAME.+?SRC="(.+?)"', body,
                            re.DOTALL | re.IGNORECASE)
        for frame in frames:
            link = frame if frame.startswith('http') else 'https:' + frame
            valid, host = source_utils.is_host_valid(link, merged)
            if not valid:
                continue
            if source_utils.limit_hosts() is True and host in str(gathered):
                continue
            quality, info = source_utils.get_release_quality(link, link)
            gathered.append({'source': host, 'quality': quality,
                             'language': 'en', 'info': info, 'url': link,
                             'direct': False, 'debridonly': False})
        return gathered
    except Exception:
        return gathered
def sources(self, url, hostDict, hostprDict):
    """Collect watch-button data-actuallink targets as SD sources."""
    links = []
    try:
        if url is None:
            return links
        merged = hostprDict + hostDict
        html = client.request(url)
        targets = re.findall('class="watch-button" data-actuallink="(.+?)"',
                             html)
        for target in targets:
            if target in str(links):
                continue
            valid, host = source_utils.is_host_valid(target, merged)
            if source_utils.limit_hosts() is True and host in str(links):
                continue
            if not valid:
                continue
            links.append({'source': host, 'quality': 'SD', 'language': 'en',
                          'url': target, 'direct': False,
                          'debridonly': False})
        return links
    except Exception:
        return links
def sources(self, url, hostDict, hostprDict):
    """Read linkdom2 table cells for titled hoster anchors.

    The anchor title carries the hoster name, the href the target URL.
    """
    acc = []
    try:
        if url is None:
            return acc
        merged = hostprDict + hostDict
        page = getSum.get(url)
        rows = getSum.findEm(
            page,
            '<td class="linkdom2"><span><a class="btn-xs" rel="nofollow" title="(.+?)" target="_blank" href="(.+?)"'
        )
        for hoster, href in rows or []:
            valid, host = source_utils.is_host_valid(hoster, merged)
            if not valid:
                continue
            if source_utils.limit_hosts() is True and host in str(acc):
                continue
            acc.append({'source': host, 'quality': 'SD', 'language': 'en',
                        'url': href, 'direct': False, 'debridonly': False})
        return acc
    except Exception:
        return acc
def sources(self, url, hostDict, hostprDict):
    """Search the site by title/year, then scrape alternate host links.

    url encodes a querystring with 'title' and 'year'; matching result
    pages are fetched and their dropdown alternates split on 'e=' to get
    the hoster URL.

    Bug fix: the url-is-None early exit now returns the (empty) sources
    list; the original executed a bare `return` (None), breaking callers
    that iterate the result.
    """
    sources = []
    try:
        if url is None:
            return sources
        urldata = urlparse.parse_qs(url)
        urldata = dict((i, urldata[i][0]) for i in urldata)
        title = urldata['title'].replace(':', ' ').lower()
        year = urldata['year']
        search_id = title.lower()
        start_url = urlparse.urljoin(
            self.base_link,
            self.search_link % (search_id.replace(' ', '+') + '+' + year))
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
        }
        html = self.scraper.get(start_url, headers=headers).content
        result_links = re.compile(
            '<div class="col-8">.+?<a href="(.+?)" class="baslikust h5">(.+?)"',
            re.DOTALL).findall(html)
        for link, name in result_links:
            # Only follow results matching both title and year.
            if title.lower() not in name.lower() or year not in name:
                continue
            holder = self.scraper.get(link, headers=headers).content
            alternates = re.compile(
                '<button class="text-capitalize dropdown-item" value="(.+?)"',
                re.DOTALL).findall(holder)
            for alt_link in alternates:
                alt_url = alt_link.split("e=")[1]
                if alt_url in str(sources):
                    continue
                valid, host = source_utils.is_host_valid(alt_url, hostDict)
                if source_utils.limit_hosts() is True and host in str(sources):
                    continue
                if valid:
                    sources.append({'source': host, 'quality': '1080p',
                                    'language': 'en', 'url': alt_url,
                                    'info': [], 'direct': False,
                                    'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape w3-btn hoster anchors; title = hoster, href = relative link.

    Bug fixes versus the original:
    - entries are only appended when is_host_valid() accepts the hoster
      (the original computed `valid` and then ignored it, appending
      unvalidated hosts);
    - the limit_hosts de-dup now checks the normalised `host` name, not
      the raw title attribute, matching every sibling scraper here.
    """
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        page = self.session.get(url, headers=self.headers).content
        match = re.compile(
            '<a class="w3-btn w3-white w3-border w3-round" title="(.+?)" rel="nofollow" target="_blank" href="(.+?)" class',
            re.DOTALL).findall(page)
        for hoster, href in match:
            valid, host = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue
            if source_utils.limit_hosts() is True and host in str(sources):
                continue
            link = self.base_link + href
            sources.append({'source': host, 'quality': 'SD',
                            'language': 'en', 'url': link,
                            'direct': False, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Parse ll-item rows into (href, hoster-text, notes) source entries.

    The notes column (release info) feeds get_release_quality; links and
    host names are HTML-unescaped and utf-8 encoded before use.  Rows
    without a notes div fall back to the literal string 'None'.

    Cleanup: removed a dead `info = []` assignment that was immediately
    overwritten by get_release_quality's return value.
    """
    sources = []
    try:
        if url is None:
            return sources
        r = client.request(url)
        r = dom_parser.parse_dom(r, 'div', {'class': 'll-item'})
        r = [(dom_parser.parse_dom(i, 'a', req='href'),
              dom_parser.parse_dom(i, 'div', {'class': 'notes'}))
             for i in r if i]
        r = [(i[0][0].attrs['href'], i[0][0].content,
              i[1][0].content if i[1] else 'None') for i in r]
        for i in r:
            try:
                link = client.replaceHTMLCodes(i[0]).encode('utf-8')
                if link in str(sources):
                    continue
                valid, host = source_utils.is_host_valid(i[1], hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host).encode('utf-8')
                if source_utils.limit_hosts() is True and host in str(sources):
                    continue
                quality, info = source_utils.get_release_quality(i[2], i[2])
                info = ' | '.join(info)
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': link, 'info': info,
                                'direct': False, 'debridonly': False})
            except Exception:
                # Per-row best effort: a malformed row must not kill the rest.
                pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Walk tablemoviesindex2 rows, reading the host from the img alt text.

    The alt text's first word (minus its TLD) names the hoster; the
    row's first anchor holds the relative stream URL.
    """
    sources = []
    try:
        if url is None:
            return sources
        page_url = urlparse.urljoin(self.base_link, url)
        body = self.scraper.get(page_url).content.replace('\\"', '"')
        rows = dom_parser.parse_dom(body, 'tr',
                                    attrs={'id': 'tablemoviesindex2'})
        for row in rows:
            try:
                alt = dom_parser.parse_dom(row, 'img',
                                           req='alt')[0].attrs['alt']
                name = alt.split()[0].rsplit('.', 1)[0].strip().lower()
                name = name.encode('utf-8')
                if source_utils.limit_hosts() is True and name in str(sources):
                    continue
                valid, host = source_utils.is_host_valid(name, hostDict)
                if not valid:
                    continue
                href = dom_parser.parse_dom(row, 'a',
                                            req='href')[0].attrs['href']
                link = urlparse.urljoin(self.base_link,
                                        client.replaceHTMLCodes(href))
                link = link.encode('utf-8')
                if link in str(sources):
                    continue
                sources.append({'source': host, 'quality': 'SD',
                                'language': 'en', 'url': link,
                                'direct': False, 'debridonly': False})
            except Exception:
                pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Two-phase scrape: a Base64-hidden iframe embed, then server_line rows.

    Phase 1 decodes a document.write(Base64.decode(...)) payload and, if an
    iframe src is found inside, adds it as a source.  Phase 2 parses the
    visible server_line divs.  Every phase is wrapped in its own bare
    except so a failure in one never blocks the other.
    Returns a list of source dicts; empty list on total failure.
    """
    try:
        sources = []
        if url == None:
            return sources
        # Cached page fetch (1-hour TTL via the cache helper).
        r = cache.get(client.request, 1, url)
        try:
            # Phase 1: the page hides an embed in a Base64 document.write.
            v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
            b64 = base64.b64decode(v)
            url = client.parseDOM(b64, 'iframe', ret='src')[0]
            try:
                # Host = last two dot-separated labels of the iframe netloc.
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                valid, host = source_utils.is_host_valid(host, hostDict)
                if valid:
                    # '\/'-escaped slashes come from the JSON-ish payload.
                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': False, 'debridonly': False})
            except:
                pass
        except:
            pass
        # Phase 2: plain server_line entries (anchor href + server name <p>).
        r = client.parseDOM(r, 'div', {'class': 'server_line'})
        r = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
        if r:
            for i in r:
                try:
                    # Strip 'Server'/'Link N' boilerplate to get the host name.
                    host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                    url = i[0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if source_utils.limit_hosts() is True and host in str(sources):
                        continue
                    if valid:
                        sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': False, 'debridonly': False})
                except:
                    pass
        return sources
    except :
        return sources
def sources(self, url, hostDict, hostprDict):
    """Map server_play paragraphs to hosters named in each anchor's href.

    The hoster name is the '<name>.html' stem of the anchor href; free
    and premium hoster lists are merged before validation.
    """
    sources = []
    try:
        merged = hostDict + hostprDict
        if url is None:
            return sources
        body = self.scraper.get(url).content
        paras = dom_parser.parse_dom(body, 'p', {'class': 'server_play'})
        anchor_sets = [dom_parser.parse_dom(p, 'a', req=['href'])
                       for p in paras if p]
        pairs = []
        for anchors in anchor_sets:
            if not anchors:
                continue
            href = anchors[0].attrs['href']
            m = re.search('/(\w+).html', href)
            if href and m:
                pairs.append((href, m.groups()[0]))
        for href, raw in pairs:
            try:
                name = client.replaceHTMLCodes(raw).encode('utf-8')
                valid, host = source_utils.is_host_valid(name, merged)
                if source_utils.limit_hosts() is True and host in str(sources):
                    continue
                if valid:
                    sources.append({'source': host, 'quality': 'SD',
                                    'language': 'en',
                                    'url': href.replace('\/', '/'),
                                    'direct': False, 'debridonly': False})
            except Exception:
                pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve milversite streams for either a TV episode or a movie.

    url is a '$$$$$'-joined tuple: page-url, title, year, type.  The
    'tv' branch queries episode.php with vid=<url> and scrapes 'fullm'
    anchors; the movie branch scrapes the page for SUBS entries matching
    title+year, follows each, and POSTs every 'redirect' id to
    morgan.php, whose response body IS the stream URL.  All results are
    hard-coded to 720p.  Accumulates into self.sources and returns it.
    """
    try:
        self.sources = []
        if url == None:
            return self.sources
        # Unpack the '$$$$$'-delimited composite url.
        data = url.split('$$$$$')
        url = data[0]
        title = data[1]
        year = data[2]
        type = data[3]
        if type == 'tv':
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
                'Accept': 'text/html, */*; q=0.01',
                'Accept-Language': 'en-US,en;q=0.5',
                'Referer': url,
                'X-Requested-With': 'XMLHttpRequest',
                'Connection': 'keep-alive',
                'Pragma': 'no-cache',
                'Cache-Control': 'no-cache',
                'TE': 'Trailers',
            }
            params = (('vid', url), )
            response = requests.get('https://www.milversite.live/episode.php', headers=headers, params=params).content
            regex = 'class="fullm"><a href="(.+?)"'
            match2 = re.compile(regex).findall(response)
            for link_in in match2:
                quality = "720p"
                # Host = netloc of the link, minus scheme and leading 'www.'.
                host = link_in.replace('\n', '').strip().split('//')[1].replace('www.', '')
                host = host.split('/')[0].lower()
                if source_utils.limit_hosts() is True and host in str(self.sources):
                    continue
                valid, host = source_utils.is_host_valid(host, hostDict)
                if valid:
                    self.sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link_in, 'direct': False, 'debridonly': False})
        else:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
                'Accept': '*/*',
                'Accept-Language': 'en-US,en;q=0.5',
                'Referer': url,
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'X-Requested-With': 'XMLHttpRequest',
                'Connection': 'keep-alive',
                'Pragma': 'no-cache',
                'Cache-Control': 'no-cache',
                'TE': 'Trailers',
            }
            r = client.request(url, timeout=10)
            match = re.compile("<div id=\"hd_s_g\">SUBS</div><a title = '(.+?)' href='(.+?)'").findall(r)
            for items, link in match:
                # Only follow entries matching both the title and the year.
                if title.lower() in items.lower() and year in items:
                    y = client.request(link, timeout=10)
                    match2 = re.compile('class="redirect" id="(.+?)"').findall(y)
                    for link_in in match2:
                        # NOTE: rebinds the outer `data` list; harmless because
                        # title/year/type were already extracted above.
                        data = {'id': link_in}
                        # morgan.php answers with the raw stream URL as its body.
                        response = requests.post('https://www.milversite.live/morgan.php', headers=headers, data=data).content
                        quality = "720p"
                        host = response.replace('\n', '').strip().split('//')[1].replace('www.', '')
                        host = host.split('/')[0].lower()
                        if source_utils.limit_hosts() is True and host in str(self.sources):
                            continue
                        valid, host = source_utils.is_host_valid(host, hostDict)
                        if valid:
                            self.sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': response.replace('\n', '').strip(), 'direct': False, 'debridonly': False})
        return self.sources
    except:
        return self.sources