def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from a page's data-src-player attributes.

    Returns a list of source dicts (language 'fr'); empty list on any
    failure or when *url* is None.
    """
    # Define the accumulator before anything that can raise, so the
    # exception handler can always return it (the original defined it
    # after `hostDict + hostprDict`, risking NameError in the handler).
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostDict + hostprDict
        html = client.request(url)
        links = re.compile('data-src-player="(.+?)"').findall(html)
        for link in links:
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            # Optionally cap at one link per host.
            if source_utils.limit_hosts() is True and host in str(sources):
                continue
            quality, info = source_utils.get_release_quality(link, link)
            sources.append({
                'source': host,
                'quality': quality,
                'language': 'fr',
                'url': link,
                'info': info,
                'direct': False,
                'debridonly': False
            })
        return sources
    except Exception:
        # Best-effort scraper: return whatever was collected so far.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from movie_version_link spans on *url*.

    Returns a list of source dicts (language 'en'); empty list on any
    failure or when *url* is None.
    """
    # BUG FIX: the original checked `url is None` and returned `sources`
    # before `sources = []` executed, raising NameError (which the bare
    # handler then re-raised by referencing the same undefined name).
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        headers = {'Referer': url}
        r = self.scraper.get(url, headers=headers).content
        spans = client.parseDOM(r, "span",
                                attrs={"class": "movie_version_link"})
        for span in spans:
            for link in client.parseDOM(span, 'a', ret='data-href'):
                # Skip duplicates already collected.
                if link in str(sources):
                    continue
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                quality, info = source_utils.get_release_quality(link, link)
                # Optionally cap at one link per host.
                if source_utils.limit_hosts() is True and host in str(sources):
                    continue
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'info': info,
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })
        return sources
    except Exception:
        # Best-effort scraper: return whatever was collected so far.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape IFRAME-embedded hoster links from *url*.

    Returns a list of source dicts (quality 'SD', language 'en');
    empty list on any failure or when *url* is None.
    """
    sources = []
    try:
        if url is None:
            return sources
        r = self.scraper.get(url).content
        try:
            match = re.compile('<IFRAME.+?SRC=.+?//(.+?)/(.+?)"').findall(r)
            for host, path in match:
                link = 'http://%s/%s' % (host, path)
                host = host.replace('www.', '')
                valid, host = source_utils.is_host_valid(host, hostDict)
                # Optionally cap at one link per host.
                if source_utils.limit_hosts() is True and host in str(sources):
                    continue
                if valid:
                    sources.append({'source': host, 'quality': 'SD',
                                    'language': 'en', 'url': link,
                                    'direct': False, 'debridonly': False})
        except Exception:
            # BUG FIX: the original `return` (and the outer bare
            # `except: return`) returned None; callers expect a list.
            return sources
    except Exception:
        return sources
    return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links anchored at id="linkplayer..." elements.

    Returns a list of source dicts (language 'en'); empty list on any
    failure or when *url* is None.
    """
    sources = []
    try:
        if url is None:  # idiom fix: was `url == None`
            return sources
        html = self.scraper.get(url).content
        links = re.compile('id="linkplayer.+?href="(.+?)"',
                           re.DOTALL).findall(html)
        for link in links:
            valid, host = source_utils.is_host_valid(link, hostDict)
            # Optionally cap at one link per host (checked before the
            # validity guard, matching the original ordering).
            if source_utils.limit_hosts() is True and host in str(sources):
                continue
            if valid:
                quality, info = source_utils.get_release_quality(link, link)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'info': info,
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })
        return sources
    except Exception:
        # Best-effort scraper: return whatever was collected so far.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from the page's #servers-list container.

    The per-server payload (href plus data-film/server/name attributes)
    is urlencoded into the 'url' field for the resolver to unpack later.
    Returns a list of source dicts; empty list on any failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        r = self.scraper.get(url).content
        # NOTE(review): only the LAST ">(\w+)</p" match survives this
        # loop, so every appended source carries that single quality —
        # preserved from the original; confirm against the site markup.
        qual = re.findall(">(\w+)<\/p", r)
        for q in qual:
            quality, info = source_utils.get_release_quality(q, q)
        servers = dom_parser.parse_dom(r, 'div', {'id': 'servers-list'})
        servers = [dom_parser.parse_dom(s, 'a', req=['href'])
                   for s in servers if s]
        for anchor in servers[0]:
            payload = {'url': anchor.attrs['href'],
                       'data-film': anchor.attrs['data-film'],
                       'data-server': anchor.attrs['data-server'],
                       'data-name': anchor.attrs['data-name']}
            payload = urllib.urlencode(payload)
            valid, host = source_utils.is_host_valid(anchor.content, hostDict)
            # Optionally cap at one link per host.
            if source_utils.limit_hosts() is True and host in str(sources):
                continue
            if valid:
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'info': info,
                                'url': payload, 'direct': False,
                                'debridonly': False})
        return sources
    except Exception:
        # Best-effort scraper: return whatever was collected so far.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from onclick="report('...')" handlers.

    Fetches the page (up to 3 attempts), validates each reported link
    against hostDict, and expands direct streams via
    source_utils.check_directstreams. Returns a list of source dicts.
    """
    sources = []
    try:
        if url is None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        # Retry the fetch up to three times; stop on the first response.
        for _ in range(3):
            result = self.scraper.get(url).content
            if result is not None:  # idiom fix: was `not result is None`
                break
        links = re.compile('onclick="report\(\'([^\']+)').findall(result)
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(
                    link, hoster)
                # Optionally cap at one link per host.
                if source_utils.limit_hosts() is True and host in str(sources):
                    continue
                for stream in urls:
                    sources.append({
                        'source': host,
                        'quality': stream['quality'],
                        'language': 'en',
                        'url': stream['url'],
                        'direct': direct,
                        'debridonly': False
                    })
            except Exception:
                # Skip any single malformed link, keep scraping the rest.
                pass
        return sources
    except Exception:
        # Best-effort scraper: return whatever was collected so far.
        return sources
def sources(self, url, hostDict, hostprDict):
    # Collect hoster links from two places on the page:
    #   1) a Base64-packed document.write() iframe embed, and
    #   2) the visible "server_line" list of named server links.
    # Each accepted link becomes an SD, non-direct source dict.
    try:
        sources = []
        if url == None:
            return sources
        page = cache.get(client.request, 1, url)

        # --- Phase 1: decode the packed iframe embed, if present ---
        try:
            packed = re.findall('document.write\(Base64.decode\("(.+?)"\)', page)[0]
            decoded = base64.b64decode(packed)
            url = client.parseDOM(decoded, 'iframe', ret='src')[0]
            try:
                # Derive the host from the iframe URL's netloc tail.
                host = re.findall('([\w]+[.][\w]+)$',
                                  urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                try:
                    host = host.encode('utf-8')
                except:
                    pass
                valid, host = source_utils.is_host_valid(host, hostDict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url.replace('\/', '/'),
                        'direct': False,
                        'debridonly': False
                    })
            except:
                pass
        except:
            pass

        # --- Phase 2: walk the server_line entries ---
        rows = client.parseDOM(page, 'div', {'class': 'server_line'})
        rows = [(client.parseDOM(row, 'a', ret='href')[0],
                 client.parseDOM(row, 'p',
                                 attrs={'class': 'server_servername'})[0])
                for row in rows]
        if rows:
            for href, server_label in rows:
                try:
                    # Strip the "Server"/"Link N" prefix to get the host name.
                    host = re.sub('Server|Link\s*\d+', '', server_label).lower()
                    url = href
                    host = client.replaceHTMLCodes(host)
                    try:
                        host = host.encode('utf-8')
                    except:
                        pass
                    if 'other' in host:
                        continue
                    # Optionally cap at one link per host.
                    if source_utils.limit_hosts() is True and host in str(sources):
                        continue
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if valid:
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url.replace('\/', '/'),
                            'direct': False,
                            'debridonly': False
                        })
                except:
                    pass
        return sources
    except:
        return sources