Example #1
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = self.scraper.get(url).content
         try:
             match = re.compile(
                 '<iframe class="metaframe rptss" src="https\://veohb\.net/(.+?)"'
             ).findall(r)
             for url in match:
                 url = 'https://veohb.net/' + url
                 info = source_utils.check_url(url)
                 quality = source_utils.check_url(url)
                 sources.append({
                     'source': 'veohb',
                     'quality': quality,
                     'language': 'en',
                     'info': info,
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         except:
             return
     except Exception:
         return
     return sources
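Most examples on this page share one skeleton: derive a quality label from the URL with source_utils.check_url, validate the host with source_utils.is_host_valid, and append a source dict. Below is a minimal sketch of that pattern, with the helper signatures inferred from the calls in these examples (treat them as assumptions, not the module's documented API):

def build_source(url, hostDict):
    # check_url appears to derive a quality label (e.g. 'SD', '720p') from the URL
    quality = source_utils.check_url(url)
    # is_host_valid appears to return (bool, canonical_host_name)
    valid, host = source_utils.is_host_valid(url, hostDict)
    if not valid:
        return None  # unknown hosts are dropped by every scraper here
    return {'source': host, 'quality': quality, 'language': 'en',
            'url': url, 'direct': False, 'debridonly': False}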
Example #2
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = client.request(url)
         try:
             match = re.compile(
                 'href="(.+?)" rel="noindex\,nofollow">Watch This Link</a>'
             ).findall(r)
             for url in match:
                 r = client.request(url)
                 match = re.compile(
                     '<a href="(.+?)://(.+?)/(.+?)"><button class="wpb\_button  wpb\_btn\-primary wpb\_regularsize"> Click Here To Play</button> </a>'
                 ).findall(r)
                 for http, host, url in match:
                     url = '%s://%s/%s' % (http, host, url)
                     info = source_utils.check_url(url)
                     quality = source_utils.check_url(url)
                     valid, host = source_utils.is_host_valid(
                         host, hostDict)
                     if valid:
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'info': info,
                             'url': url,
                             'direct': False,
                             'debridonly': False
                         })
         except:
             return
     except Exception:
         return
     return sources
Example #3
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         q = '%s' % cleantitle.geturl(data['title'])
         url = self.base_link + self.search_link % q.replace('-', '+')
         r = self.scraper.get(url).content
         v = re.compile(
             '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\n<span class=".+?">(.+?)</span>'
         ).findall(r)
         for url, check, quality in v:
             t = '%s (%s)' % (data['title'], data['year'])
             if t not in check: raise Exception()
             r = self.scraper.get(url + '/watch.html').content
             url = re.compile('<iframe.+?src="(.+?)"').findall(r)[0]
             quality = source_utils.check_url(quality)
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except BaseException:
         return sources
Example #4
 def work(self, link, testDict):
     if str(link).startswith("http"):
         link = self.getlink(link)
         q = source_utils.check_url(link)
         valid, host = source_utils.is_host_valid(link, testDict)
         if not valid: return 0
         return host, q, link
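A hedged usage sketch for work(): it returns a (host, quality, link) tuple on success, 0 when the host is invalid, and falls through to None when the link is not an HTTP URL, so callers must test the result before unpacking (scraper below is a hypothetical instance of the class this method belongs to):

result = scraper.work(link, testDict)
if result:  # filters out both the 0 sentinel and the implicit None
    host, quality, url = result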
Example #5
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         r = client.request(url)
         try:
             match = re.compile('<iframe .+? src="(.+?)"').findall(r)
             for url in match:
                 quality = source_utils.check_url(url)
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         except:
             return
     except Exception:
         return
     return sources
Example #6
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostprDict + hostDict
         r = client.request(url)
         qual = re.compile('class="quality">(.+?)<').findall(r)
         for i in qual:
             # keeps the last quality label listed on the page
             quality = source_utils.check_url(i)
             info = i
         u = client.parseDOM(r,
                             "div",
                             attrs={"class": "pa-main anime_muti_link"})
         for t in u:
             u = re.findall('data-video="(.+?)"', t)
             for url in u:
                 if 'vidcloud' in url:
                     continue
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
             return sources
     except:
         return
Example #7
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         r = self.scraper.get(url).content
         try:
             match = re.compile('<iframe src="(.+?)"').findall(r)
             for url in match:
                 quality = source_utils.check_url(url)
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         except:
             return
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('1putlocker - Exception: \n' + str(failure))
         return
     return sources
Example #8
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
			r = client.request(url)
			if r is None:
				return sources

			try:
				# match = re.compile('<iframe src=".+?//(.+?)/(.+?)"').findall(r)
				match = re.compile('<iframe src=".*//(.+?)/(.+?)"').findall(r)

				for host, url in match:
					url = 'https://%s/%s' % (host, url)
					quality = source_utils.check_url(url)
					host = host.replace('www.', '')
					valid, host = source_utils.is_host_valid(host, hostDict)
					if valid:
						sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
						                'direct': False, 'debridonly': False})
			except:
				source_utils.scraper_error('HDPOPCORNEU')
				return
		except Exception:
			source_utils.scraper_error('HDPOPCORNEU')
			return
		return sources
Example #9
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         html = client.request(url)
         # 'Quanlity' deliberately matches the site's own misspelled markup
         quality = re.compile(
             '<div>Quanlity: <span class="quanlity">(.+?)</span></div>',
             re.DOTALL).findall(html)
         for qual in quality:
             quality = source_utils.check_url(qual)
             info = qual
         links = re.compile('var link_.+? = "(.+?)"',
                            re.DOTALL).findall(html)
         for url in links:
             if not url.startswith('http'):
                 url = "https:" + url
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'info': info,
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('FmoviesIO - Exception: \n' + str(failure))
         return sources
Example #10
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = client.request(url)
         try:
             match = re.compile('<iframe src=".+?//(.+?)/(.+?)"').findall(r)
             for host, url in match:
                 url = 'https://%s/%s' % (host, url)
                 quality = source_utils.check_url(url)
                 host = host.replace('www.', '')
                 valid, host = source_utils.is_host_valid(host, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         except:
             return
     except Exception:
         return
     return sources
Example #11
def more_cdapl(link, hostDict, lang, info):
    if "cda.pl" in link:
        sources = []
        try:
            headers = {
                'User-Agent':
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3555.0 Safari/537.36"
            }
            response = requests.get(link, headers=headers).content
            test = client.parseDOM(response,
                                   'div',
                                   attrs={'class': 'wrapqualitybtn'})
            urls = client.parseDOM(test, 'a', ret='href')
            if urls:
                for url in urls:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    q = source_utils.check_url(url)
                    direct = re.findall(
                        """file":"(.*)","file_cast""",
                        requests.get(url, headers=headers).content)[0].replace(
                            "\\/", "/")
                    sources.append({
                        'source': 'CDA',
                        'quality': q,
                        'language': lang,
                        'url': direct,
                        'info': info,
                        'direct': True,
                        'debridonly': False
                    })
            return sources
        except:
            return sources
    else:
        return []
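The key step in more_cdapl is pulling the direct file URL out of the cda.pl player JSON and unescaping the backslashed slashes. A standalone illustration against a hypothetical response fragment:

import re

# hypothetical fragment of the player JSON matched by more_cdapl above
payload = '"file":"https:\\/\\/vcdn.example\\/video.mp4","file_cast":"..."'
direct = re.findall(r'file":"(.*)","file_cast', payload)[0].replace("\\/", "/")
print(direct)  # -> https://vcdn.example/video.mp4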
Example #12
def more_rapidvideo(link, hostDict, lang, info):
    if "rapidvideo.com" in link:
        sources = []
        try:
            headers = {
                'User-Agent':
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3555.0 Safari/537.36"
            }
            response = requests.get(link, headers=headers).content
            test = re.findall("""(https:\/\/www.rapidvideo.com\/e\/.*)">""",
                              response)
            numGroups = len(test)
            # starts at index 1, so the first match (test[0]) is skipped
            for i in range(1, numGroups):
                url = test[i]
                valid, host = source_utils.is_host_valid(url, hostDict)
                q = source_utils.check_url(url)
                sources.append({
                    'source': host,
                    'quality': q,
                    'language': lang,
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except:
            return sources
    else:
        return []
Example #13
 def sources(self, url, hostDict, hostprDict):
     try:
         hostDict = hostprDict + hostDict
         sources = []
         if url is None:
             return sources
         headers = {'User-Agent': self.User_Agent}
         html = requests.get(url, headers=headers, timeout=10).content
         qual = re.compile('<div class="cf">.+?class="quality">(.+?)</td>',
                           re.DOTALL).findall(html)
         for i in qual:
             quality = source_utils.check_url(i)
         links = re.compile('data-href="(.+?)"', re.DOTALL).findall(html)
         for link in links:
             if 'http' not in link:
                 link = 'https://' + link
             valid, host = source_utils.is_host_valid(link, hostDict)
             if valid and link not in str(sources):
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': link,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         return sources
Example #14
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         r = self.scraper.get(url).content
         match = re.compile('<div><iframe src="(.+?)"').findall(r)
         for url in match:
             host = url.split('//')[1].replace('www.', '')
             host = host.split('/')[0].split('.')[0].title()
             quality = source_utils.check_url(url)
             r = self.scraper.get(url).content
             if 'http' in url:
                 match = re.compile("url: '(.+?)',").findall(r)
             else:
                 match = re.compile('file: "(.+?)",').findall(r)
             for url in match:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'info': '',
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
     except:
         source_utils.scraper_error('ANIMETOON')
         return
     return sources
Example #15
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         hostDict = hostprDict + hostDict
         r = self.scraper.get(url).content
         u = client.parseDOM(r, "ul", attrs={"id": "serverul"})
         for t in u:
             u = client.parseDOM(t, 'a', ret='href')
             for url in u:
                 if 'getlink' in url:
                     continue
                 quality = source_utils.check_url(url)
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
             return sources
     except:
         return
Example #16
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         quality = source_utils.check_url(url)
         sources.append({'source': 'Direct', 'quality': quality, 'language': 'en', 'url': url, 'direct': True,
                         'debridonly': False})
         return sources
     except:
         return
Example #17
	def sources(self, url, hostDict, hostprDict):
		scraper = cfscrape.create_scraper()
		sources = []
		try:
			if url is None:
				return sources

			if debrid.status() is False:
				return sources

			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			q = '%s' % cleantitle.get_gan_url(data['title'])

			url = self.base_link + self.search_link % q
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

			r = scraper.get(url).content
			v = re.compile('<a href="(.+?)" class="ml-mask jt" title="(.+?)">\s+<span class=".+?">(.+?)</span>').findall(r)
			t = '%s (%s)' % (data['title'], data['year'])

			for url, check, quality in v:
				if t not in check:
					continue

				key = url.split('-hd')[1]

				r = scraper.get('https://fmovies.tw/moviedownload.php?q=' + key).content
				r = re.compile('<a rel=".+?" href="(.+?)" target=".+?">').findall(r)

				for url in r:
					if any(x in url for x in ['.rar']):
						continue

					quality = source_utils.check_url(quality)

					valid, host = source_utils.is_host_valid(url, hostDict)
					if not valid:
						continue

# Size info would need a second request: this code goes straight to the download links rather than loading the post-query info page where the size is listed.
					dsize = 0

					sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
									'debridonly': True, 'size': dsize})
			return sources
		except:
			return sources
Example #18
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = client.request(url)
         try:
             match = re.compile(
                 '<li><a href="(.+?)" rel="nofollow">(.+?)<').findall(r)
             for url, check in match:
                 info = source_utils.check_url(url)
                 quality = source_utils.check_url(url)
                 sources.append({
                     'source': 'Direct',
                     'quality': quality,
                     'language': 'en',
                     'info': info,
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         except:
             return
     except Exception:
         return
     return sources
Example #19
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            q = '%s' % cleantitle.get_gan_url(data['title'])

            url = self.base_link + self.search_link % q

            r = self.scraper.get(url).content

            v = re.compile(
                '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\r\n\t\t\t\t\t\t\t\t\t\t\t\t<span class=".+?">(.+?)</span>').findall(
                r)

            for url, check, quality in v:
                t = '%s (%s)' % (data['title'], data['year'])

                if t not in check:
                    raise Exception()

                key = url.split('-hd')[1]

                r = self.scraper.get('https://ganool.ws/moviedownload.php?q=' + key).content
                r = re.compile('<a rel=".+?" href="(.+?)" target=".+?">').findall(r)

                for url in r:
                    if any(x in url for x in ['.rar']):
                        continue

                    quality = source_utils.check_url(quality)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
                                    'debridonly': True})
            return sources
        except:
            return sources
Example #20
 def sources(self, url, hostDict, hostprDict):
     try:
         # import pydevd
         # pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True)
         sources = []
         result = self.session.get(url).content
         result = result.decode('utf-8')
         h = HTMLParser()
         result = h.unescape(result)
         result = client.parseDOM(result,
                                  'div',
                                  attrs={'class': 'tabela_wiersz mb-1'})
         for counter, item in enumerate(result, 0):
             try:
                 test = client.parseDOM(result,
                                        'span',
                                        attrs={'class': 'tabela_text'})
                 info = test[(2 + (3 * counter))]
                 info = self.get_lang_by_type(info)
                 quality = test[(1 + (3 * counter))]
                 quality = source_utils.check_url(quality)
                 try:
                     id = re.findall("""ShowMovie\('(.*?)'\)""", item)[0]
                 except:
                     id = re.findall("""ShowSer\('(.*?)'\)""", item)[0]
                 try:
                     host = re.findall("""<\/i> (.*?)<\/span>""", item)[0]
                     if 'serial' in url:
                         id = id + '/s'
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': info[0],
                         'url': id,
                         'info': info[1],
                         'direct': False,
                         'debridonly': False
                     })
                 except:
                     continue
             except:
                 continue
         return sources
     except:
         return sources
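The stride-3 indexing above assumes every result row contributes exactly three 'tabela_text' spans. A small illustration with hypothetical data (the first span of each row is not used by the example):

spans = ['host1', 'HD', 'Lektor PL', 'host2', 'SD', 'Napisy PL']
for i in range(len(spans) // 3):
    # index 3*i + 1 is the row's quality, 3*i + 2 its language info
    quality, info = spans[1 + 3 * i], spans[2 + 3 * i]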
Example #21
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
            }
            result = self.scraper.get(url, headers=headers).content
            streams = re.compile(
                'data-player="&lt;[A-Za-z]{6}\s[A-Za-z]{3}=&quot;(.+?)&quot;',
                re.DOTALL).findall(result)

            for link in streams:
                quality = source_utils.check_url(link)
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].lower()

                if quality == 'SD':
                    sources.append({
                        'source': host,
                        'quality': '720p',
                        'info': '',
                        'language': 'en',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
                else:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'info': '',
                        'language': 'en',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })

            return sources
        except:
            source_utils.scraper_error('FILEXY')
            return sources
Example #22
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         headers = {'User-Agent': User_Agent}
         html = requests.get(url, headers=headers, timeout=10).content
         try:
             qual = re.compile(
                 '<div class="cf">.+?class="quality">(.+?)</td>',
                 re.DOTALL).findall(html)
             for i in qual:
                 quality = source_utils.check_url(i)
             links = re.compile('li class=.+?data-href="(.+?)"',
                                re.DOTALL).findall(html)
             for link in links:
                 if 'http' not in link:
                     link = 'http:' + link
                 host = link.split('//')[1].replace('www.', '')
                 host = host.split('/')[0].split('.')[0].title()
                 valid, host = source_utils.is_host_valid(host, hostDict)
                 if link in str(sources):
                     continue
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': link,
                         'direct': False,
                         'debridonly': False
                     })
             return sources
         except:
             return
     except Exception:
         return
     return sources
Example #23
	def sources(self, url, hostDict, hostprDict):

		sources = []
		try:
			if url is None:
				return sources

			typ = url[4]

			headers = {
				"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0",
				"http.content_type": "application/x-www-form-urlencoded; charset=UTF-8"
			}
			data = ''
			if typ == "SERIAL":
				title = url[0]
				id = url[1]
				year = url[2]
				orgtitle = url[3]
				sezon = url[5]
				epizod = url[6]
				# note: unlike the FILM branch below, originalTitle is omitted
				# here when orgtitle != "0" (the condition looks inverted)
				if orgtitle != "0":
					data = {"id": int(id),
					        "type": typ,
					        "title": title,
					        "year": int(year),
					        "sezon": str(sezon),
					        "odcinek": str(epizod),
					        "site": "filmdb",
					        "browser": "chrome"
					        }
				else:
					data = {"id": int(id),
					        "type": typ,
					        "title": title,
					        "originalTitle": str(orgtitle),
					        "year": int(year),
					        "sezon": str(sezon),
					        "odcinek": str(epizod),
					        "site": "filmdb",
					        "browser": "chrome"
					        }
			if typ == "FILM":
				title = url[0]
				id = url[1]
				year = url[2]
				orgtitle = url[3]
				if orgtitle != "0":
					data = {"id": int(id),
					        "type": typ,
					        "title": str(title),
					        "originalTitle": str(orgtitle),
					        "year": int(year),
					        "site": "filmdb",
					        "browser": "chrome"
					        }
				else:
					data = {"id": int(id),
					        "type": typ,
					        "title": str(title),
					        "year": int(year),
					        "site": "filmdb",
					        "browser": "chrome"
					        }
			data = {"json": json.dumps(data, ensure_ascii=False)}
			response = requests.post("http://fboost.pl/api/api.php", data=data, headers=headers)
			content = json.loads(response.content)
			for code in zip(content[u'link'], content[u'wersja']):
				wersja = str(code[1])
				lang, info = self.get_lang_by_type(wersja)
				test = requests.post("http://fboost.pl/api/player.php?src=%s" % code[0]).content
				link = re.search("""iframe src="(.*)" style""", test)
				link = link.group(1)
				if len(link) < 2:
					continue
				if "cda.pl" in link:
					try:
						response = requests.get(link).content
						test = client.parseDOM(response, 'div', attrs={'class': 'wrapqualitybtn'})
						urls = client.parseDOM(test, 'a', ret='href')
						for url in urls:
							valid, host = source_utils.is_host_valid(url, hostDict)
							q = source_utils.check_url(url)
							sources.append({'source': host, 'quality': q, 'language': lang, 'url': url, 'info': info,
							                'direct': False, 'debridonly': False})
						continue
					except:
						pass
				if "rapidvideo.com" in link:
					try:
						response = requests.get(link).content
						test = re.findall("""(https:\/\/www.rapidvideo.com\/e\/.*)">""", response)
						numGroups = len(test)
						for i in range(1, numGroups):
							url = test[i]
							valid, host = source_utils.is_host_valid(url, hostDict)
							q = source_utils.check_url(url)
							sources.append({'source': host, 'quality': q, 'language': lang, 'url': url, 'info': info,
							                'direct': False, 'debridonly': False})
						continue
					except:
						pass
				valid, host = source_utils.is_host_valid(link, hostDict)
				q = source_utils.check_url(link)
				sources.append(
					{'source': host, 'quality': q, 'language': lang, 'url': link, 'info': info, 'direct': False,
					 'debridonly': False})
			return sources
		except:
			return sources
Example #24
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         if not str(url).startswith('http'):
             data = urlparse.parse_qs(url)
             data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
             title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
             if 'season' in data:
                 season = data['season']
             if 'episode' in data:
                 episode = data['episode']
             year = data['year']
             r = client.request(self.base_link, output='extended', timeout='10')
             cookie = r[4]
             headers = r[3]
             result = r[0]
             headers['Cookie'] = cookie
             query = urlparse.urljoin(self.base_link,
                                      self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
             r = client.request(query, headers=headers, XHR=True)
             r = json.loads(r)['content']
             r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
             if 'tvshowtitle' in data:
                 cltitle = cleantitle.get(title + 'season' + season)
                 cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
                 r = [i for i in r if cltitle == cleantitle.get(i[1]) or cltitle2 == cleantitle.get(i[1])]
                 vurl = '%s%s-episode-%s' % (self.base_link, str(r[0][0]).replace('/info', ''), episode)
                 vurl2 = None
             else:
                 cltitle = cleantitle.getsearch(title)
                 cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                 r = [i for i in r if
                      cltitle2 == cleantitle.getsearch(i[1]) or cltitle == cleantitle.getsearch(i[1])]
                 vurl = '%s%s-episode-0' % (self.base_link, str(r[0][0]).replace('/info', ''))
                 vurl2 = '%s%s-episode-1' % (self.base_link, str(r[0][0]).replace('/info', ''))
             r = client.request(vurl, headers=headers)
             headers['Referer'] = vurl
             slinks = client.parseDOM(r, 'div', attrs={'class': 'anime_muti_link'})
             slinks = client.parseDOM(slinks, 'li', ret='data-video')
             if len(slinks) == 0 and vurl2 is not None:
                 r = client.request(vurl2, headers=headers)
                 headers['Referer'] = vurl2
                 slinks = client.parseDOM(r, 'div', attrs={'class': 'anime_muti_link'})
                 slinks = client.parseDOM(slinks, 'li', ret='data-video')
             for slink in slinks:
                 try:
                     if 'vidnode.net/streaming.php' in slink:
                         r = client.request('https:%s' % slink, headers=headers)
                         clinks = re.findall(r'sources:\[(.*?)\]', r)[0]
                         clinks = re.findall(r'file:\s*\'(http[^\']+)\',label:\s*\'(\d+)', clinks)
                         for clink in clinks:
                             q = source_utils.label_to_quality(clink[1])
                             sources.append(
                                 {'source': 'cdn', 'quality': q, 'language': 'en', 'url': clink[0], 'direct': True,
                                  'debridonly': False})
                     else:
                         quality = source_utils.check_url(slink)
                         valid, hoster = source_utils.is_host_valid(slink, hostDict)
                         if valid:
                             sources.append({'source': hoster, 'quality': quality, 'language': 'en', 'url': slink,
                                             'direct': False, 'debridonly': False})
                 except:
                     pass
         return sources
     except:
         return sources
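The vidnode branch above scrapes a JavaScript sources:[...] array with two regexes. A standalone illustration against a hypothetical streaming.php fragment:

import re

page = "sources:[{file: 'http://cdn.example/v.mp4',label: '720 P',type: 'mp4'}]"
block = re.findall(r'sources:\[(.*?)\]', page)[0]
# captures (file_url, numeric_label) pairs, e.g. [('http://cdn.example/v.mp4', '720')]
links = re.findall(r"file:\s*'(http[^']+)',label:\s*'(\d+)", block)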
Example #25
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         data = parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         imdb = data['imdb']
         aliases = eval(data['aliases'])
         headers = {}
         if 'tvshowtitle' in data:
             url = self.searchShow(title, int(data['season']),
                                   int(data['episode']), aliases, headers)
         else:
             url = self.searchMovie(title, data['year'], aliases, headers)
         # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
         r = client.request(url,
                            headers=headers,
                            output='extended',
                            timeout='10')
         if r is None:
             return sources
         if imdb not in r[0]:
             return sources
         cookie = r[4]
         headers = r[3]
         result = r[0]
         try:
             r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
             for i in r:
                 sources.append({
                     'source': 'gvideo',
                     'quality': directstream.googletag(i)[0]['quality'],
                     'language': 'en',
                     'url': i,
                     'direct': True,
                     'debridonly': False
                 })
         except:
             source_utils.scraper_error('CARTOONHD')
             pass
         try:
             auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
         except:
             auth = 'false'
         auth = 'Bearer %s' % unquote_plus(auth)
         headers['Authorization'] = auth
         headers['Referer'] = url
         u = '/ajax/vsozrflxcw.php'
         self.base_link = client.request(self.base_link,
                                         headers=headers,
                                         output='geturl')
         u = urljoin(self.base_link, u)
         action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
         elid = quote(base64.encodestring(str(int(time.time()))).strip())
         token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
         idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
         post = {
             'action': action,
             'idEl': idEl,
             'token': token,
             'nopop': '',
             'elid': elid
         }
         post = urlencode(post)
         cookie += ';%s=%s' % (idEl, elid)
         headers['Cookie'] = cookie
         r = client.request(u,
                            post=post,
                            headers=headers,
                            cookie=cookie,
                            XHR=True)
         r = str(json.loads(r))
         r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
         for i in r:
             if 'google' in i:
                 quality = 'SD'
                 if 'googleapis' in i:
                     quality = source_utils.check_url(i)
                 elif 'googleusercontent' in i:
                     i = directstream.googleproxy(i)
                     quality = directstream.googletag(i)[0]['quality']
                 sources.append({
                     'source': 'gvideo',
                     'quality': quality,
                     'language': 'en',
                     'url': i,
                     'direct': True,
                     'debridonly': False
                 })
             elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                 quality = source_utils.check_url(i)
                 sources.append({
                     'source': 'CDN',
                     'quality': quality,
                     'language': 'en',
                     'url': i,
                     'direct': True,
                     'debridonly': False
                 })
             else:
                 valid, hoster = source_utils.is_host_valid(i, hostDict)
                 if not valid:
                     continue
                 sources.append({
                     'source': hoster,
                     'quality': '720p',
                     'language': 'en',
                     'url': i,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         source_utils.scraper_error('CARTOONHD')
         return sources
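One portability note on the example above: base64.encodestring is Python 2 only (deprecated in Python 3 and removed in 3.9). A sketch of the Python 3 equivalent, assuming the endpoint merely expects a base64-encoded timestamp:

import base64
import time
from urllib.parse import quote

elid = quote(base64.b64encode(str(int(time.time())).encode()).strip())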
Example #26
    def sources(self, url, hostDict, hostprDict):
        sources = []

        hostDict = hostprDict + hostDict

        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title'].replace('&', 'and')
            year = data['year']

            search = title.lower()

            url = urlparse.urljoin(
                self.base_link, self.search_link % (search.replace(' ', '+')))

            shell = requests.Session()

            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
            }
            Digital = shell.get(url, headers=headers).content

            BlackFlag = re.compile(
                'data-movie-id="" class="ml-item".+?href="(.+?)" class="ml-mask jt".+?<div class="moviename">(.+?)</div>',
                re.DOTALL).findall(Digital)

            for Digibox, Powder in BlackFlag:
                if title.lower() in Powder.lower():
                    if year in str(Powder):
                        r = shell.get(Digibox, headers=headers).content
                        quals = re.compile(
                            '<strong>Quality:</strong>\s+<a href=.+?>(.+?)</a>',
                            re.DOTALL).findall(r)

                        for url in quals:
                            quality = source_utils.check_url(url)

                        # findall returns a list; take the first key so the
                        # POST payload carries a plain string
                        key = re.compile("var randomKeyNo = '(.+?)'",
                                         re.DOTALL).findall(r)[0]
                        post_link = urlparse.urljoin(self.base_link,
                                                     self.download_links)
                        payload = {'key': key}
                        post = shell.post(post_link,
                                          headers=headers,
                                          data=payload)
                        response = post.content

                        grab = re.compile(
                            '<a rel="\w+" href="(.+?)">\w{5}\s\w+\s\w+\s\w+\s\w{5}<\/a>',
                            re.DOTALL).findall(response)

                        # only the links scraped from the last page in grab
                        # survive for the loop below
                        for links in grab:
                            r = shell.get(links, headers=headers).content
                            links = re.compile(
                                '<a rel="\w+" href="(.+?)" target="\w+">',
                                re.DOTALL).findall(r)

                        for link in links:
                            valid, host = source_utils.is_host_valid(
                                link, hostDict)

                            if 'rar' in link:
                                continue

                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': False,
                                'debridonly': False
                            })

            return sources
        except Exception:
            return sources
Example #27
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         if not str(url).startswith('http'):
             data = parse_qs(url)
             data = dict([(i, data[i][0]) if data[i] else (i, '')
                          for i in data])
             title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                 'title']
             if 'season' in data:
                 season = data['season']
             if 'episode' in data:
                 episode = data['episode']
             year = data['year']
             r = client.request(self.base_link,
                                output='extended',
                                timeout='10')
             cookie = r[4]
             headers = r[3]
             result = r[0]
             headers['Cookie'] = cookie
             query = urljoin(
                 self.base_link,
                 self.search_link % quote_plus(cleantitle.getsearch(title)))
             r = client.request(query, headers=headers, XHR=True)
             r = json.loads(r)['content']
             r = zip(client.parseDOM(r, 'a', ret='href'),
                     client.parseDOM(r, 'a'))
             if 'tvshowtitle' in data:
                 cltitle = cleantitle.get(title + 'season' + season)
                 cltitle2 = cleantitle.get(title +
                                           'season%02d' % int(season))
                 r = [
                     i for i in r if cltitle == cleantitle.get(i[1])
                     or cltitle2 == cleantitle.get(i[1])
                 ]
                 vurl = '%s%s-episode-%s' % (self.base_link, str(
                     r[0][0]).replace('/info', ''), episode)
                 vurl2 = None
             else:
                 cltitle = cleantitle.getsearch(title)
                 cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                 r = [
                     i for i in r if cltitle2 == cleantitle.getsearch(i[1])
                     or cltitle == cleantitle.getsearch(i[1])
                 ]
                 vurl = '%s%s-episode-0' % (self.base_link, str(
                     r[0][0]).replace('/info', ''))
                 vurl2 = '%s%s-episode-1' % (self.base_link, str(
                     r[0][0]).replace('/info', ''))
             r = client.request(vurl, headers=headers)
             headers['Referer'] = vurl
             slinks = client.parseDOM(r,
                                      'div',
                                      attrs={'class': 'anime_muti_link'})
             slinks = client.parseDOM(slinks, 'li', ret='data-video')
             if len(slinks) == 0 and vurl2 is not None:
                 r = client.request(vurl2, headers=headers)
                 headers['Referer'] = vurl2
                 slinks = client.parseDOM(
                     r, 'div', attrs={'class': 'anime_muti_link'})
                 slinks = client.parseDOM(slinks, 'li', ret='data-video')
             for slink in slinks:
                 try:
                     if 'vidnode.net' in slink:
                         for source in more_sources.more_vidnode(
                                 slink, hostDict):
                             sources.append(source)
                     else:
                         quality = source_utils.check_url(slink)
                         valid, hoster = source_utils.is_host_valid(
                             slink, hostDict)
                         if valid:
                             sources.append({
                                 'source': hoster,
                                 'quality': quality,
                                 'info': '',
                                 'language': 'en',
                                 'url': slink,
                                 'direct': False,
                                 'debridonly': False
                             })
                 except:
                     source_utils.scraper_error('GOWATCHSERIES')
                     pass
         return sources
     except:
         source_utils.scraper_error('GOWATCHSERIES')
         return sources