Example No. 1
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []
			hostDict = hostDict + hostprDict
			r = self.scraper.get(url).content
			u = client.parseDOM(r, "div", attrs={"class": "ml-item"})
			for i in u:
				links = re.compile('<a href="(.+?)"').findall(i)
				for page_url in links:
					page = self.scraper.get(page_url).content
					results1 = re.compile('<a href="(.+?)" class="lnk').findall(page)
					for url in results1:
						if self.base_link in url:
							continue
						quality, info = source_utils.get_release_quality(url, url)
						valid, host = source_utils.is_host_valid(url, hostDict)
						sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
						                'direct': False, 'debridonly': False})
					results2 = re.compile('<iframe src="(.+?)"').findall(page)
					for link in results2:
						if "gomostream.com" in link:
							for source in more_sources.more_gomo(link, hostDict):
								sources.append(source)
						else:
							if "//ouo.io/" in link:
								continue
							quality, info = source_utils.get_release_quality(link, link)
							valid, host = source_utils.is_host_valid(link, hostDict)
							sources.append(
								{'source': host, 'quality': quality, 'language': 'en', 'url': link, 'info': info,
								 'direct': False, 'debridonly': False})
			return sources
		except:
			return sources
Example No. 2
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         r = self.scraper.get(url).content
         match = re.compile(
             '<a href="http://www.tvmovieflix.com/report-.+?/(.+?)" target="_blank"><span class="a">Report Broken</span></a></li>',
             re.DOTALL | re.M).findall(r)
         for link in match:
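             # the id taken from the "Report Broken" link is reused to build the /e/ (show) or /m/ (movie) embed URL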
             if "/show/" in url:
                 surl = "http://www.tvmovieflix.com/e/" + link
             else:
                 surl = "http://www.tvmovieflix.com/m/" + link
             i = self.scraper.get(surl).content
             match = re.compile('<IFRAME.+?SRC="(.+?)"',
                                re.DOTALL | re.IGNORECASE).findall(i)
             for link in match:
                 if "realtalksociety.com" in link:
                     r = requests.get(link).content
                     match = re.compile(
                         '<source src="(.+?)" type="video/mp4">',
                         re.DOTALL | re.IGNORECASE).findall(r)
                     for url in match:
                         valid, host = source_utils.is_host_valid(
                             url, hostDict)
                         quality, info = source_utils.get_release_quality(
                             url, url)
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'info': info,
                             'url': url,
                             'direct': True,
                             'debridonly': False
                         })
                 else:
                     valid, host = source_utils.is_host_valid(
                         link, hostDict)
                     quality, info = source_utils.get_release_quality(
                         link, link)
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': link,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example No. 3
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []
			if url is None:
				return sources
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			aliases = eval(data['aliases'])
			if 'tvshowtitle' in data:
				ep = data['episode']
				url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
				self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
				url = client.request(url, timeout='10', output='geturl')
				if url is None:
					url = self.searchShow(data['tvshowtitle'], data['season'], aliases)
			else:
				url = self.searchMovie(data['title'], data['year'], aliases)
				if url is None:
					url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))
			if url is None:
				raise Exception()
			r = client.request(url, timeout='10')
			r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
			if 'tvshowtitle' in data:
				ep = data['episode']
				links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
			else:
				links = client.parseDOM(r, 'a', ret='player-data')
			for link in links:
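				# player-data values may be scheme-relative, so normalise them to https before resolving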
				link = "https:" + link if not link.startswith('http') else link
				if 'vidcloud' in link:
					r = client.request(link, timeout='10')
					match = getSum.findSum(r)
					for url in match:
						url = "https:" + url if not url.startswith('http') else url
						url = requests.get(url).url if 'api.vidnode' in url else url
						valid, host = source_utils.is_host_valid(url, hostDict)
						if valid:
							quality, info = source_utils.get_release_quality(url, url)
							sources.append(
								{'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': url,
								 'direct': False, 'debridonly': False})
				else:
					valid, host = source_utils.is_host_valid(link, hostDict)
					if valid:
						quality, info = source_utils.get_release_quality(link, link)
						sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': link,
						                'direct': False, 'debridonly': False})
			return sources
		except:
			return sources
Example No. 4
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostprDict + hostDict
         r = getSum.get(url)
         match = getSum.findSum(r)
         for url in match:
             if 'vidcloud' in url:
                 result = getSum.get(url)
                 match = getSum.findSum(result)
                 for link in match:
                     link = "https:" + link if not link.startswith(
                         'http') else link
                     link = requests.get(
                         link).url if 'vidnode' in link else link
                     valid, host = source_utils.is_host_valid(
                         link, hostDict)
                     if valid:
                         quality, info = source_utils.get_release_quality(
                             link, link)
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'info': info,
                             'url': link,
                             'direct': False,
                             'debridonly': False
                         })
             else:
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     quality, info = source_utils.get_release_quality(
                         url, url)
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example No. 5
    def get_links_from_box(self, result, hostDict):
        sources = []

        src_url = client.parseDOM(result, 'tr', attrs={'id': 'mov\w+|tv\w+'})
        for item in src_url:

            url = client.parseDOM(item, 'a', ret='href')[0]
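            # the listed href is an interstitial page; fetch it and take its first anchor as the real hoster link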

            url = client.request(url.replace('https://www.', 'http://'))

            url = client.parseDOM(url, 'a', ret='href')[0]

            data = re.findall('<td>(.+?)</td>', item, re.DOTALL)

            # lang_type = data[2].split()[1]

            if 'HD' in data[1]:
                q = 'HD'
            else:
                q = 'SD'

            # host = re.findall('">(.+?)\.',data[0], re.DOTALL )[0]
            valid, host = source_utils.is_host_valid(url, hostDict)

            lang, info = 'es', 'LAT'

            sources.append(
                {'source': host, 'quality': q, 'language': lang, 'url': url, 'info': info, 'direct': False,
                 'debridonly': False})

        return sources
Example No. 6
	def work(self, link, testDict):
		if str(link).startswith("http"):
			link = self.getlink(link)
			q = source_utils.check_sd_url(link)
			valid, host = source_utils.is_host_valid(link, testDict)
			if not valid: return 0
			return host, q, link
Example No. 7
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         sourcePage = self.scraper.get(url).content
         links = re.compile('<iframe.+?src="(.+?)"',
                            re.DOTALL).findall(sourcePage)
         for link in links:
             if "gomostream.com" in link:
                 for source in more_sources.more_gomo(link, hostDict):
                     sources.append(source)
             else:
                 quality, info = source_utils.get_release_quality(
                     link, link)
                 valid, host = source_utils.is_host_valid(link, hostDict)
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': link,
                     'info': info,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         return sources
Example No. 8
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         r = client.request(urlparse.urljoin(self.base_link, url))
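         # the player markup is embedded as an escaped string in the JS variable 'vicode'; unescape it and read the iframe sources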
         r = re.findall('''vicode\s*=\s*["'](.*?)["'];''',
                        r)[0].decode('string_escape')
         r = dom_parser.parse_dom(r, 'iframe', req='src')
         r = [i.attrs['src'] for i in r]
         for i in r:
             valid, host = source_utils.is_host_valid(i, hostDict)
             if not valid: continue
             sources.append({
                 'source': host,
                 'quality': 'SD',
                 'language': 'de',
                 'url': i,
                 'direct': False,
                 'debridonly': False,
                 'checkquality': True
             })
         return sources
     except:
         return sources
Example No. 9
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url == None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            data = urllib.urlencode({'ID': re.sub('[^0-9]', '', str(data['imdb'])), 'lang': 'de'})

            data = client.request(urlparse.urljoin(self.base_link, self.request_link), post=data, XHR=True)
            data = json.loads(data)
            data = [(i, data['links'][i]) for i in data['links'] if 'links' in data]
            data = [(i[0], i[1][0], (i[1][1:])) for i in data]
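            # reshape each entry into (hoster, first value, remaining list of links) for the loop below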

            for hoster, quli, links in data:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                for link in links:
                    try:
                        sources.append(
                            {'source': hoster, 'quality': 'SD', 'language': 'de', 'url': self.out_link % link,
                             'direct': False, 'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
Example No. 10
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			if not url:
				return sources
			query = urlparse.urljoin(self.base_link, url)
			r = client.request(query)
			r = dom_parser.parse_dom(r, 'div', attrs={'id': 'tab-plot_german'})
			r = dom_parser.parse_dom(r, 'tbody')
			r = dom_parser.parse_dom(r, 'tr')
			for i in r:
				if re.search('(?<=">)(\n.*?)(?=<\/a>)', i[1]).group().strip():
					hoster = re.search('(?<=">)(\n.*?)(?=<\/a>)', i[1]).group().strip()
					link = re.search('(?<=href=\")(.*?)(?=\")', i[1]).group()
					rel = re.search('(?<=oddCell qualityCell">)(\n.*?)(?=<\/td>)', i[1]).group().strip()
					quality, info = source_utils.get_release_quality(rel)
					if not quality:
						quality = 'SD'
					valid, hoster = source_utils.is_host_valid(hoster, hostDict)
					if not valid: continue
					sources.append(
						{'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'direct': False,
						 'debridonly': False})
			return sources
		except:
			return sources
Example No. 11
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url == None:
             return sources
         html = client.request(url)
         quality, info = 'SD', None
         quals = re.compile(
             '<div>Quanlity: <span class="quanlity">(.+?)</span></div>',
             re.DOTALL).findall(html)
         for qual in quals:
             quality = source_utils.check_url(qual)
             info = qual
         links = re.compile('var link_.+? = "(.+?)"',
                            re.DOTALL).findall(html)
         for url in links:
             if not url.startswith('http'):
                 url = "https:" + url
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'info': info,
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('FmoviesIO - Exception: \n' + str(failure))
         return sources
Example No. 12
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         query = urlparse.urljoin(self.base_link, url)
         r = client.request(query)
         q = re.findall("'(http://www.elreyxhd.+?)'", r, re.DOTALL)[0]
         links = client.request(q)
         links = client.parseDOM(links, 'a', ret='href')
         for url in links:
             lang, info = 'es', 'LAT'
             qual = 'HD'
             if not 'http' in url: continue
             if 'elrey' in url: continue
             valid, host = source_utils.is_host_valid(url, hostDict)
             if not valid: continue
             sources.append({
                 'source': host,
                 'quality': qual,
                 'language': lang,
                 'url': url,
                 'info': info,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except:
         return sources
Example No. 13
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         h = {'User-Agent': client.randomagent()}
         html = self.scraper.get(url, headers=h).content
         qual = re.compile('<span class="calidad2">(.+?)</span>',
                           flags=re.DOTALL | re.IGNORECASE).findall(html)[0]
         links = re.compile('<iframe src="(.+?)"',
                            flags=re.DOTALL | re.UNICODE | re.MULTILINE
                            | re.IGNORECASE).findall(html)
         for link in links:
             valid, host = source_utils.is_host_valid(link, hostDict)
             quality, info = source_utils.get_release_quality(qual, link)
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': 'en',
                 'info': info,
                 'url': link,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except:
         return sources
Example No. 14
 def sources(self, url, hostDict, hostprDict):
     try:
         hostDict = hostprDict + hostDict
         sources = []
         if url is None:
             return sources
         headers = {'User-Agent': self.User_Agent}
         html = requests.get(url, headers=headers, timeout=10).content
         qual = re.compile('<div class="cf">.+?class="quality">(.+?)</td>',
                           re.DOTALL).findall(html)
         quality = 'SD'
         for i in qual:
             quality = source_utils.check_url(i)
         links = re.compile('data-href="(.+?)"', re.DOTALL).findall(html)
         for link in links:
             if 'http' not in link:
                 link = 'https://' + link
             valid, host = source_utils.is_host_valid(link, hostDict)
             if valid and link not in str(sources):
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': link,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         return sources
Example No. 15
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			if url is None:
				return sources

			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			j = self.__get_json(data['url'])

			if not j:
				return sources

			sid = data['sid'] if 'sid' in data else j.keys()[0]
			pcnt = int(j[sid]['1']) if '1' in j[sid] else 1

			for jHoster in j[sid]['links']:
				jLinks = [i[3] for i in j[sid]['links'][jHoster] if i[5] == 'stream']
				if len(jLinks) < pcnt: continue
				h_url = jLinks[0]
				valid, hoster = source_utils.is_host_valid(h_url, hostDict)
				if not valid: continue
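				# multi-part streams are joined into a single Kodi stack:// URL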
				h_url = h_url if pcnt == 1 else 'stack://' + ' , '.join(jLinks)

				try:
					sources.append(
						{'source': hoster, 'quality': 'SD', 'language': 'de', 'info': '' if pcnt == 1 else 'multi-part',
						 'url': h_url, 'direct': False, 'debridonly': False})
				except:
					pass
			return sources
		except:
			return sources
Example No. 16
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         data.update({'raw': 'true', 'language': 'de'})
         data = urllib.urlencode(data)
         data = client.request(urlparse.urljoin(self.base_link,
                                                self.request_link),
                               post=data)
         data = json.loads(data)
         data = [i[1] for i in data[1].items()]
         data = [(i['name'].lower(), i['links']) for i in data]
         for host, links in data:
             valid, host = source_utils.is_host_valid(host, hostDict)
             if not valid: continue
             for link in links:
                 try:
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'de',
                         'url': link['URL'],
                         'direct': False,
                         'debridonly': False
                     })
                 except:
                     pass
         return sources
     except:
         return sources
Example No. 17
    def get_from_main_player(self, result, sources, hostDict):
        result_sources = []

        data = client.parseDOM(result, 'div', attrs={'id': 'playex'})

        links = client.parseDOM(data, 'iframe', ret='src')
        r = client.parseDOM(result, 'a', attrs={'class': 'options'})

        for i in range(len(r)):

            item = r[i].split()
            host = item[-4]
            q = item[-3]

            if 'Latino' in item[-1]:
                lang, info = 'es', 'LAT'
            else:
                lang, info = 'es', None

            url = links[i]
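            # megapelistv links go through an interstitial page; fetch it and take its first anchor as the real URL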
            if 'megapelistv' in url:
                url = client.request(url.replace('https://www.', 'http://'))
                url = client.parseDOM(url, 'a', ret='href')[0]
            if self.url_not_on_list(url, sources):
                valid, host = source_utils.is_host_valid(url, hostDict)
                result_sources.append(
                    {'source': host, 'quality': q, 'language': lang, 'url': url, 'info': info, 'direct': False,
                     'debridonly': False})

        return result_sources
Example No. 18
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            hostDict = hostprDict + hostDict
            if url is None:
                return sources

            h = {'User-Agent': client.randomagent()}
            title = cleantitle.geturl(url['title']).replace('-', '+')
            url = urlparse.urljoin(self.base_link, self.search_link % title)
            r = self.scraper.get(url, headers=h)
            r = BeautifulSoup(r.text,
                              'html.parser').find('div', {'class': 'item'})
            r = r.find('a')['href']
            r = self.scraper.get(r, headers=h)
            r = BeautifulSoup(r.content, 'html.parser')
            quality = r.find('span', {'class': 'calidad2'}).text
            url = r.find('div', {'class': 'movieplay'}).find('iframe')['src']
            if quality not in ['1080p', '720p']:
                quality = 'SD'

            valid, host = source_utils.is_host_valid(url, hostDict)
            if valid:
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except:
            return sources
Example No. 19
    def sources(self, url, hostDict, hostprDict):

        sources = []
        try:

            if url == None: return sources
            result = client.request(urlparse.urljoin(self.base_link, url), redirect=False)

            section = client.parseDOM(result, 'section', attrs={'id': 'video_player'})[0]
            link = client.parseDOM(section, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: return sources
            spans = client.parseDOM(section, 'span')
            info = None
            for span in spans:
                if span == 'Z lektorem':
                    info = 'Lektor'

            q = source_utils.check_sd_url(link)
            sources.append({'source': host, 'quality': q, 'language': 'pl', 'url': link, 'info': info, 'direct': False,
                            'debridonly': False})

            return sources
        except:
            return sources
Example No. 20
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         hostDict = hostprDict + hostDict
         r = self.scraper.get(url).content
         u = client.parseDOM(r, "ul", attrs={"id": "serverul"})
         for t in u:
             links = client.parseDOM(t, 'a', ret='href')
             for url in links:
                 if 'getlink' in url:
                     continue
                 quality = source_utils.check_url(url)
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example No. 21
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         hostDict = hostprDict + hostDict
         r = self.scraper.get(url).content
         try:
             quality = 'SD'
             qual = re.compile('class="quality">(.+?)<').findall(r)
             for i in qual:
                 if 'HD' in i:
                     quality = '1080p'
                 else:
                     quality = 'SD'
             match = re.compile('<iframe.+?src="(.+?)"').findall(r)
             for url in match:
                 if 'youtube' in url:
                     continue
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         except:
             return sources
     except Exception:
         return sources
     return sources
Example No. 22
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         r = self.scraper.get(url).content
         try:
             match = re.compile(
                 'themes/movies/img/icon/server/(.+?)\.png" width="16" height="16" /> <a href="(.+?)">Version '
             ).findall(r)
             for host, url in match:
                 if host == 'internet': pass
                 if host in str(sources): continue
                 if url in str(sources): continue
                 valid, host = source_utils.is_host_valid(host, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         except:
             return sources
     except Exception:
         return sources
     return sources
Example No. 23
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         r = self.scraper.get(url).content
         qual = re.findall(">(\w+)<\/p", r)
         quality, info = 'SD', None
         for i in qual:
             quality, info = source_utils.get_release_quality(i, i)
         r = dom_parser.parse_dom(r, 'div', {'id': 'servers-list'})
         r = [dom_parser.parse_dom(i, 'a', req=['href']) for i in r if i]
         for i in r[0]:
             url = {
                 'url': i.attrs['href'],
                 'data-film': i.attrs['data-film'],
                 'data-server': i.attrs['data-server'],
                 'data-name': i.attrs['data-name']
             }
             url = urllib.urlencode(url)
             valid, host = source_utils.is_host_valid(i.content, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'info': info,
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         return sources
Example No. 24
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         q = '%s' % cleantitle.geturl(data['title'])
         url = self.base_link + self.search_link % q.replace('-', '+')
         r = self.scraper.get(url).content
         v = re.compile('<a href="(.+?)" class="ml-mask jt" title="(.+?)">\n<span class=".+?">(.+?)</span>').findall(
             r)
         for url, check, quality in v:
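             # bail out unless the result title contains '<title> (<year>)'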
             t = '%s (%s)' % (data['title'], data['year'])
             if t not in check: raise Exception()
             r = self.scraper.get(url + '/watch.html').content
             url = re.compile('<iframe.+?src="(.+?)"').findall(r)[0]
             quality = source_utils.check_url(quality)
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
                                 'debridonly': False})
         return sources
     except BaseException:
         return sources
Example No. 25
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         hostDict = hostprDict + hostDict
         headers = {'Referer': url}
         r = self.scraper.get(url, headers=headers).content
         u = client.parseDOM(r,
                             "span",
                             attrs={"class": "movie_version_link"})
         for t in u:
             match = client.parseDOM(t, 'a', ret='data-href')
             for url in match:
                 if url in str(sources):
                     continue
                 quality, info = source_utils.get_release_quality(url, url)
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example No. 26
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = url.replace('/en/', '/de/')

            video_id = re.search('(?<=\/)(\d*?)(?=-)', url).group()
            if not video_id:
                return sources

            # load player
            query = self.get_player % (video_id)
            query = urlparse.urljoin(self.base_link, query)
            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'le-server'})

            # for each hoster
            for i in r:
                hoster = dom_parser.parse_dom(i,
                                              'div',
                                              attrs={'class': 'les-title'})
                hoster = dom_parser.parse_dom(hoster, 'strong')
                hoster = hoster[0][1]

                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                links = dom_parser.parse_dom(i,
                                             'a',
                                             attrs={'class': 'ep-item'})

                # for each link
                for i in links:
                    if '1080p' in i[0]['title']:
                        quality = '1080p'
                    elif 'HD' in i[0]['title']:
                        quality = 'HD'
                    else:
                        quality = 'SD'

                    url = i[0]['id']
                    if not url: continue

                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'de',
                        'url': url,
                        'direct': False,
                        'debridonly': False,
                        'checkquality': True
                    })

            return sources
        except:
            return sources
Example No. 27
def more_gomo(link, hostDict):
	sources = []  # By Mpie
	try:
		gomo_link = 'https://gomostream.com/decoding_v3.php'
		User_Agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
		result = client.request(link)
		tc = re.compile('tc = \'(.+?)\';').findall(result)[0]
		if tc:
			token = re.compile('"_token": "(.+?)",').findall(result)[0]
			post = {'tokenCode': tc, '_token': token}

			def tsd(tokenCode):
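				# reverse characters 4-17 of the token code and append a fixed suffix; gomostream expects this value in the x-token header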
				_13x48X = tokenCode
				_71Wxx199 = _13x48X[4:18][::-1]
				return _71Wxx199 + "18" + "432782"

			headers = {'Host': 'gomostream.com', 'Referer': link, 'User-Agent': User_Agent, 'x-token': tsd(tc)}
			result = client.request(gomo_link, XHR=True, post=post, headers=headers)
			urls = json.loads(result)
			for url in urls:
				if 'gomostream' in url:
					continue
					# sources.append({'source': 'CDN', 'quality': 'SD', 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
				else:
					quality, info = source_utils.get_release_quality(url, url)
					valid, host = source_utils.is_host_valid(url, hostDict)
					sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
					                'direct': False, 'debridonly': False})
		return sources
	except:
		return sources
Example No. 28
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)
            links = client.parseDOM(r, 'div', attrs={'class': 'xg_user_generated'})
            links = dom_parser.parse_dom(links, 'a')

            for i in links:
                url = i[0]['href']
                if 'youtube' in url: continue
                quality = 'SD'
                lang, info = 'gr', 'SUB'
                valid, host = source_utils.is_host_valid(url, hostDict)
                if 'hdvid' in host: valid = True
                if not valid: continue

                sources.append({'source': host, 'quality': quality, 'language': lang, 'url': url, 'info': info,
                                'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example No. 29
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			if not url:
				return sources
			query = urlparse.urljoin(self.base_link, url)
			r = client.request(query)
			links = client.parseDOM(r, 'tbody')
			links = client.parseDOM(links, 'a', ret='href')
			for i in range(len(links)):
				url = links[i]
				if 'target' in url: continue
				data = client.request(url)
				url = client.parseDOM(data, 'iframe', ret='src')[0]
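				# internal /go? links wrap the real target between 'go?' and '-'; extract it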
				if url.startswith('/go'): url = re.findall('go\?(.+?)-', url)[0]
				if 'crypt' in url: continue
				if 'redvid' in url:
					data = client.request(url)
					url = client.parseDOM(data, 'iframe', ret='src')[0]
				if any(x in url for x in ['.online', 'xrysoi.se', 'filmer', '.bp', '.blogger', 'youtu']):
					continue
				quality = 'SD'
				lang, info = 'gr', 'SUB'
				valid, host = source_utils.is_host_valid(url, hostDict)
				if 'hdvid' in host: valid = True
				if not valid: continue
				sources.append({'source': host, 'quality': quality, 'language': lang, 'url': url, 'info': info,
				                'direct': False, 'debridonly': False})
			return sources
		except:
			return sources
Example No. 30
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         r = client.request(url)
         links = client.parseDOM(r, 'div', attrs={'class': 'mtos'})
         for i in range(1, len(links)):
             idioma = client.parseDOM(links[i], 'img', ret='src')[0]
             if 'in.' in idioma: continue
             quality = client.parseDOM(links[i],
                                       'div',
                                       attrs={'class': 'dcalidad'})[0]
             servidor = re.findall("src='.+?'\s*/>(.+?)</div>", links[i])[0]
             lang, info = self.get_lang_by_type(idioma)
             quality = self.quality_fixer(quality)
             link = dom_parser.parse_dom(links[i], 'a',
                                         req='href')[0][0]['href']
             url = link
             if 'streamcloud' in url: quality = 'SD'
             valid, host = source_utils.is_host_valid(servidor, hostDict)
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': lang,
                 'url': url,
                 'info': info,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except:
         return sources