Beispiel #1
0
 def searchMovie(self, title, year, aliases, headers):
     """Probe the site for a movie page matching any of *aliases*.

     Tries ``/film/<slug>`` for every alias first, then falls back to
     ``/film/<slug>-<year>``.  Returns the first URL that resolves to
     something other than the site root, or None.
     """
     try:
         # Initialize so an empty alias list returns None instead of
         # raising NameError on the check below.
         url = None
         for alias in aliases:
             url = '%s/film/%s' % (self.base_link,
                                   cleantitle.geturl(alias['title']))
             # output='geturl' hands back the final (redirected) URL,
             # or None when the request fails.
             url = client.request(url,
                                  headers=headers,
                                  output='geturl',
                                  timeout='10')
             if url is not None and url != self.base_link:
                 break
         if url is None:
             # Plain slug failed for every alias -- retry with the year.
             for alias in aliases:
                 url = '%s/film/%s-%s' % (self.base_link,
                                          cleantitle.geturl(
                                              alias['title']), year)
                 url = client.request(url,
                                      headers=headers,
                                      output='geturl',
                                      timeout='10')
                 if url is not None and url != self.base_link:
                     break
         return url
     except Exception:
         # Best-effort scraper: any failure just means "not found".
         return
Beispiel #2
0
 def sources(self, url, hostDict, hostprDict):
     """Collect playable stream sources for the urlencoded `url` payload.

     Returns a list of source dicts; empty when nothing resolves or on
     any scraping failure.
     """
     try:
         sources = []
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         # SECURITY NOTE: eval() of scraper-supplied data; should become
         # ast.literal_eval -- kept as-is to avoid changing accepted input.
         aliases = eval(data['aliases'])
         headers = {}
         if 'tvshowtitle' in data:
             ep = data['episode']
             url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
             self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
             r = client.request(url, headers=headers, timeout='10', output='geturl')
             # NOTE(review): `url` was just assigned, so this test probably
             # intended to inspect `r`; behaviour preserved pending review.
             if url is None:
                 url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
         else:
             url = self.searchMovie(data['title'], data['year'], aliases, headers)
             if url is None:
                 url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))
         if url is None:
             raise Exception()
         r = client.request(url, headers=headers, timeout='10')
         r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
         if 'tvshowtitle' in data:
             ep = data['episode']
             links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
         else:
             links = client.parseDOM(r, 'a', ret='player-data')
         for link in links:
             if '123movieshd' in link or 'seriesonline' in link:
                 # These hosts embed google-video redirector links directly.
                 r = client.request(link, headers=headers, timeout='10')
                 r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                 for i in r:
                     try:
                         sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'],
                                         'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                     except Exception:
                         pass
             else:
                 try:
                     # Reduce the netloc to its registrable 'host.tld' form.
                     host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                     if host not in hostDict:
                         raise Exception()
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False,
                                     'debridonly': False})
                 except Exception:
                     pass
         return sources
     except Exception:
         # Best-effort: return whatever was collected before the failure.
         return sources
Beispiel #3
0
	def sources(self, url, hostDict, hostprDict):
		"""Resolve playable stream sources for the urlencoded `url` payload.

		`url` is the query string produced by movie()/tvshow(); returns a
		list of source dicts (possibly empty).
		"""
		try:
			sources = []
			if url is None:
				return sources
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			# SECURITY NOTE(review): eval() on scraper-supplied data -- should
			# be ast.literal_eval; left as-is to preserve behaviour.
			aliases = eval(data['aliases'])
			if 'tvshowtitle' in data:
				ep = data['episode']
				url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
				self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
				r = client.request(url, timeout='10', output='geturl')
				# NOTE(review): `url` can never be None here (just assigned);
				# this was probably meant to test `r` -- confirm before fixing.
				if url is None:
					url = self.searchShow(data['tvshowtitle'], data['season'], aliases)
			else:
				url = self.searchMovie(data['title'], data['year'], aliases)
				if url is None:
					url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))
			if url is None:
				raise Exception()
			r = client.request(url, timeout='10')
			r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
			if 'tvshowtitle' in data:
				ep = data['episode']
				links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
			else:
				links = client.parseDOM(r, 'a', ret='player-data')
			for link in links:
				# Scheme-relative links come back as '//host/...'.
				link = "https:" + link if not link.startswith('http') else link
				if 'vidcloud' in link:
					r = client.request(link, timeout='10')
					match = getSum.findSum(r)
					for url in match:
						url = "https:" + url if not url.startswith('http') else url
						# api.vidnode URLs redirect; follow to the real host.
						url = requests.get(url).url if 'api.vidnode' in url else url
						valid, host = source_utils.is_host_valid(url, hostDict)
						if valid:
							quality, info = source_utils.get_release_quality(url, url)
							sources.append(
								{'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': url,
								 'direct': False, 'debridonly': False})
				else:
					valid, host = source_utils.is_host_valid(link, hostDict)
					if valid:
						quality, info = source_utils.get_release_quality(link, link)
						sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': link,
						                'direct': False, 'debridonly': False})
			return sources
		except:
			# Best-effort: return whatever was collected before the failure.
			return sources
 def searchMovie(self, title, year, aliases, headers):
     """Probe /film/<slug> (then /film/<slug>-<year>) for each alias.

     Returns the last probed URL, or None on error.
     """
     try:
         for alias in aliases:
             url = '%s/film/%s' % (self.base_link, cleantitle.geturl(alias['title']))
             result = self.scraper.get(url, headers=headers).status
             # NOTE(review): breaks when the page does NOT return 200, but
             # sibling implementations break on success -- condition looks
             # inverted.  Confirm intent before changing.
             if not result == 200 and url != self.base_link: break
         if url is None:
             # NOTE(review): `url` is reassigned every iteration above, so it
             # can never be None here; this fallback looks unreachable (an
             # empty `aliases` raises NameError instead, swallowed below).
             for alias in aliases:
                 url = '%s/film/%s-%s' % (self.base_link, cleantitle.geturl(alias['title']), year)
                 result = self.scraper.get(url, headers=headers).status
                 if not result == 200 and url != self.base_link: break
         return url
     except:
         return
Beispiel #5
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Search the site for *title* and return the first result URL whose
     href contains the title slug, or None."""
     try:
         # Search queries use '+' separators; result slugs use '-'.
         title = cleantitle.geturl(title).replace('-', '+')
         u = self.base_link + self.search_link % title
         u = client.request(u)
         # Hoisted out of the loop: the pattern is loop-invariant.
         link_re = re.compile('<a href="(.+?)"')
         for r in client.parseDOM(u, "div", attrs={"class": "movies-list"}):
             for url in link_re.findall(r):
                 # Re-derive the '-' slug form for the substring match.
                 title = cleantitle.geturl(title).replace("+", "-")
                 if title not in url:
                     continue
                 return url
     except Exception:
         return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve the page URL for one episode of a show.

        `url` is the urlencoded show payload from tvshow(); returns the
        episode link, or None when it cannot be resolved.
        """
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                # Locate the season page via site search; on any failure fall
                # through silently and reuse whatever `url` currently holds.
                clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                r = self.scraper.get(search_url).content
                r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      re.findall('<b><i>(.+?)</i>', i)) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if
                     cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = r[0][0]
            except:
                pass
            # The season page lists episodes as numbered anchors in #details;
            # pick the anchor whose text equals the episode number.
            data = self.scraper.get(url).content
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
Beispiel #7
0
	def movie(self, imdb, title, localtitle, aliases, year):
		"""Build and return the search URL for *title* / *year* (None on error)."""
		try:
			query = cleantitle.geturl(title).replace('-', '+')
			return self.base_link + self.search_link % (query, year)
		except:
			return
Beispiel #8
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape the first search result's embedded player for one stream.

        `url` is expected to be a dict holding at least 'title'.  Returns a
        list with at most one source dict; empty on failure.
        """
        try:
            sources = []

            # Consider premium (debrid) hosters alongside the free ones.
            hostDict = hostprDict + hostDict
            if url is None:
                return sources

            h = {'User-Agent': client.randomagent()}
            title = cleantitle.geturl(url['title']).replace('-', '+')
            url = urlparse.urljoin(self.base_link, self.search_link % title)
            r = self.scraper.get(url, headers=h)
            # Only the first search hit is inspected.
            r = BeautifulSoup(r.text,
                              'html.parser').find('div', {'class': 'item'})
            r = r.find('a')['href']
            r = self.scraper.get(r, headers=h)
            r = BeautifulSoup(r.content, 'html.parser')
            quality = r.find('span', {'class': 'calidad2'}).text
            url = r.find('div', {'class': 'movieplay'}).find('iframe')['src']
            # Anything not explicitly HD is reported as SD.
            if quality not in ['1080p', '720p']:
                quality = 'SD'

            valid, host = source_utils.is_host_valid(url, hostDict)
            if valid:
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except:
            # Best-effort: swallow scraping errors, return what we have.
            return sources
 def sources(self, url, hostDict, hostprDict):
     """Scrape search results and collect embedded-iframe stream sources.

     Returns a list of source dicts; empty on failure.
     """
     sources = []
     try:
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         q = '%s' % cleantitle.geturl(data['title'])
         url = self.base_link + self.search_link % q.replace('-', '+')
         r = self.scraper.get(url).content
         v = re.compile('<a href="(.+?)" class="ml-mask jt" title="(.+?)">\n<span class=".+?">(.+?)</span>').findall(
             r)
         for url, check, quality in v:
             t = '%s (%s)' % (data['title'], data['year'])
             # Bug fix: a non-matching result used to `raise`, which the
             # outer handler caught -- aborting the whole scan after the
             # first mismatch.  Skip the result instead.
             if t not in check:
                 continue
             r = self.scraper.get(url + '/watch.html').content
             url = re.compile('<iframe.+?src="(.+?)"').findall(r)[0]
             quality = source_utils.check_url(quality)
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
                                 'debridonly': False})
         return sources
     except BaseException:
         # Return whatever was collected before the failure.
         return sources
Beispiel #10
0
	def episode(self, url, imdb, tvdb, title, premiered, season, episode):
		"""Return a list of candidate episode-page links for one episode.

		`url` is the urlencoded show payload from tvshow(); returns None
		when the search reports no results or on error.
		"""
		try:
			if url is None:
				return
			url = urlparse.parse_qs(url)
			url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
			# Search for e.g. 'show+title+s05'.
			clean_title = cleantitle.geturl(url['tvshowtitle']) + '+s%02d' % int(season)
			search_url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title.replace('-', '+'),
			                                                                   url['year'])))
			search_results = self.scraper.get(search_url, headers={'referer': self.base_link}).content

			# The site renders an explicit 'not-found' block on empty search.
			not_found = dom_parser.parse_dom(search_results, 'div', {'class': 'not-found'})
			if len(not_found) > 0:
				return

			links = client.parseDOM(search_results, "a", ret="href", attrs={"class": "ml-mask jt"})
			results = []
			for link in links:
				# Keep only hits whose slug contains '<title>s<season>'.
				if '%ss%02d' % (cleantitle.get(url['tvshowtitle']), int(season)) in cleantitle.get(link):
					link_results = self.scraper.get(link, headers={'referer': search_url}).content
					r2 = dom_parser.parse_dom(link_results, 'div', {'id': 'ip_episode'})
					r3 = [dom_parser.parse_dom(i, 'a', req=['href']) for i in r2 if i]
					# NOTE(review): only r3[0] is scanned; an IndexError when
					# the episode list is missing is swallowed by the outer
					# except (returning None).
					for i in r3[0]:
						if i.content == 'Episode %s' % episode:
							results.append(i.attrs['href'])
			return results
		except:
			return
Beispiel #11
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the direct movie-page URL built from the cleaned title."""
     try:
         slug = cleantitle.geturl(title)
         return self.base_link + self.movie_link % slug
     except:
         return
    def searchMovie(self, title, year):
        """Search the site for *title*, preferring an exact title+year match.

        Falls back to a title-only match.  Returns the result's
        '<href>/watching.html' URL; an IndexError propagates to the caller
        when nothing matches at all (unchanged behaviour).
        """
        title = cleantitle.normalize(title)
        url = self.search_link % cleantitle.geturl(title)
        r = self.scraper.get(url, params={'link_web': self.base_link}).content
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'),
                client.parseDOM(r, 'a', ret='title'))
        # (href, title, [year]) triples; the year is scraped from '(YYYY'.
        results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
        try:
            # First pass: results that carry a year and match title + year.
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [
                i[0] for i in r
                if cleantitle.get(i[1]) == cleantitle.get(title) and (
                    year == i[2])
            ][0]
        except Exception:
            url = None
            log_utils.log('series9 - Exception: \n' +
                          str(traceback.format_exc()))

        if url is None:
            # Fallback: title-only match, regardless of year.
            url = [
                i[0] for i in results
                if cleantitle.get(i[1]) == cleantitle.get(title)
            ][0]

        url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
        return url
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Return the cleaned show title with '+' separators as its URL token."""
     try:
         return cleantitle.geturl(tvshowtitle).replace('-', '+')
     except:
         return
 def movie(self, imdb, title, localtitle, aliases, year):
     """Compose and return the search URL for *title* / *year* (None on error)."""
     try:
         slug = cleantitle.geturl(title)
         return urlparse.urljoin(self.base_link, self.search_link % (slug, year))
     except:
         return
Beispiel #15
0
    def movie(self, imdb, title, localtitle, aliases, year):
        """Search for *title* (*year*) and return matching result links.

        Returns a list of hrefs, or None when the site reports no results
        or on any error.
        """
        try:
            clean_title = cleantitle.geturl(title)
            search_url = urlparse.urljoin(
                self.base_link, (self.search_link % (clean_title, year)))
            search_results = self.scraper.get(search_url,
                                              headers={
                                                  'referer': self.base_link
                                              }).content

            # The site renders an explicit 'not-found' block on empty search.
            not_found = dom_parser.parse_dom(search_results, 'div',
                                             {'class': 'not-found'})
            if len(not_found) > 0:
                return

            links = client.parseDOM(search_results,
                                    "a",
                                    ret="href",
                                    attrs={"class": "ml-mask jt"})
            # Keep links whose slug contains '<cleantitle><year>'.  The
            # needle is loop-invariant, so compute it once.
            needle = '%s%s' % (cleantitle.get(title), year)
            return [link for link in links if needle in cleantitle.get(link)]
        except Exception:
            return
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return a dict payload holding the cleaned title slug and the year."""
     try:
         slug = cleantitle.geturl(title).replace('--', '-')
         return {'title': slug, 'year': year}
     except:
         return
Beispiel #17
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the movie search URL for the cleaned title (None on error)."""
     try:
         return self.base_link + self.search_movie % cleantitle.geturl(title)
     except:
         return
Beispiel #18
0
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Return the TV search URL for the cleaned show title (None on error)."""
     try:
         return self.base_link + self.search_tv % cleantitle.geturl(tvshowtitle)
     except:
         return
Beispiel #19
0
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """The cleaned show title itself serves as the URL token."""
     try:
         return cleantitle.geturl(tvshowtitle)
     except:
         return
Beispiel #20
0
    def searchMovie(self, title, year, aliases):
        """Search the site for a movie matching any alias.

        Prefers an alias+year match, then falls back to alias-only.
        Returns the result's '<href>/watching.html' URL, or None.
        """
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link,
                                   self.search_link % cleantitle.geturl(title))
            r = self.scraper.get(url).content
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            # (href, title, [year]) triples; year scraped from '(YYYY'.
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                # First pass: results that carry a year and match it.
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and (year == i[2])
                ][0]
            except Exception:
                url = None

            if url is None:
                # Fallback: alias match regardless of year; an IndexError
                # here is swallowed by the outer handler (returns None).
                url = [
                    i[0] for i in results if self.matchAlias(i[1], aliases)
                ][0]

            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except Exception:
            return
Beispiel #21
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return '<base>/<slug>-<year>' for the movie (None on error)."""
     try:
         slug = cleantitle.geturl(title)
         return self.base_link + '/%s-%s' % (slug, year)
     except:
         return
Beispiel #22
0
	def movie(self, imdb, title, localtitle, aliases, year):
		"""Return '<base>/movie/<slug>' for the cleaned title (None on error)."""
		try:
			return self.base_link + '/movie/' + cleantitle.geturl(title)
		except:
			return
Beispiel #23
0
	def movie(self, imdb, title, localtitle, aliases, year):
		"""Return '<base>/<slug>/' for the movie.

		Example: https://hdpopcorns.eu/avengers-infinity-war/
		"""
		try:
			slug = cleantitle.geturl(title)
			return self.base_link + '/%s/' % slug
		except:
			return
Beispiel #24
0
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Return the cleaned show title as the URL token; log and bail on error."""
     try:
         return cleantitle.geturl(tvshowtitle)
     except Exception:
         log_utils.log('1putlocker - Exception: \n' + str(traceback.format_exc()))
         return
Beispiel #25
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Search the site and return the first result matching title and year.

     The returned URL gets a '?watching' suffix; None when nothing
     matches or on error.
     """
     try:
         searchName = cleantitle.getsearch(title)
         searchURL = self.base_link + self.search_link % (
             searchName.replace(':', ' ').replace(' ', '+'))
         searchPage = self.scraper.get(searchURL).content
         results = re.compile(
             '<a href="(.+?)">(.+?)</a>.+?<span class="year">(.+?)</span>',
             re.DOTALL).findall(searchPage)
         for url, zName, zYear in results:
             # Case-insensitive slug comparison; substring match tolerates
             # result titles that embed extra text.
             if cleantitle.geturl(title).lower() in cleantitle.geturl(
                     zName).lower():
                 if year in str(zYear):
                     url = url + "?watching"
                     return url
     except:
         return
Beispiel #26
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return '<base>/<slug>/' for the movie; log and bail on error."""
     try:
         slug = cleantitle.geturl(title)
         return self.base_link + '/%s/' % slug
     except Exception:
         log_utils.log('1putlocker - Exception: \n' + str(traceback.format_exc()))
         return
Beispiel #27
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return a urlencoded payload with the search URL, raw title and year."""
     try:
         query = cleantitle.geturl(title).replace('-', '+').replace(': ', '+')
         search = urlparse.urljoin(self.base_link, self.search_link % query).lower()
         return urllib.urlencode({'url': search, 'title': title, 'year': year})
     except:
         return
 def searchShow(self, title, season, episode, aliases, headers):
     """Probe /show/<slug>/season/<n>/episode/<m> and return the URL.

     Returns the (last) probed URL, or None on error.
     """
     try:
         for alias in aliases:
             # NOTE(review): `alias` is never used -- the URL is built from
             # `title` every pass, so each iteration probes the same URL.
             # Sibling searchMovie() uses alias['title']; confirm and fix.
             url = '%s/show/%s/season/%01d/episode/%01d' % (
                 self.base_link, cleantitle.geturl(title), int(season), int(episode))
             result = self.scraper.get(url, headers=headers).status
             # NOTE(review): breaks when the status is NOT 200 -- condition
             # looks inverted relative to the apparent intent.
             if not result == 200 and url != self.base_link: break
         return url
     except:
         return
Beispiel #29
0
	def movie(self, imdb, title, localtitle, aliases, year):
		"""Search the site for *title* and return the first exact-title hit.

		Returns None when nothing matches or on error.
		"""
		try:
			q = cleantitle.geturl(title)
			q2 = q.replace('-', '+')
			url = self.base_link + self.search_link % q2
			r = self.scraper.get(url).content
			# Bug fix: the raw title is interpolated into a regex, so
			# metacharacters (e.g. '(', '+') used to corrupt the pattern;
			# escape it before compiling.
			match = re.compile('<div class="title"><a href="(.+?)">' + re.escape(title) + '</a></div>').findall(r)
			for url in match:
				return url
		except Exception:
			return
 def movie(self, imdb, title, localtitle, aliases, year):
     """Search by cleaned title and return the URL of the exact-title match.

     Returns None when no result's anchor text equals *title*, or on error.
     """
     try:
         url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title).replace('-', '+'))
         r = self.scraper.get(url, cookie='check=2').content
         m = dom_parser.parse_dom(r, 'div', attrs={'class': 'masonry'})
         m = dom_parser.parse_dom(m, 'a', req='href')
         m = [(i.attrs['href']) for i in m if i.content == title]
         # Bug fix: parse_dom returns a list, so the old `is not None` test
         # was always true and an empty match relied on a silently-swallowed
         # IndexError.  Test emptiness explicitly (same observable result).
         if not m:
             return
         return urlparse.urljoin(self.base_link, m[0])
     except Exception:
         return