def searchMovie(self, title, year, aliases, headers):
    """Resolve a movie page URL by trying each alias title.

    Probes /film/<alias> first; if nothing resolves, retries with the
    year appended (/film/<alias>-<year>).  Returns the resolved URL,
    or None when no alias matched or a request failed.
    """
    try:
        url = None  # keep the name bound even when aliases is empty
        for alias in aliases:
            url = '%s/film/%s' % (self.base_link,
                                  cleantitle.geturl(alias['title']))
            # output='geturl' returns the final (redirected) URL or None
            url = client.request(url,
                                 headers=headers,
                                 output='geturl',
                                 timeout='10')
            if url is not None and url != self.base_link:
                break
        if url is None:
            for alias in aliases:
                url = '%s/film/%s-%s' % (self.base_link,
                                         cleantitle.geturl(alias['title']),
                                         year)
                url = client.request(url,
                                     headers=headers,
                                     output='geturl',
                                     timeout='10')
                if url is not None and url != self.base_link:
                    break
        return url
    except Exception:
        # best-effort scraper: any failure is treated as "not found"
        return
Esempio n. 2
0
 def searchMovie(self, title, year, aliases, headers):
     """Resolve a movie page URL by trying each alias title.

     Probes /movie/<alias> first; if nothing resolves, retries with the
     year appended (/movie/<alias>-<year>).  Returns the resolved URL,
     or None when no alias matched or a request failed.
     """
     try:
         url = None  # keep the name bound even when aliases is empty
         for alias in aliases:
             url = '%s/movie/%s' % (self.base_link,
                                    cleantitle.geturl(alias['title']))
             # output='geturl' returns the final (redirected) URL or None
             url = client.request(url,
                                  headers=headers,
                                  output='geturl',
                                  timeout='10')
             if url is not None and url != self.base_link:
                 break
         if url is None:
             for alias in aliases:
                 url = '%s/movie/%s-%s' % (self.base_link,
                                           cleantitle.geturl(alias['title']),
                                           year)
                 url = client.request(url,
                                      headers=headers,
                                      output='geturl',
                                      timeout='10')
                 if url is not None and url != self.base_link:
                     break
         return url
     except Exception:
         source_utils.scraper_error('SHOWBOX')
         return
Esempio n. 3
0
    def sources_packs(self,
                      url,
                      hostDict,
                      hostprDict,
                      search_series=False,
                      total_seasons=None,
                      bypass_filter=False):
        """Gather season/complete "pack" sources for a TV show.

        Builds site search queries ("S02" / "Season 2" style, or
        "Season" / "Complete" when *search_series* is set) and scrapes
        each result page on a worker thread.  Results accumulate in
        ``self.sources``, which is always returned (possibly empty).
        """
        self.sources = []
        try:
            self.search_series = search_series
            self.total_seasons = total_seasons
            self.bypass_filter = bypass_filter

            if url is None:
                return self.sources
            # packs are debrid-only: bail out early when debrid is disabled
            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data['tvshowtitle'].replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.imdb = data['imdb']
            self.year = data['year']
            self.season_x = data['season']
            self.season_xx = self.season_x.zfill(2)

            # raw string so \s and \. reach the regex engine unescaped
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', self.title)
            queries = [
                self.search_link.format(
                    query[0].lower(),
                    cleantitle.geturl(query + ' S%s' % self.season_xx)),
                self.search_link.format(
                    query[0].lower(),
                    cleantitle.geturl(query + ' Season %s' % self.season_x))
            ]
            if search_series:
                queries = [
                    self.search_link.format(
                        query[0].lower(),
                        cleantitle.geturl(query + ' Season')),
                    self.search_link.format(
                        query[0].lower(),
                        cleantitle.geturl(query + ' Complete'))
                ]

            threads = []
            for url in queries:
                link = urljoin(self.base_link, url)
                threads.append(workers.Thread(self.get_sources_packs, link))
            # plain loops instead of side-effect list comprehensions
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()
            return self.sources
        except Exception:
            source_utils.scraper_error('MAGNETDL')
            return self.sources
Esempio n. 4
0
	def sources(self, url, hostDict, hostprDict):
		"""Collect playable sources for a movie or episode page.

		Resolves the page URL (with search fallbacks), reads the
		'les-content' link list, and classifies each player link as either
		a direct gvideo stream or a known hoster entry.
		"""
		try:
			sources = []
			if url is None:
				return sources
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			# NOTE(review): eval on serialized aliases is unsafe for untrusted
			# input -- consider ast.literal_eval.
			aliases = eval(data['aliases'])
			headers = {}
			if 'tvshowtitle' in data:
				ep = data['episode']
				url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
					self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
				# fix: capture the resolved URL (it was assigned to an unused
				# local, so the searchShow fallback below could never trigger)
				url = client.request(url, headers=headers, timeout='10', output='geturl')
				if url is None:
					url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
			else:
				url = self.searchMovie(data['title'], data['year'], aliases, headers)
				if url is None:
					url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))
			if url is None:
				raise Exception()
			r = client.request(url, headers=headers, timeout='10')
			r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
			if 'tvshowtitle' in data:
				ep = data['episode']
				links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
			else:
				links = client.parseDOM(r, 'a', ret='player-data')
			for link in links:
				if '123movieshd' in link or 'seriesonline' in link:
					r = client.request(link, headers=headers, timeout='10')
					r = re.findall(r'(https:.*?redirector.*?)[\'\"]', r)
					for i in r:
						try:
							sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'],
							                'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
						except:
							pass
				else:
					try:
						# host = last two dotted components of the link's netloc
						host = re.findall(r'([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
						if host not in hostDict:
							raise Exception()
						host = client.replaceHTMLCodes(host)
						host = host.encode('utf-8')
						sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False,
						                'debridonly': False})
					except:
						pass
			return sources
		except:
			return sources
 def movie(self, imdb, title, localtitle, aliases, year):
     """Search the site's movie list and return the first matching page URL."""
     try:
         title = cleantitle.geturl(title).replace('-', '+')
         page = self.scraper.get(self.base_link + self.search_link % title).content
         for block in client.parseDOM(page, "div", attrs={"class": "movies-list"}):
             for url in re.compile('<a href="(.+?)"').findall(block):
                 title = cleantitle.geturl(title).replace("+", "-")
                 if title in url:
                     return url
     except:
         return
Esempio n. 6
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the url payload: cleaned title slug plus year."""
     try:
         slug = cleantitle.geturl(title).replace('--', '-')
         return {'title': slug, 'year': year}
     except:
         return
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """The show's cleaned title slug serves as its url token."""
     try:
         return cleantitle.geturl(tvshowtitle)
     except:
         return
Esempio n. 8
0
	def movie(self, imdb, title, localtitle, aliases, year):
		"""Build the movie watch-page URL from the cleaned title."""
		try:
			slug = cleantitle.geturl(title)
			return self.base_link + self.movie_link % slug + '/watching.html'
		except:
			return
Esempio n. 9
0
	def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
		"""Use the cleaned show title as the url token."""
		try:
			return cleantitle.geturl(tvshowtitle)
		except:
			source_utils.scraper_error('TOONGET')
			return
 def movie(self, imdb, title, localtitle, aliases, year):
     """Compose the watch-page URL from the title slug and imdb id."""
     try:
         slug = cleantitle.geturl(title)
         return self.base_link + '/movie/%s-%s/watching.html' % (slug, imdb)
     except:
         return
Esempio n. 11
0
 def sources(self, url, hostDict, hostprDict):
     """Scrape search results for "title (year)" and return hoster sources."""
     sources = []
     try:
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         q = '%s' % cleantitle.geturl(data['title'])
         url = self.base_link + self.search_link % q.replace('-', '+')
         r = self.scraper.get(url).content
         # raw string so \n and other escapes reach the regex engine as written
         v = re.compile(
             r'<a href="(.+?)" class="ml-mask jt" title="(.+?)">\n<span class=".+?">(.+?)</span>'
         ).findall(r)
         for url, check, quality in v:
             t = '%s (%s)' % (data['title'], data['year'])
             # NOTE(review): raising here aborts the whole loop on the first
             # non-matching result (original behavior, kept as-is)
             if t not in check: raise Exception()
             r = self.scraper.get(url + '/watch.html').content
             url = re.compile('<iframe.+?src="(.+?)"').findall(r)[0]
             quality = source_utils.check_url(quality)
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except Exception:
         # was `except BaseException`, which also swallowed KeyboardInterrupt
         return sources
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     """Locate the page URL for a given episode via the site search.

     Refines the show URL through a season search when possible, then picks
     the anchor whose text equals the episode number from the details list.
     """
     try:
         if url is None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url['premiered'], url['season'], url['episode'] = premiered, season, episode
         try:
             clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
             search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
             r = self.scraper.get(search_url).content
             r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
             r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
                   dom_parser2.parse_dom(i, 'div', attrs={'class': 'status'})[0]) for i in r if i]
             # raw strings keep the regex escapes intact
             r = [(i[0][0].attrs['href'], re.findall(r'(.+?)</b><br', i[0][0].content, re.DOTALL)[0],
                   re.findall(r'(\d+)', i[1].content)[0]) for i in r if i]
             r = [(i[0], i[1].split(':')[0], i[2]) for i in r
                  if (cleantitle.get(i[1].split(':')[0]) == cleantitle.get(url['tvshowtitle']) and i[2] == str(
                     int(season)))]
             url = r[0][0]
         except:
             # search refinement is best-effort; fall back to the raw url
             pass
         data = self.scraper.get(url).content
         data = client.parseDOM(data, 'div', attrs={'id': 'details'})
         data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
         url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
         return url[0][1]
     except:
         return
Esempio n. 13
0
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Cleaned show slug (double dashes collapsed) as the url token."""
     try:
         return cleantitle.geturl(tvshowtitle).replace('--', '-')
     except:
         source_utils.scraper_error('YESMOVIESGG')
         return
Esempio n. 14
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Movie page URL: /movie/<slug>-<year>."""
     try:
         slug = cleantitle.geturl(title)
         return self.base_link + '/movie/%s-%s' % (slug, year)
     except:
         return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Resolve episode page links: search for "<show>+sNN", keep results
        # whose slug contains the show+season token, then collect hrefs for
        # the requested episode number.  Returns a list of links, or None.
        try:
            if url is None:
                return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            # e.g. "show-title+s02"
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '+s%02d' % int(season)
            search_url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title.replace('-', '+'),
                                                                               url['year'])))
            search_results = self.scraper.get(search_url, headers={'referer': self.base_link}).content

            # the site renders a dedicated "not-found" div on empty searches
            not_found = dom_parser.parse_dom(search_results, 'div', {'class': 'not-found'})
            if len(not_found) > 0:
                return

            links = client.parseDOM(search_results, "a", ret="href", attrs={"class": "ml-mask jt"})
            results = []
            for link in links:
                # keep only result links whose slug contains "<title>s<season>"
                if '%ss%02d' % (cleantitle.get(url['tvshowtitle']), int(season)) in cleantitle.get(link):
                    link_results = self.scraper.get(link, headers={'referer': search_url}).content
                    r2 = dom_parser.parse_dom(link_results, 'div', {'id': 'ip_episode'})
                    r3 = [dom_parser.parse_dom(i, 'a', req=['href']) for i in r2 if i]
                    # NOTE(review): r3[0] raises IndexError when no episode
                    # block exists; swallowed by the bare except below.
                    for i in r3[0]:
                        if i.content == 'Episode %s' % episode:
                            results.append(i.attrs['href'])
            return results
        except:
            return
Esempio n. 16
0
	def movie(self, imdb, title, localtitle, aliases, year):
		"""Return the search URL built from the cleaned title and year."""
		try:
			slug = cleantitle.geturl(title)
			return urlparse.urljoin(self.base_link, (self.search_link % (slug, year)))
		except:
			return
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Build the TV search URL from the cleaned show title."""
     try:
         slug = cleantitle.geturl(tvshowtitle)
         return self.base_link + self.search_tv % slug
     except:
         return
Esempio n. 18
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Assemble the urlencoded episode payload, including the search URL."""
        try:
            if url is None: return
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            tvshowtitle = data['tvshowtitle']
            year = data['year']

            # e.g. "show+title+s02e05"
            query = '%s+s%02de%02d' % (cleantitle.geturl(tvshowtitle).replace(
                '-', '+'), int(season), int(episode))
            url2 = urlparse.urljoin(self.base_link, self.search_link % (query))
            # (removed a dead re-parse of `url`: its result was immediately
            # overwritten by the payload dict below)
            url = {
                'imdb': imdb,
                'title': title,
                'year': year,
                'url': url2,
                # NOTE(review): 'episdoe' is a long-standing typo; kept in
                # case downstream code matches the misspelled value.
                'content': 'episdoe',
                'tvshowtitle': tvshowtitle,
                'season': season,
                'episode': episode,
                'premiered': premiered
            }
            return urllib.urlencode(url)
        except:
            return
Esempio n. 19
0
 def searchMovie(self, title, year, aliases, headers):
     """Search the site for a movie, matching aliases (and year when found).

     Returns the movie's /watching.html URL, or None when nothing matches.
     """
     try:
         title = cleantitle.normalize(title)
         url = urljoin(self.base_link,
                       self.search_link % cleantitle.geturl(title))
         r = self.scraper.get(url).content
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'),
                 client.parseDOM(r, 'a', ret='oldtitle'))
         # raw string so \( and \d are regex escapes, not Python ones
         results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
         try:
             # prefer an exact alias + year match
             r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
             url = [
                 i[0] for i in r
                 if self.matchAlias(i[1], aliases) and (year == i[2])
             ][0]
         except:
             url = None
         if url is None:
             try:
                 # fall back to an alias-only match
                 url = [
                     i[0] for i in results
                     if self.matchAlias(i[1], aliases)
                 ][0]
             except:
                 return
         url = urljoin(self.base_link, '%s/watching.html' % url)
         return url
     except:
         source_utils.scraper_error('SERIES9')
         return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Find the episode link on the show's details page.

        Refines the url via a season search when possible, then returns the
        href whose anchor text equals the episode number, or None.
        """
        try:
            if url is None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url[
                'episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(
                    url['tvshowtitle']) + '-season-%d' % int(season)
                search_url = urlparse.urljoin(
                    self.base_link,
                    self.search_link % clean_title.replace('-', '+'))
                r = self.scraper.get(search_url).content
                r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      re.findall('<b><i>(.+?)</i>', i)) for i in r]
                r = [(i[0][0], i[1][0]) for i in r
                     if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = r[0][0]
            except:
                # search refinement is best-effort; keep the url we have
                pass
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'),
                       client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
 def searchMovie(self, title, year, aliases, headers):
     """Search for a movie by alias (and year) and return its watch URL."""
     try:
         title = cleantitle.normalize(title)
         url = urlparse.urljoin(self.base_link,
                                self.search_link % cleantitle.geturl(title))
         r = client.request(url, headers=headers, timeout='10')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'),
                 client.parseDOM(r, 'a', ret='oldtitle'))
         # raw string keeps the regex escapes intact
         results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
         try:
             # prefer an exact alias + year match
             r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
             url = [
                 i[0] for i in r
                 if self.matchAlias(i[1], aliases) and (year == i[2])
             ][0]
         except:
             url = None
         if url is None:
             # alias-only fallback; IndexError lands in the outer except
             url = [
                 i[0] for i in results if self.matchAlias(i[1], aliases)
             ][0]
         url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
         return url
     except:
         return
Esempio n. 22
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Movie pages live at /<title-slug>/."""
     try:
         slug = cleantitle.geturl(title)
         return self.base_link + '/%s/' % slug
     except:
         return
    def movie(self, imdb, title, localtitle, aliases, year):
        """Search the site and return candidate movie links for title+year."""
        try:
            slug = cleantitle.geturl(title)
            search_url = urlparse.urljoin(
                self.base_link, (self.search_link % (slug, year)))
            html = self.scraper.get(
                search_url, headers={'referer': self.base_link}).content

            # the site renders a dedicated "not-found" div on empty searches
            if len(dom_parser.parse_dom(html, 'div',
                                        {'class': 'not-found'})) > 0:
                return

            results = []
            for link in client.parseDOM(html,
                                        "a",
                                        ret="href",
                                        attrs={"class": "ml-mask jt"}):
                # keep links whose cleaned slug contains "<title><year>"
                if '%s%s' % (cleantitle.get(title),
                             year) in cleantitle.get(link):
                    results.append(link)
            return results
        except:
            return
Esempio n. 24
0
	def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
		"""Cleaned show slug with '+' separators as the url token."""
		try:
			return cleantitle.geturl(tvshowtitle).replace('-', '+')
		except:
			return
	def movie(self, imdb, title, localtitle, aliases, year):
		"""Build the search URL from the cleaned movie title."""
		try:
			slug = cleantitle.geturl(title).replace('--', '-')
			return self.base_link + self.search_link % slug
		except:
			return
Esempio n. 26
0
 def sources(self, url, hostDict, hostprDict):
     """Scrape the first search result's embedded player for a hoster link."""
     try:
         sources = []
         hostDict = hostprDict + hostDict
         if url is None:
             return sources
         h = {'User-Agent': client.randomagent()}
         title = cleantitle.geturl(url['title']).replace('-', '+')
         url = urlparse.urljoin(self.base_link, self.search_link % title)
         r = self.scraper.get(url, headers=h)
         r = BeautifulSoup(r.text,
                           'html.parser').find('div', {'class': 'item'})
         r = r.find('a')['href']
         r = self.scraper.get(r, headers=h)
         r = BeautifulSoup(r.content, 'html.parser')
         quality = r.find('span', {'class': 'calidad2'}).text
         url = r.find('div', {'class': 'movieplay'}).find('iframe')['src']
         # anything that is not explicitly HD is reported as SD
         if quality not in ['1080p', '720p']:
             quality = 'SD'
         valid, host = source_utils.is_host_valid(url, hostDict)
         if valid:
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': 'en',
                 'url': url,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except:
         return sources
Esempio n. 27
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the search URL built from the cleaned title and year."""
     try:
         slug = cleantitle.geturl(title)
         return self.base_link + self.search_link % (slug, year)
     except:
         return
 def searchMovie(self, title, year, aliases, headers):
     """Probe /film/<alias> (then /film/<alias>-<year>) and return a URL."""
     try:
         url = None  # stays None when aliases is empty
         for alias in aliases:
             url = '%s/film/%s' % (self.base_link,
                                   cleantitle.geturl(alias['title']))
             result = self.scraper.get(url, headers=headers).status_code
             # NOTE(review): breaking on a NON-200 status looks inverted
             # compared to the sibling searchMovie implementations, which
             # break on success; behavior kept as-is pending confirmation.
             if result != 200 and url != self.base_link: break
         if url is None:
             for alias in aliases:
                 url = '%s/film/%s-%s' % (self.base_link,
                                          cleantitle.geturl(alias['title']),
                                          year)
                 result = self.scraper.get(url, headers=headers).status_code
                 if result != 200 and url != self.base_link: break
         return url
     except:
         return
 def movie(self, imdb, title, localtitle, aliases, year):
     """Movie page URL, e.g. https://hdpopcorns.eu/avengers-infinity-war/."""
     try:
         slug = cleantitle.geturl(title)
         return self.base_link + '/%s/' % slug
     except:
         return
Esempio n. 30
0
 def movie(self, imdb, title, localtitle, aliases, year):
     # NOTE(review): `if 1:` stands in for a commented-out try block, so any
     # exception here propagates to the caller instead of returning None.
     if 1:  # try:
         # '-' -> '%20': URL-encoded spaces for the site's search query
         clean_title = cleantitle.geturl(title).replace('-', '%20')
         # '$$$$$'-separated payload: search url, raw title, year, media kind
         url = urlparse.urljoin(
             self.base_link,
             (self.search_link % (clean_title)
              )) + '$$$$$' + title + '$$$$$' + year + '$$$$$' + 'movie'
         return url