Example #1
    def movie(self, imdb, title, year):
        self.zen_url = []
        try:

            # print("WATCHCARTOON")
            title = cleantitle_query(title)
            cleanmovie = cleantitle_get(title)
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)

            r = OPEN_URL(query, timeout='15')

            html = BeautifulSoup(r.content)
            print("WONLINE BeautifulSoup", html)
            r = html.findAll('div', attrs={'class': 'resultado'})
            print("WONLINE s1", r)
            for u in r:
                r_title = u.findAll('img')[0]['alt']
                r_title = r_title.encode('utf-8')

                r_href = u.findAll('a')[0]['href']
                r_href = r_href.encode('utf-8')

                print("WONLINE MATCHES", r_title, r_href)
                if year in r_title and cleanmovie == cleantitle_get(r_title):
                    if "http:" in r_href:
                        url = replaceHTMLCodes(r_href)
                        print("WONLINE PASSED", url)
                        return url

        except:
            return
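
All of the examples on this page are Python 2 methods taken from Kodi scraper classes. A minimal sketch of the imports they rely on follows; OPEN_URL, cleantitle_query, cleantitle_get, replaceHTMLCodes, google_tag, quality_tag, get_sources, get_files and client are helpers assumed to be defined elsewhere in the add-on:

# Assumed imports for the Python 2 scraper methods shown in these examples.
import re
import urllib
import urlparse

from BeautifulSoup import BeautifulSoup  # or, with beautifulsoup4: from bs4 import BeautifulSoup

# OPEN_URL, cleantitle_query, cleantitle_get, replaceHTMLCodes, google_tag,
# quality_tag, get_sources, get_files and client are add-on helpers imported
# from other modules of the scraper package and are not shown here.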
Example #2
	def movie(self, imdb, title, year):
		self.zen_url = []
		try:
			
			# print("WATCHCARTOON")
			title = cleantitle_query(title)
			cleanmovie = cleantitle_get(title)
			query = self.search_link % (urllib.quote_plus(title),year)
			query = urlparse.urljoin(self.base_link, query)
			
			r = OPEN_URL(query, timeout='15')
			
			html = BeautifulSoup(r.content)
			print ("WONLINE BeautifulSoup", html)
			r = html.findAll('div', attrs={'class': 'resultado'})
			print ("WONLINE s1",  r)
			for u in r:
				r_title = u.findAll('img')[0]['alt']
				r_title = r_title.encode('utf-8')
			
				r_href = u.findAll('a')[0]['href']
				r_href = r_href.encode('utf-8')
				
				
				print("WONLINE MATCHES", r_title,r_href)
				if year in r_title and cleanmovie == cleantitle_get(r_title):
					if "http:" in r_href:
						url = replaceHTMLCodes(r_href)
						print("WONLINE PASSED", url)
						return url
					
			
		except:
			return
Example #3
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:

            if url == None: return

            link = OPEN_URL(url, timeout='10').content
            html = BeautifulSoup(link)
            r = html.findAll('div', attrs={'class': 'play-btn'})

            for result in r:
                href = result.findAll('a')[0]['href'].encode('utf-8')
                try:
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(href.strip().lower()).netloc)[0]
                except:
                    host = 'none'
                quality = 'SD'

                url = replaceHTMLCodes(href)
                url = url.encode('utf-8')
                if host in hostDict:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'provider': 'Wso',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })

            return sources
        except:
            return sources
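
For orientation, every sources() method below returns a list of dictionaries with the same keys. A hypothetical single entry (host and URL are placeholders, not values from the example):

# Hypothetical shape of one entry returned by sources(); values are placeholders.
{
    'source': 'openload.co',                 # short host name extracted from the link
    'quality': 'SD',                         # 'SD' or a value from google_tag()/quality_tag()
    'provider': 'Wso',                       # name of the scraper that found the link
    'url': 'http://openload.co/embed/xyz',   # placeholder stream/embed URL
    'direct': False,                         # True only for directly playable (e.g. gvideo) streams
    'debridonly': False
}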
Example #4
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			
			if url == None: return

					
			link = OPEN_URL(url, timeout='10').content
			html = BeautifulSoup(link)
			r = html.findAll('div', attrs={'class': 'play-btn'})

			for result in r:
				href = result.findAll('a')[0]['href'].encode('utf-8')
				try:host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
				except: host = 'none'
				quality = 'SD'

				url = replaceHTMLCodes(href)
				url = url.encode('utf-8')
				if host in hostDict: sources.append({'source': host, 'quality': quality, 'provider': 'Wso', 'url': url, 'direct': False, 'debridonly': False})

			return sources
		except:
			return sources
Example #5
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			
			if url == None: return
			try:
					
				link = OPEN_URL(url, timeout='10')
				print("Watchfilm link", link.content)
				html = link.content
					
				r = re.compile('<a href="(.+?)" target="streamplayer">').findall(html)
				for result in r:
					print("Watchfilm SOURCES", result)
					result = result.encode('utf-8')
					
					if result.startswith("//"): result = "http:" + result
						
					if "player.watchfilm.to" in result:
						try:
							s = OPEN_URL(result, timeout='10')
							s = s.content
							match = re.compile('file:\s*"(.+?)",label:"(.+?)",').findall(s)
							for href, quality in match:
								quality = google_tag(href)
								print("WONLINE SCRIPTS", href,quality)
								sources.append({'source': 'gvideo', 'quality':quality, 'provider': 'Watchfilm', 'url': href, 'direct': True, 'debridonly': False})
						except:
							pass
						try:
							s = OPEN_URL(result, timeout='10')
							s = s.content

							match = re.compile('var ff =\s*"(.+?)";').findall(s)
							for href in match:
								
								quality = "SD"
								try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
								except: host = 'none'
								url = replaceHTMLCodes(href)
								url = url.encode('utf-8')
								if host in hostDict: sources.append({'source': host, 'quality': quality, 'provider': 'Watchfilm', 'url': url, 'direct': False, 'debridonly': False})
						except:
							pass

			except:
				pass

			return sources
		except:
			return sources
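
To clarify what the file/label pattern in the player.watchfilm.to branch (and the wp-embed.php branches below) pulls out of the player page, a standalone sketch; the sample JavaScript string is made up for illustration:

import re

# Made-up snippet of player JavaScript; real pages differ.
sample = 'sources: [{file: "http://redirector.googlevideo.com/videoplayback?id=abc",label:"720p",type:"video/mp4"}]'

# Same pattern as in the example above: captures the stream URL and its quality label.
for href, label in re.compile('file:\s*"(.+?)",label:"(.+?)",').findall(sample):
	print("stream", href, "label", label)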
Example #6
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			
			if url == None: return
			try:
					
				link = OPEN_URL(url, timeout='10')
				html = BeautifulSoup(link.content)
					
				r = html.findAll('iframe')
				for u in r:
					result = u['src'].encode('utf-8')
					print("WONLINE sources", result)
					if result.startswith("//"): result = "http:" + result
						
					if "wp-embed.php" in result:
						
						s = OPEN_URL(result, timeout='10')
						s = s.content
						match = re.compile('file:\s*"(.+?)",label:"(.+?)",').findall(s)
						for href, quality in match:
							quality = google_tag(href)
							print("WONLINE SCRIPTS", href,quality)
							sources.append({'source': 'gvideo', 'quality':quality, 'provider': 'Wonline', 'url': href, 'direct': True, 'debridonly': False})
					else:
						if "google" in result: quality = google_tag(result)
						else: quality = quality_tag(result)
						try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(result.strip().lower()).netloc)[0]
						except: host = 'none'
						url = replaceHTMLCodes(result)
						url = url.encode('utf-8')
						if host in hostDict: sources.append({'source': host, 'quality': quality, 'provider': 'Wonline', 'url': url, 'direct': False, 'debridonly': False})

			except:
				pass

			return sources
		except:
			return sources
Example #7
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            # for url in self.elysium_url:
            if url == None: return
            print("PROJECTFREETV SOURCE", url)
            r = OPEN_URL(url).content
            links = client.parseDOM(r, 'tr')
            for i in links:
                try:
                    print("PROJECTFREETV SOURCE r2", i)
                    url = re.findall('callvalue\((.+?)\)', i)[0]
                    url = re.findall('(http.+?)(?:\'|\")', url)[0]
                    url = replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    print("PROJECTFREETV SOURCE r3", url)

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    host = host.encode('utf-8')

                    if host not in hostDict: raise Exception()

                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'provider': 'Projectfree',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
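
The host check used throughout these sources() methods keeps only the last two labels of the link's hostname before comparing it against hostDict; a standalone sketch with a made-up URL:

import re
import urlparse  # Python 2; urllib.parse on Python 3

# Made-up link; the scrapers run this on every extracted href.
href = 'http://www.openload.co/embed/abcdef'
netloc = urlparse.urlparse(href.strip().lower()).netloc  # 'www.openload.co'
host = re.findall('([\w]+[.][\w]+)$', netloc)[0]         # 'openload.co'
print("host", host)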
Example #8
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:

            if url == None: return
            try:

                link = OPEN_URL(url, timeout='10')
                html = BeautifulSoup(link.content)

                r = html.findAll('iframe')
                for u in r:
                    result = u['src'].encode('utf-8')
                    print("WONLINE sources", result)
                    if result.startswith("//"): result = "http:" + result

                    if "wp-embed.php" in result:

                        s = OPEN_URL(result, timeout='10')
                        s = s.content
                        match = re.compile(
                            'file:\s*"(.+?)",label:"(.+?)",').findall(s)
                        for href, quality in match:
                            quality = google_tag(href)
                            print("WONLINE SCRIPTS", href, quality)
                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'provider': 'Wonline',
                                'url': href,
                                'direct': True,
                                'debridonly': False
                            })
                    else:
                        if "google" in result: quality = google_tag(result)
                        else: quality = quality_tag(result)
                        try:
                            host = re.findall(
                                '([\w]+[.][\w]+)$',
                                urlparse.urlparse(
                                    result.strip().lower()).netloc)[0]

                        except:
                            host = 'none'
                        url = replaceHTMLCodes(result)
                        url = url.encode('utf-8')
                        if host in hostDict:
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'provider': 'Wonline',
                                'url': url,
                                'direct': False,
                                'debridonly': False
                            })

            except:
                pass

            return sources
        except:
            return sources
Example #9
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			
			if url == None: return
			try:
					
				link = OPEN_URL(url, timeout='10')
				html = BeautifulSoup(link.content)
					
				r = html.findAll('iframe')
				for u in r:
					src = u['src'].encode('utf-8')
					print("WONLINE sources", src)
					if src.startswith("//"): src = "http:" + src
						
					if "wp-embed.php" in src or "player.123movies" in src:
						try:
							s = OPEN_URL(src).content
							
							match = get_sources(s)
							for h in match:
								files = get_files(h)
								for href in files:
									href = href.replace('\\','')
									quality = google_tag(href)
									
									sources.append({'source': 'gvideo', 'quality':quality, 'provider': 'Wonline', 'url': href, 'direct': True, 'debridonly': False})
						except:
							pass
							
							
					elif "raptu.com" in src:
						try:
							s = OPEN_URL(src).content
							
							match = get_sources(s)
							for h in match:
								files = re.compile('"file":"(.+?)","label":"(.+?)",').findall(h)
								for href, q in files:
									href = href.replace('\\','')
									quality = quality_tag(q)
									
									sources.append({'source': 'gvideo', 'quality':quality, 'provider': 'Wonline', 'url': href, 'direct': True, 'debridonly': False})
						except:
							pass
							
					else:
						if "google" in src: quality = google_tag(src)
						else: quality = quality_tag(src)
						try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(src.strip().lower()).netloc)[0]
						except: host = 'none'
						url = replaceHTMLCodes(src)
						url = url.encode('utf-8')
						if host in hostDict: sources.append({'source': host, 'quality': quality, 'provider': 'Wonline', 'url': url, 'direct': False, 'debridonly': False})

			except:
				pass

			return sources
		except:
			return sources
Example #10
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:

            if url == None: return
            try:

                link = OPEN_URL(url, timeout='10')
                print("Watchfilm link", link.content)
                html = link.content

                r = re.compile(
                    '<a href="(.+?)" target="streamplayer">').findall(html)
                for result in r:
                    print("Watchfilm SOURCES", result)
                    result = result.encode('utf-8')

                    if result.startswith("//"): result = "http:" + result

                    if "player.watchfilm.to" in result:
                        try:
                            s = OPEN_URL(result, timeout='10')
                            s = s.content
                            match = re.compile(
                                'file:\s*"(.+?)",label:"(.+?)",').findall(s)
                            for href, quality in match:
                                quality = google_tag(href)
                                print("WONLINE SCRIPTS", href, quality)
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': quality,
                                    'provider': 'Watchfilm',
                                    'url': href,
                                    'direct': True,
                                    'debridonly': False
                                })
                        except:
                            pass
                        try:
                            s = OPEN_URL(result, timeout='10')
                            s = s.content

                            match = re.compile('var ff =\s*"(.+?)";').findall(
                                s)
                            for href in match:

                                quality = "SD"
                                try:
                                    host = re.findall(
                                        '([\w]+[.][\w]+)$',
                                        urlparse.urlparse(
                                            href.strip().lower()).netloc)[0]

                                except:
                                    host = 'none'
                                url = replaceHTMLCodes(href)
                                url = url.encode('utf-8')
                                if host in hostDict:
                                    sources.append({
                                        'source': host,
                                        'quality': quality,
                                        'provider': 'Watchfilm',
                                        'url': url,
                                        'direct': False,
                                        'debridonly': False
                                    })
                        except:
                            pass

            except:
                pass

            return sources
        except:
            return sources