예제 #1
0
	def sources(self, url, hostDict, hostprDict):
		"""Collect direct Google-video stream sources from a film page.

		url -- site-relative page path (None yields no sources)
		hostDict / hostprDict -- accepted host lists (unused here)
		Returns a list of source dicts; on any error the list built
		so far is returned.
		"""
		try:
			sources = []

			if url == None: return sources

			# Resolve the relative path against the site base URL.
			url = urlparse.urljoin(self.base_link, url)

			r = client.request(url)

			# Reject low-quality CAM/TS rips up front.
			quality = client.parseDOM(r, 'div', attrs = {'class': 'poster-qulabel'})[0]
			quality = quality.lower()
			if 'cam' in quality or 'ts' in quality: raise Exception()


			# The player lives in an iframe; fetch it with the page as referer.
			ref = client.parseDOM(r, 'iframe', ret='src')[0]

			r = client.request(ref, referer=url)

			# Unpack any packed (p,a,c,k,e,d) scripts and append them so the
			# hidden <source> tags become visible to parseDOM below.
			s = re.compile('<script>(.+?)</script>', re.DOTALL).findall(r)

			for i in s:
				try: r += jsunpack.unpack(i)
				except: pass

			streams = client.parseDOM(r, 'source', ret='src')

			# Alternate servers are exposed via onclick handlers ('...?source=N').
			links = client.parseDOM(r, 'li', ret='onclick')
			links = [re.findall('\'(.+?\d+)', i) for i in links]
			links = [i[0] for i in links if i]

			for link in links:
				try:
					link = ref + "?source=%s" % link

					r = client.request(link, referer=link)
					s = re.compile('<script>(.+?)</script>', re.DOTALL).findall(r)
					for i in s:
						try: r += jsunpack.unpack(i)
						except: pass

					streams += client.parseDOM(r, 'source', ret='src')
				except:
					pass

			# De-duplicate while preserving first-seen order.
			streams = [x for y,x in enumerate(streams) if x not in streams[:y]]

			for i in streams:
				try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
				except: pass

			return sources
		except:
			return sources
예제 #2
0
def run(e):
    """Decode a JuicyCodes.Run(...) payload and unpack the resulting JS.

    e -- page source containing a JuicyCodes.Run("..."+"...") call.
    Returns the unpacked script text, or None when extraction,
    unpacking, or decoding fails.
    """
    try:
        # Pull the quoted payload out of the JuicyCodes.Run(...) call and
        # strip string-concatenation noise, leaving base64-alphabet chars.
        e = re.findall(r'JuicyCodes.Run\(([^\)]+)', e, re.IGNORECASE)[0]
        e = re.sub(r'\"\s*\+\s*\"','', e)
        e = re.sub(r'[^A-Za-z0-9+\\/=]','', e)
    except:
        return None

    t = ""
    n=r=i=s=o=u=a=f=0

    # Manual base64 decode against the 'Juice' alphabet: 4 chars -> 3 bytes,
    # with index 64 ('=') marking padding.
    while f < len(e):
        try:
            s = Juice.index(e[f]);f+=1;
            o = Juice.index(e[f]);f+=1;
            u = Juice.index(e[f]);f+=1;
            a = Juice.index(e[f]);f+=1;
            n = s << 2 | o >> 4; r = (15 & o) << 4 | u >> 2; i = (3 & u) << 6 | a
            t += chr(n)
            if 64 != u: t += chr(r)
            if 64 != a: t += chr(i)
        except ValueError:
            # BUGFIX: a char not in the Juice alphabet (e.g. a backslash the
            # filter above keeps) used to hit a bare 'continue' that left f
            # unchanged, spinning forever. Skip the offending char instead.
            f += 1
            continue
        except IndexError:
            # Truncated final quad -- nothing more to decode.
            break

    try:
        t = jsunpack.unpack(t)
        t = unicode(t, 'utf-8')
    except:
        t = None

    return t
예제 #3
0
    def resolve(self, data):
        """Resolve an intermediate page to the host URL it forwards to.

        data -- dict with 'UA', 'cookies', 'referer' and 'pageURL' keys.
        Returns the host URL string, or None on failure. Also enforces a
        minimum spacing between successive resolve() calls.
        """
        try:
            hostURL = None
            DELAY_PER_REQUEST = 1000 # In milliseconds.

            startTime = datetime.now()
            session = self._createSession(data['UA'], data['cookies'], data['referer'])
            r = self._sessionGET(data['pageURL'], session, allowRedirects=False)
            if r.ok:
                if 'Location' in r.headers:
                    hostURL = r.headers['Location'] # For most hosts they redirect.
                else:
                    # On rare cases they JS-pack the host link in the page source.
                    try:
                        hostURL = re.search(r'''go\(\\['"](.*?)\\['"]\);''', jsunpack.unpack(r.text)).group(1)
                    except:
                        pass # Or sometimes their page is just broken.

            # Do a little delay, if necessary, between resolve() calls.
            elapsed = int((datetime.now() - startTime).total_seconds() * 1000)
            if elapsed < DELAY_PER_REQUEST:
                xbmc.sleep(max(DELAY_PER_REQUEST - elapsed, 100))

            return hostURL
        except:
            self._logException()
            return None
예제 #4
0
    def resolve(self, data):
        """Resolve an intermediate page to the final host URL.

        data -- dict with 'UA', 'cookies', 'referer' and 'pageURL' keys.
        Returns the host URL, or None when it cannot be resolved. Keeps a
        minimum spacing between successive resolve() calls.
        """
        try:
            DELAY_PER_REQUEST = 1000  # Minimum spacing between calls, in ms.
            hostURL = None

            began = datetime.now()
            session = self._createSession(data['UA'], data['cookies'], data['referer'])
            response = self._sessionGET(data['pageURL'], session, allowRedirects=False)
            if response.ok:
                # Most hosts answer with a redirect header.
                hostURL = response.headers.get('Location')
                if hostURL is None:
                    # Otherwise the link is JS-packed into the page source.
                    match = None
                    try:
                        unpacked = jsunpack.unpack(response.text)
                        match = re.search(r'''go\(\\['"](.*?)\\['"]\);''', unpacked)
                    except:
                        pass  # Broken page; leave hostURL as None.
                    if match:
                        hostURL = match.group(1)

            # Pad out the elapsed time so calls stay rate-limited.
            spentMs = int((datetime.now() - began).total_seconds() * 1000)
            if spentMs < DELAY_PER_REQUEST:
                xbmc.sleep(max(DELAY_PER_REQUEST - spentMs, 100))

            return hostURL
        except:
            self._logException()
            return None
예제 #5
0
    def __token(self, dic):
        '''
        Takes a dictionary containing id, update, server, and ts, then returns
        a token which is used by info_path to retrieve grabber api
        information

        Thanks to coder-alpha for the updated bitshifting obfuscation
        https://github.com/coder-alpha

        Keyword arguments:

        dic -- dictionary - containing id, update, ts, server

        Returns:

        token -- string - a unique token, or None on any failure
        '''
        def bitshifthex(t, e):
            # Sum the character codes of both strings position by position
            # and return the total as a lowercase hex string.
            n = 0
            for i in range(0, max(len(t), len(e))):
                if i < len(e):
                    n += ord(e[i])
                if i < len(t):
                    n += ord(t[i])
            # Equivalent to the original format(int(hex(n), 16), 'x')
            # round-trip: n is always a non-negative int here.
            return format(n, 'x')

        def bitshiftadd(t):
            # Sum of character codes, each offset by its index.
            i = 0
            for e in range(0, len(t)):
                i += ord(t[e]) + e
            return i

        try:
            # Fetch the site's obfuscated JS and unpack it to recover the
            # seed string the token algorithm is keyed on.
            url = urlparse.urljoin(self.base_link, self.js_path)
            response = client.request(url)

            unpacked = jsunpack.unpack(response)

            phrase = 'function\(t,\s*i,\s*n\)\s*{\s*"use strict";\s*function e\(\)\s*{\s*return (.*?)\s*}\s*function r\(t\)'

            seed_var = re.findall(r'%s' % phrase, unpacked)[0]
            seed = re.findall(r'%s=.*?\"(.*?)\"' % seed_var, unpacked)[0]

            token = bitshiftadd(seed)

            for i in dic:
                token += bitshiftadd(bitshifthex(seed + i, dic[i]))

            return str(token)

        except Exception:
            return
예제 #6
0
def resolve(url):
    """Resolve an embed page URL to a playable stream URL.

    Follows the page's packed scripts to either a direct HLS (.m3u8)
    link (with User-Agent/Referer headers appended) or an RTMP command
    string. Returns None on failure.

    Fixes: removed Python-2-only 'print' debug statements (syntax errors
    under Python 3) and renamed the local 'file' which shadowed the builtin.
    """
    try:
        # Normalise any /embed/ or /v/ style link to the embed page.
        page = re.compile('//(.+?)/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(url)[0]
        page = 'http://%s/embed/%s' % (page[0], page[1])

        # Honour an explicit ?referer= override, else refer to ourselves.
        try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        except: referer = page

        result = client.request(page, referer=referer)

        # Unpack every packed script line and append it so the hidden
        # iframe/ch/sw values become visible to the regexes below.
        unpacked = ''
        packed = result.split('\n')

        for i in packed:
            try: unpacked += jsunpack.unpack(i)
            except: pass
        result += unpacked
        result = urllib.unquote_plus(result)

        result = re.sub('\s\s+', ' ', result)
        url = client.parseDOM(result, 'iframe', ret='src')[-1]
        url = url.replace(' ', '').split("'")[0]
        ch = re.compile('ch=""(.+?)""').findall(str(result))
        ch = ch[0].replace(' ','')
        sw = re.compile(" sw='(.+?)'").findall(str(result))
        url = url+'/'+ch+'/'+sw[0]

        result = client.request(url, referer=referer)
        media = re.compile("'file'.+?'(.+?)'").findall(result)[0]
        try:
            # Direct HTTP file: follow redirects and accept only HLS playlists.
            if not media.startswith('http'): raise Exception()
            url = client.request(media, output='geturl')
            if not '.m3u8' in url: raise Exception()
            url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': media})
            return url

        except:
            pass

        # Fall back to assembling an RTMP command line.
        strm = re.compile("'streamer'.+?'(.+?)'").findall(result)[0]
        swf = re.compile("SWFObject\('(.+?)'").findall(result)[0]

        url = '%s playpath=%s swfUrl=%s pageUrl=%s live=1 timeout=30' % (strm, media, swf, url)
        return url
    except:
        return
예제 #7
0
    def sources(self, url, hostDict, hostprDict):
        """Collect playable sources from a Moviego film page.

        url -- absolute film page URL
        hostDict -- recognised hoster names for non-direct iframes
        Returns a list of source dicts (possibly empty).

        Fix: the original mixed a space-indented def/try with a
        tab-indented body (TabError under Python 3); indentation is
        normalised to spaces with the logic unchanged.
        """
        try:
            sources = []
            alt_links = []
            play_links = []
            link = client.request(url)
            # Map the page's quality badge onto our quality labels.
            film_quality = re.findall('<div class="poster-qulabel">(.*?)</div>',link)[0]
            if "1080" in film_quality: quality = "1080p"
            elif "720" in film_quality: quality = "HD"
            else: quality = "SD"
            r = BeautifulSoup(link)
            r = r.findAll('iframe')

            try:
                for u in r:
                    iframe = u['src'].encode('utf-8')
                    if '/play/' in iframe:
                        # Player iframe: unpack its scripts, scanning the
                        # accumulated text for gvideo links.
                        videourl = client.request(iframe)
                        s = BeautifulSoup(videourl)
                        s = s.findAll('script')
                        unpacked_script = ""
                        for scripts in s:
                            try: unpacked_script += jsunpack.unpack(scripts.text)
                            except:pass
                            # NOTE: kept inside the loop exactly as in the
                            # original, so the buffer is re-scanned after
                            # every script.
                            links = get_video(unpacked_script)
                            for url in links:
                                try:sources.append({'source': 'gvideo', 'quality': google_tag(url), 'provider': 'Moviego', 'url': url, 'direct': True, 'debridonly': False})
                                except:pass
                    else:
                        try:
                            # Plain hoster embed: keep it only if the host is known.
                            host = get_host(iframe)
                            if host in hostDict: sources.append({'source': host, 'quality': quality, 'provider': 'Moviego', 'url': iframe, 'direct': True, 'debridonly': False})
                        except:
                            pass

            except:
                pass

            return sources
        except:
            return sources
예제 #8
0
    def __token(self, dic):
        """Build the grabber-API token from the site's JS seed string.

        dic -- dict of request parameters (id, update, ts, server, ...)
        Returns the token as a string, or None on any failure.

        Fix: the nested helpers mixed tabs and spaces (TabError under
        Python 3); indentation is normalised with the logic unchanged.
        """
        def bitshifthex(t, e):
            # Sum char codes of both strings pairwise; return lowercase hex.
            n = 0
            for i in range(0, max(len(t), len(e))):
                if i < len(e):
                    n += ord(e[i])
                if i < len(t):
                    n += ord(t[i])
            # Same value as the original format(int(hex(n), 16), 'x')
            # round-trip: n is always a non-negative int here.
            return format(n, 'x')

        def bitshiftadd(t):
            # Sum of character codes, each offset by its index.
            i = 0
            for e in range(0, len(t)):
                i += ord(t[e]) + e
            return i

        try:
            # Fetch and unpack the site JS to recover the token seed.
            url = urlparse.urljoin(self.base_link, self.js_path)
            response = client.request(url)

            unpacked = jsunpack.unpack(response)

            phrase = 'function\(t,\s*i,\s*n\)\s*{\s*"use strict";\s*function e\(\)\s*{\s*return (.*?)\s*}\s*function r\(t\)'

            seed_var = re.findall(r'%s' % phrase, unpacked)[0]
            seed = re.findall(r'%s=.*?\"(.*?)\"' % seed_var, unpacked)[0]

            token = bitshiftadd(seed)

            for i in dic:
                token += bitshiftadd(bitshifthex(seed + i, dic[i]))

            return str(token)

        except Exception:
            return
예제 #9
0
    def sources(self, url, hostDict, hostprDict):
        """Gather up to three Google-video sources from CMovies servers.

        self.url -- iterable of (movielink, referer) pairs prepared earlier.
        Returns a list of source dicts (possibly empty).

        Fixes: the original's tab-indented body under a space-indented
        def/try (TabError under Python 3) is normalised; a leftover debug
        print and a no-op 'referer = referer' assignment are removed.
        """
        try:
            sources = []
            for movielink, referer in self.url:
                try:
                    # Stop once we have enough working links.
                    if len(sources) > 2: break
                    pages = client.request(movielink, timeout='3')
                    scripts = re.findall('<script src="(.*?)">', pages)
                    for items in scripts:
                        if "slug=" in items:
                            if len(sources) > 2: break
                            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36'}
                            result = s.get(items, headers=headers, timeout=3).content
                            if jsunpack.detect(result):
                                # The playlist URL hides inside a packed script.
                                js_data = jsunpack.unpack(result)
                                match = re.search('"sourcesPlaylist"\s*:\s*"([^"]+)', js_data)
                                video_url = match.group(1).replace('\\','')
                                headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36'}
                                streams = s.get(video_url, headers=headers, timeout=3).json()
                                playurl = streams['playlist'][0]['sources']
                                for results in playurl:
                                    url = results['file']
                                    quality = results['label']
                                    if "1080" in quality: quality = "1080p"
                                    elif "720" in quality: quality = "HD"
                                    else: quality = "SD"
                                    url = url.encode('utf-8')
                                    # Keep only Google-hosted (direct) streams.
                                    if "google" in url:
                                        sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Watch5s', 'url': url, 'direct': True, 'debridonly': False})

                except:
                    pass
            return sources
        except:
            return sources
예제 #10
0
파일: sawlive.py 프로젝트: bialagary/mw
def resolve(url):
    #try:
        """Resolve a sawlive.tv embed/v URL to an RTMP command string.

        NOTE(review): the whole body sits under a commented-out 'try:'
        (hence the deep indent), so it runs unguarded and any failure
        raises to the caller.
        """
        # Extract the channel id and build the canonical embed page URL.
        page = re.compile('//.+?/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(url)[0]
        page = 'http://sawlive.tv/embed/%s' % page
        # Honour an explicit ?referer= override, else refer to ourselves.
        try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        except: referer = page
        result = client.request(page, referer=referer)


        # Unpack packed scripts and append them so hidden values surface.
        unpacked = ''
        packed = result.split('\n')
        for i in packed: 
            try: unpacked += jsunpack.unpack(i)
            except: pass
        result += unpacked
        result = urllib.unquote_plus(result)
        result = re.sub('\s\s+', ' ', result)
        # Inline simple JS string variables, repeating to settle nesting.
        var = re.compile('var\s(.+?)\s*=\s*\'(.+?)\'').findall(result)
        for i in range(100):
            for v in var: result = result.replace("' %s '" % v[0], v[1]).replace("'%s'" % v[0], v[1])


        result = re.compile('<iframe(.+?)</iframe>').findall(result)[-1]


        # Pull the iframe src pieces, trying progressively looser patterns.
        # NOTE(review): the two-group patterns yield tuples, on which the
        # .strip('/') below would fail -- presumably only the single-group
        # pattern matches in practice; confirm.
        url = re.compile('src\s*=\s*[\'|\"](.+?)[\'|\"].+?[\'|\"](.+?)[\'|\"]').findall(result)
        if len(url) == 0: url = re.compile('src\s*=\s*[\'|\"](.+?)[\'|\"](.+?)[\'|\"]').findall(result)
        if len(url) == 0: url = re.compile('src\s*=\s*[\'|\"](.+?)[\'|\"]').findall(result)

        url = '/'.join([i.strip('/') for i in url])

        # Read the RTMP parameters from the player page and build the command.
        result = client.request(url, referer=referer)
        strm = re.compile("'streamer'.+?'(.+?)'").findall(result)[0]
        file = re.compile("'file'.+?'(.+?)'").findall(result)[0]
        swf = re.compile("SWFObject\('(.+?)'").findall(result)[0]

        url = '%s playpath=%s swfUrl=%s pageUrl=%s live=1 timeout=30' % (strm, file, swf, url)
        return url
예제 #11
0
def streamdor(html, src, olod):
    """Extract a hoster link from an embedded streamdor.co player.

    html -- page HTML containing a streamdor.co/video/<id> reference
    src  -- referring page URL, sent as the Referer header
    olod -- when True, fall back to an (empty-url) openload source
    Returns a source-details dict, or '' when nothing applies/fails.
    """
    source = ''
    details = ''  # BUGFIX: pre-set so the except path can't hit a NameError.
    try:
        with requests.Session() as s:
            episodeId = re.findall('.*streamdor.co/video/(\d+)', html)[0]
            p = s.get('https://embed.streamdor.co/video/' + episodeId, headers={'referer': src})
            # JuicyCodes payload: strip concatenation noise, base64-decode,
            # then unpack the resulting packed JS.
            p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p.text, re.IGNORECASE)[0]
            p = re.sub(r'\"\s*\+\s*\"', '', p)
            p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
            p = base64.b64decode(p)
            p = jsunpack.unpack(p.decode('utf-8'))
            # Quality label, defaulting to SD when none is advertised.
            qual = 'SD'
            try:
                qual = re.findall(r'label:"(.*?)"', p)[0]
            except:
                pass
            try:
                url = re.findall(r'(https://streamango.com/embed/.*?)"', p, re.IGNORECASE)[0]
                source = "streamango.com"
                details = {'source': source, 'quality': qual, 'language': "en", 'url': url, 'info': '',
                           'direct': False, 'debridonly': False}
            except:
                if olod == True:
                    url = ''
                    source = 'openload.co'
                    details = {'source': source, 'quality': qual, 'language': "en", 'url': url, 'info': '',
                               'direct': False, 'debridonly': False}
                else: return ''


        return details
    except:
        print("Unexpected error in CMOVIES STREAMDOR Script:")
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return details
예제 #12
0
    def sources(self, url, hostDict, hostprDict):
        """Collect playable sources from a Moviego film page.

        url -- absolute film page URL
        hostDict -- recognised hoster names for non-direct iframes
        Returns a list of source dicts; on error, whatever was built so far.
        """
        try:
            sources = []
            alt_links = []
            play_links = []
            link = client.request(url)
            # Map the page's quality badge onto our quality labels.
            film_quality = re.findall(
                '<div class="poster-qulabel">(.*?)</div>', link)[0]
            if "1080" in film_quality: quality = "1080p"
            elif "720" in film_quality: quality = "HD"
            else: quality = "SD"
            r = BeautifulSoup(link)
            r = r.findAll('iframe')

            try:
                for u in r:
                    iframe = u['src'].encode('utf-8')
                    if '/play/' in iframe:
                        # Player iframe: unpack its packed scripts, then
                        # scan the accumulated text for gvideo links.
                        videourl = client.request(iframe)
                        s = BeautifulSoup(videourl)
                        s = s.findAll('script')
                        unpacked_script = ""
                        for scripts in s:
                            try:
                                unpacked_script += jsunpack.unpack(
                                    scripts.text)
                            except:
                                pass
                            # NOTE(review): this scan runs once per script,
                            # re-reading the partial buffer each time --
                            # looks like it can append duplicates; confirm.
                            links = get_video(unpacked_script)
                            for url in links:
                                try:
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': google_tag(url),
                                        'provider': 'Moviego',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                except:
                                    pass
                    else:
                        try:
                            # Plain hoster embed: keep only known hosts.
                            host = get_host(iframe)
                            if host in hostDict:
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'provider': 'Moviego',
                                    'url': iframe,
                                    'direct': True,
                                    'debridonly': False
                                })
                        except:
                            pass

            except:
                pass

            return sources
        except:
            return sources
예제 #13
0
    def sources(self, url, hostDict, hostprDict):
        """Build a list of direct CDN sources for a movie or episode.

        url -- urlencoded query string with title/year (plus tv fields).
        Returns a list of source dicts; on error, whatever was collected.
        """
        try:
            sources = []

            if url == None: return sources

            # Decode the query-string payload into a flat dict.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])  # NOTE(review): eval on scraped data -- unsafe if untrusted.
            headers = {}

            if 'tvshowtitle' in data:
                # Episode lookup: derive the year from the premiere date.
                year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(data['premiered'])[0][0]
                episode = '%01d' % int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)

            else:
                episode = None
                year = data['year']
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            referer = url
            r = client.request(url)
            if episode == None:
                # Movies: verify the release year before scraping further.
                y = re.findall('Released\s*:\s*.+?\s*(\d{4})', r)[0]
                if not year == y: raise Exception()

            # Collect the episode/part links listed on the page.
            r = client.parseDOM(r, 'div', attrs = {'class': 'sli-name'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))

            if not episode == None:
                r = [i[0] for i in r if i[1].lower().startswith('episode %02d:' % int(data['episode']))]
            else:
                r = [i[0] for i in r]

            for u in r:
                try:
                    # Each link wraps a streamdor.co player; pull its id.
                    p = client.request(u, referer=referer, timeout='10')
                    src = re.findall('src\s*=\s*"(.*streamdor.co/video/\d+)"', p)[0]
                    if src.startswith('//'):
                        src = 'http:'+src
                    episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]
                    p = client.request(src, referer=u)
                    try:
                        # Decode the JuicyCodes payload, then unpack the JS.
                        p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
                        p = re.sub(r'\"\s*\+\s*\"','', p)
                        p = re.sub(r'[^A-Za-z0-9+\\/=]','', p)
                        p = base64.b64decode(p)
                        p = jsunpack.unpack(p)
                        p = unicode(p, 'utf-8')
                    except:
                        continue

                    # Ask the grabber API for the actual stream list.
                    fl = re.findall(r'file"\s*:\s*"([^"]+)',p)[0]
                    post = {'episodeID': episodeId, 'file': fl, 'subtitle': 'false', 'referer': urllib.quote_plus(u)}
                    p = client.request(self.source_link, post=post, referer=src, XHR=True)

                    js = json.loads(p)

                    try:
                        ss = js['sources']
                        ss = [(i['file'], i['label']) for i in ss if 'file' in i]

                        for i in ss:
                            try:
                                sources.append({'source': 'CDN', 'quality': source_utils.label_to_quality(i[1]), 'language': 'en', 'url': i[0], 'direct': True, 'debridonly': False})
                            except: pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #14
0
    def sources(self, url, hostDict, hostprDict):
        """Collect gvideo sources for a movie or episode page.

        url -- either an absolute page URL or a urlencoded metadata query.
        Returns a list of source dicts; on error, whatever was collected.
        """
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                # Metadata query: rebuild the page URL from title fields.
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                if 'tvshowtitle' in data:
                    url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                    year = re.findall('(\d{4})', data['premiered'])[0]
                    url = client.request(url, output='geturl')
                    if url == None: raise Exception()

                    # Two cookie round-trips before the real fetch.
                    cookie = client.request(url, output='cookie')
                    cookie = client.request(url, cookie=cookie, output='cookie')
                    r = client.request(url, cookie=cookie)

                    # Verify the aired year shown on the page.
                    y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]

                    y = re.findall('(\d{4})', y)[0]
                    if not y == year: raise Exception()

                else:
                    url = data['url']
                    url = client.request(url, output='geturl')
                    if url == None: raise Exception()

                    cookie = client.request(url, output='cookie')
                    cookie = client.request(url, cookie=cookie, output='cookie')
                    r = client.request(url, cookie=cookie)

            else:
                url = urlparse.urljoin(self.base_link, url)
                r = client.request(url)

            # Inline player config: pull gvideo files straight from the page.
            # NOTE(review): on success this rebinds r to a list of file
            # strings, which then feeds parseDOM below -- confirm parseDOM
            # accepts lists.
            try:
                result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                r = re.findall('"file"\s*:\s*"(.+?)"', result)

                for url in r:
                    try:
                        url = url.replace('\\', '')
                        url = directstream.googletag(url)[0]
                        sources.append({'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass

            # Embedded /play/ iframes: unpack their scripts for more files.
            links = client.parseDOM(r, 'iframe', ret='src')

            for link in links:
                try:
                    url = link.replace('\/', '/')
                    url = client.replaceHTMLCodes(url)
                    url = 'http:' + url if url.startswith('//') else url
                    url = url.encode('utf-8')

                    if not '/play/' in url: raise Exception()

                    r = client.request(url, timeout='10')

                    s = re.compile('<script type="text/javascript">(.+?)</script>', re.DOTALL).findall(r)

                    for i in s:
                        try:
                            r += jsunpack.unpack(i)
                        except:
                            pass

                    try:
                        result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                        r = re.findall('"file"\s*:\s*"(.+?)"', result)

                        for url in r:
                            try:
                                url = url.replace('\\', '')
                                url = directstream.googletag(url)[0]
                                sources.append({'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False})
                            except:
                                pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #15
0
    def sources(self, url, hostDict, hostprDict):
        """Collect hoster sources for a movie/episode via streamdor embeds.

        url -- urlencoded metadata query (title/year, tv fields, aliases).
        Returns a list of source dicts; on error, whatever was collected.
        """
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])  # NOTE(review): eval on scraped data -- unsafe if untrusted.
            headers = {}

            if 'tvshowtitle' in data:
                # Try season URL with 1-digit then 2-digit numbering, then search.
                year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(data['premiered'])[0][0]
                episode = '%01d' % int(data['episode'])
                url = '%s/tv-series/%s-season-%01d/watch/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']))
                url = client.request(url, headers=headers, timeout='10', output='geturl')
                if url == None or url == self.base_link+'/':
                    url = '%s/tv-series/%s-season-%02d/watch/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']))
                    url = client.request(url, headers=headers, timeout='10', output='geturl')
                if url == None:
                    url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)

            else:
                episode = None
                year = data['year']
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            referer = url
            r = client.request(url, headers=headers)

            # Bail out unless the page's release year matches the metadata.
            y = re.findall('Release\s*:\s*.+?\s*(\d{4})', r)[0]

            if not year == y: raise Exception()


            # Server links ('/server-...'), optionally filtered to the episode.
            r = client.parseDOM(r, 'div', attrs = {'class': 'les-content'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]

            if not episode == None:
                r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
            else:
                r = [i[0] for i in r]

            r = [i for i in r if '/server-' in i]

            for u in r:
                try:
                    p = client.request(u, headers=headers, referer=referer, timeout='10')
                    src = re.findall('embed_src\s*:\s*"(.+?)"', p)[0]
                    if src.startswith('//'):
                        src = 'http:'+src
                    if not 'streamdor.co' in src: raise Exception()
                    episodeId = re.findall('streamdor.co.*/video/(.+?)"', p)[0]
                    p = client.request(src, referer=u)
                    try:
                        # Decode the JuicyCodes payload and unpack the JS.
                        p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
                        p = re.sub(r'\"\s*\+\s*\"','', p)
                        p = re.sub(r'[^A-Za-z0-9+\\/=]','', p)
                        p = base64.b64decode(p)
                        p = jsunpack.unpack(p)
                        p = unicode(p, 'utf-8')
                    except:
                        continue

                    try:
                        # The unpacked JS exposes the hoster embed URL.
                        url = re.findall(r'embedURL"\s*:\s*"([^"]+)',p)[0]
                        valid, hoster = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue
                        urls, host, direct = source_utils.check_directstreams(url, hoster)
                        for x in urls:
                            sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #16
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape StreamLord for a direct CDN stream URL.

        url -- absolute page URL or a urlencoded metadata query.
        Returns a one-element list of source dicts, or [] on failure.
        """
        try:
            sources = []

            if url == None: return sources

            # BUGFIX: 'post' was unbound on the direct-URL path below when no
            # login happened, raising a NameError. Define it up front.
            post = None

            if (self.user != '' and self.password != ''): #raise Exception()

                # Log in and keep the resulting UA/cookie for later requests.
                login = urlparse.urljoin(self.base_link, '/login.html')

                post = urllib.urlencode({'username': self.user, 'password': self.password, 'submit': 'Login'})

                cookie = client.request(login, post=post, output='cookie', close=False)

                r = client.request(login, post=post, cookie=cookie, output='extended')

                headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            else:
                headers = {}


            if not str(url).startswith('http'):

                # Metadata query: search the site for the matching page.
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                year = data['year']

                query = urlparse.urljoin(self.base_link, self.search_link)

                post = urllib.urlencode({'searchapi2': title})

                r = client.request(query, post=post, headers=headers)

                if 'tvshowtitle' in data:
                    r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                    r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                else:
                    r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                    r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]

                # Keep the first result whose cleaned title matches ours.
                r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                r = [i[0] for i in r][0]

                # The page sometimes serves a 'failed' stub; retry a few times.
                u = urlparse.urljoin(self.base_link, r)
                for i in range(3):
                    r = client.request(u, headers=headers)
                    if not 'failed' in r: break

                if 'season' in data and 'episode' in data:
                    r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
                    r = [i for i in r if '-s%02de%02d-' % (int(data['season']), int(data['episode'])) in i.lower()][0]

                    r = urlparse.urljoin(self.base_link, r)

                    r = client.request(r, headers=headers)

            else:
                r = urlparse.urljoin(self.base_link, url)

                r = client.request(r, post=post, headers=headers)



            quality = 'HD' if '-movie-' in r else 'SD'

            # The stream URL is assembled by obfuscated inline JS; mirror it.
            try:
                f = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
                f = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]

                u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
                u = re.findall('\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)', u)[0]

                a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
                b = client.parseDOM(r, 'span', {'id': u[2]})[0]

                url = u[0] + a + b
                url = url.replace('"', '').replace(',', '').replace('\/', '/')
                url += '|' + urllib.urlencode(headers)
            except:
                # Fallbacks: a fully packed page, then a self-returning function.
                try:
                    url =  r = jsunpack.unpack(r)
                    url = url.replace('"', '')
                except:
                    url = re.findall(r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)',r,re.DOTALL)[0][1]

            sources.append({'source': 'cdn', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False, 'autoplay': True})

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('StreamLord - Exception: \n' + str(failure))
            return sources
예제 #17
0
    def sources(self, url, hostDict, hostprDict):
        """Collect German-language stream sources from a show/movie page.

        ``url`` is a path relative to ``self.base_link`` (or absolute).
        ``hostDict``/``hostprDict`` are the resolver's known hoster domains.
        Returns a list of source dicts; returns [] (never raises) on failure.
        """
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            # Player tab bar: each <li> pairs an anchor (tab id) with a flag icon.
            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'),
                     dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            # (tab id without the leading '#', [language code from the flag image]).
            rels = [(i[0][0].attrs['href'][1:],
                     re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src']))
                    for i in rels if i[0] and i[1]]
            # Keep German tabs only.
            rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
            # ROBUSTNESS: drop tab ids whose pane was not found; indexing i[0]
            # blindly below used to abort the whole scrape on a single miss.
            r = [i for i in r if i]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                               ''.join([i[0].content for i in r]))
            # BUGFIX: both comprehensions below had their 'for' clauses reversed
            # ("for l in parse_dom(i, ...) for i in r"), which raises NameError
            # on Python 3 and used a leaked loop variable on Python 2, so the
            # iframe/source links were never actually collected.
            links += [l.attrs['src']
                      for i in r
                      for l in dom_parser.parse_dom(i, 'iframe',
                                                    attrs={'class': 'metaframe'},
                                                    req='src')]
            links += [l.attrs['src']
                      for i in r
                      for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in links:
                try:
                    # Strip BBCode-style tags and decode HTML entities.
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if self.domains[0] in i:
                        # Self-hosted embed: fetch it and unpack any obfuscated
                        # player scripts so file/label pairs become visible.
                        i = client.request(i, referer=url)

                        # base64 payloads passed to a JS call, e.g. f("aGVsbG8=").
                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try:
                                i += jsunpack.unpack(
                                    base64.decodestring(re.sub('"\s*\+\s*"', '', x)))
                            except:
                                pass

                        # Dean-Edwards p.a.c.k.e.d eval() blocks.
                        for x in re.compile('(eval\(function.*?)</script>',
                                            re.DOTALL).findall(i):
                            try:
                                i += jsunpack.unpack(x)
                            except:
                                pass

                        i = [(match[0], match[1]) for match in re.findall(
                            '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                            i, re.DOTALL)]
                        i = [(x[0].replace('\/', '/'),
                              source_utils.label_to_quality(x[1])) for x in i]

                        # BUGFIX: loop variables renamed -- the original iterated
                        # 'for url, quality in i', clobbering the page url that is
                        # used as referer for every subsequent link.
                        for stream_url, stream_quality in i:
                            sources.append({'source': 'gvideo',
                                            'quality': stream_quality,
                                            'language': 'de',
                                            'url': stream_url,
                                            'direct': True,
                                            'debridonly': False})
                    else:
                        try:
                            valid, host = source_utils.is_host_valid(i, hostDict)
                            if not valid: continue

                            urls = []
                            if 'google' in i:
                                host = 'gvideo'
                                direct = True
                                urls = directstream.google(i)
                            if 'google' in i and not urls and directstream.googletag(i):
                                host = 'gvideo'
                                direct = True
                                urls = [{'quality': directstream.googletag(i)[0]['quality'],
                                         'url': i}]
                            elif 'ok.ru' in i:
                                host = 'vk'
                                direct = True
                                urls = directstream.odnoklassniki(i)
                            elif 'vk.com' in i:
                                host = 'vk'
                                direct = True
                                urls = directstream.vk(i)
                            elif not urls:
                                # BUGFIX: this used to be a bare 'else', which
                                # clobbered successfully resolved google urls
                                # with a single indirect SD entry.
                                direct = False
                                urls = [{'quality': 'SD', 'url': i}]

                            for x in urls:
                                sources.append({'source': host,
                                                'quality': x['quality'],
                                                'language': 'de',
                                                'url': x['url'],
                                                'direct': direct,
                                                'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #18
0
    def sources(self, url, hostDict, hostprDict):
        """Collect German ('de') stream sources for the given page url.

        Returns a list of source dicts ({'source', 'quality', 'language',
        'url', 'direct', 'debridonly'}); swallows all errors and returns
        whatever was collected so far.
        """
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            # Player tab bar: each <li> pairs an anchor (the tab id) with a flag icon.
            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            # (tab id without the leading '#', [language code parsed from the flag image path]).
            rels = [(i[0][0].attrs['href'][1:], re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
            # Keep only the tabs flagged as German.
            rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

            # For each tab pane: prefer an inline scripted link, else the metaframe iframe src.
            # NOTE(review): i[0].content raises IndexError if a pane id matches no div,
            # aborting to the outer except -- presumably never happens in practice; verify.
            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
            r = [(re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', i[0].content), dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')) for i in r]
            r = [i[0][0] if len(i[0]) > 0 else i[1][0].attrs['src'] for i in r if i[0] or i[1]]

            for i in r:
                try:
                    # Strip BBCode-style tags and decode HTML entities.
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if self.base_link in i:
                        # Own-domain embed: unpack p.a.c.k.e.d player scripts, then
                        # treat every file":"..." occurrence as a Google video url.
                        i = client.request(i, referer=url)

                        s = re.compile('(eval\(function.*?)</script>', re.DOTALL).findall(i)

                        for x in s:
                            try: i += jsunpack.unpack(x)
                            except: pass

                        i = re.findall('file"?\s*:\s*"(.+?)"', i)

                        for u in i:
                            try:
                                u = u.replace('\\/', '/').replace('\/', '/')
                                u = client.replaceHTMLCodes(u).encode('utf-8')

                                sources.append({'source': 'gvideo', 'quality': directstream.googletag(u)[0]['quality'], 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                            except:
                                pass
                    else:
                        # Foreign hoster: classify and resolve direct streams where possible.
                        try:
                            valid, host = source_utils.is_host_valid(i, hostDict)
                            if not valid: continue

                            # NOTE(review): the second 'if' starts a new if/elif/else chain,
                            # so a google link for which directstream.google() returned urls
                            # falls through to the final 'else' and gets overwritten with a
                            # single indirect SD entry -- looks unintended; confirm before
                            # changing, as downstream may rely on it.
                            urls = []
                            if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i);
                            if 'google' in i and not urls and directstream.googletag(i):  host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                            elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                            elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                            else: direct = False; urls = [{'quality': 'SD', 'url': i}]

                            for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #19
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve playable links for a movie/episode page.

        ``url`` is either an absolute page URL or an urlencoded metadata
        query (title/year/season/episode) used to search the site.
        Returns a list holding at most one 'cdn' source dict; returns []
        on any failure (all errors are swallowed).
        """
        try:
            sources = []

            if url == None: return sources

            # BUGFIX: initialise post/headers up front. 'post' was previously
            # bound only inside the login/search branches, so an absolute URL
            # with empty credentials raised NameError at the else-branch
            # request below (silently swallowed -> no sources ever returned).
            post = None
            headers = {}

            if (self.user != '' and self.password != ''):
                # Log in and capture the session cookie plus user agent so every
                # later request carries the authenticated session.
                login = urlparse.urljoin(self.base_link, '/login.html')

                post = urllib.urlencode({
                    'username': self.user,
                    'password': self.password,
                    'submit': 'Login'
                })

                cookie = client.request(login,
                                        post=post,
                                        output='cookie',
                                        close=False)

                r = client.request(login,
                                   post=post,
                                   cookie=cookie,
                                   output='extended')

                # output='extended' returns a tuple; [3] is the request headers,
                # [4] the response cookie string.
                headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}

            if not str(url).startswith('http'):
                # Metadata query: search the site by title and pick the match.
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']

                year = data['year']

                query = urlparse.urljoin(self.base_link, self.search_link)

                post = urllib.urlencode({'searchapi2': title})

                r = client.request(query, post=post, headers=headers)

                if 'tvshowtitle' in data:
                    r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                    r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i))
                         for i in r]
                else:
                    r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                    r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i))
                         for i in r]

                # Keep hits whose normalised title matches exactly; take the first.
                r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                r = [
                    i for i in r
                    if cleantitle.get(title) == cleantitle.get(i[1])
                ]
                r = [i[0] for i in r][0]

                u = urlparse.urljoin(self.base_link, r)
                # The site intermittently serves a 'failed' page; retry up to 3x.
                for i in range(3):
                    r = client.request(u, headers=headers)
                    if not 'failed' in r: break

                if 'season' in data and 'episode' in data:
                    # Drill down from the show page to the -sXXeYY- episode page.
                    r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
                    r = [
                        i for i in r if '-s%02de%02d-' %
                        (int(data['season']),
                         int(data['episode'])) in i.lower()
                    ][0]

                    r = urlparse.urljoin(self.base_link, r)

                    r = client.request(r, headers=headers)

            else:
                r = urlparse.urljoin(self.base_link, url)

                r = client.request(r, post=post, headers=headers)

            quality = 'HD' if '-movie-' in r else 'SD'

            try:
                # Obfuscated player: the file URL is assembled by a JS function
                # from an array literal plus a hidden <span>'s innerHTML.
                f = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
                f = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]

                u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
                u = re.findall(
                    '\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)',
                    u)[0]

                a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
                b = client.parseDOM(r, 'span', {'id': u[2]})[0]

                url = u[0] + a + b
                url = url.replace('"', '').replace(',', '').replace('\/', '/')
                # Append the session headers so the player can replay them.
                url += '|' + urllib.urlencode(headers)
            except:
                try:
                    # Fallback 1: the whole page is a p.a.c.k.e.d script.
                    url = r = jsunpack.unpack(r)
                    url = url.replace('"', '')
                except:
                    # Fallback 2: file URL returned by a tiny helper function.
                    url = re.findall(
                        r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)',
                        r, re.DOTALL)[0][1]

            sources.append({
                'source': 'cdn',
                'quality': quality,
                'language': 'en',
                'url': url,
                'direct': True,
                'debridonly': False,
                'autoplay': True
            })

            return sources
        except:
            return sources
예제 #20
0
def gamato_links(url, name, poster):  #12
    """Register playable link(s) (plus optional trailer) for a movie page.

    Fetches *url*, extracts the stream link, description, fanart and a
    YouTube trailer if present, then adds directory entries via addDir().
    Returns None; all errors are swallowed so the UI never crashes.
    """
    try:
        # Percent-encode the URL while keeping the ':/.' structure characters.
        url = urllib.quote(url, ':/.')
        data = client.request(url)
        try:
            desc = client.parseDOM(data,
                                   'div',
                                   attrs={'itemprop': 'description'})[0]
            desc = clear_Title(desc)
        except IndexError:
            desc = 'N/A'

        try:
            # Preferred path: jwplayer config (file + poster) embedded in the page.
            match = re.findall(
                r'''file\s*:\s*['"](.+?)['"],poster\s*:\s*['"](.+?)['"]\}''',
                data, re.DOTALL)[0]
            link, _poster = match[0], match[1]
        except IndexError:
            # Fallback: resolve the first hoster iframe from an option-N pane.
            frame = client.parseDOM(data, 'div', attrs={'id':
                                                        r'option-\d+'})[0]
            frame = client.parseDOM(frame, 'iframe', ret='src')[0]
            if 'cloud' in frame:
                #sources: ["http://cloudb.me/4fogdt6l4qprgjzd2j6hymoifdsky3tfskthk76ewqbtgq4aml3ior7bdjda/v.mp4"],
                match = client.request(frame)
                try:
                    # Unpack the p.a.c.k.e.d player and pull the sources array;
                    # append playback headers in Kodi's url|headers format.
                    from resources.lib.modules import jsunpack
                    if jsunpack.detect(match):
                        match = jsunpack.unpack(match)
                    match = re.findall('sources:\s*\[[\'"](.+?)[\'"]\]', match,
                                       re.DOTALL)[0]
                    match += '|User-Agent=%s&Referer=%s' % (urllib.quote(
                        client.agent()), frame)
                except IndexError:
                    # Retry with the same unpacker under a different alias.
                    # NOTE(review): this branch duplicates the one above almost
                    # verbatim -- presumably a historical workaround; verify it
                    # is still needed.
                    from resources.lib.modules import jsunpack as jsun
                    if jsun.detect(match):
                        match = jsun.unpack(match)
                        match = re.findall('sources:\s*\[[\'"](.+?)[\'"]\]',
                                           match, re.DOTALL)[0]
                        match += '|User-Agent=%s&Referer=%s' % (urllib.quote(
                            client.agent()), frame)

            else:
                match = frame
            link, _poster = match, poster

        try:
            fanart = client.parseDOM(data, 'div', attrs={'class': 'g-item'})[0]
            fanart = client.parseDOM(fanart, 'a', ret='href')[0]
        except IndexError:
            fanart = FANART
        try:
            # NOTE(review): 'iconimage' is not defined in this function --
            # presumably a module-level global; confirm it is set before this
            # runs, otherwise the trailer entry silently never appears.
            trailer = client.parseDOM(data, 'iframe', ret='src')
            trailer = [i for i in trailer if 'youtube' in i][0]
            addDir('[B][COLOR lime]Trailer[/COLOR][/B]', trailer, 100,
                   iconimage, fanart, str(desc))
        except BaseException:
            pass

        addDir(name, link, 100, poster, fanart, str(desc))
    except BaseException:
        return
    # Only reached on success (the except above returns early).
    views.selectView('movies', 'movie-view')
예제 #21
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape Moviego stream sources for a movie page url.

        Collects Google-video links from the main embed frame and from any
        "Alternative N" mirror frames, plus an optional direct 'cdn' file
        link found on the page itself. Returns a list of source dicts;
        returns [] on failure.
        """
        try:
            sources = []
            alt_links = []
            play_links = []

            link = client.request(url)

            # Page-level quality badge, e.g. "1080p WEB-DL" / "720p" / "CAM".
            film_quality = re.findall(
                '<div class="poster-qulabel">(.*?)</div>', link)[0]
            if "1080" in film_quality: quality = "1080p"
            elif "720" in film_quality: quality = "HD"
            else: quality = "SD"

            try:
                iframe = re.findall('<iframe src="([^"]+)"', link)[0]
                if iframe:
                    original_frame = iframe
                    videourl = client.request(iframe)

                    for pack in re.compile('<script>(.+?)</script>',
                                           re.DOTALL).findall(videourl):
                        if not jsunpack.detect(pack):
                            continue
                        data_script = jsunpack.unpack(pack)
                        # Remember "Alternative N" mirror ids for the second pass.
                        try:
                            for alts in re.findall('Alternative (\d+)<',
                                                   data_script):
                                alt_links.append(alts)
                        except:
                            pass
                        # BUGFIX: loop variable renamed from 'url' -- it used
                        # to clobber the page url parameter.
                        for src in re.findall('<source src="([^"]+)"',
                                              data_script):
                            src = src.replace(' ', '')
                            if "google" in src:
                                play_links.append(src)
            except:
                pass

            try:
                # Second pass: fetch each alternative mirror frame.
                for ids in alt_links:
                    newframes = original_frame + "?source=a" + ids
                    newurl = client.request(newframes)
                    for new_pack in re.compile('<script>(.+?)</script>',
                                               re.DOTALL).findall(newurl):
                        if not jsunpack.detect(new_pack):
                            continue
                        new_data_script = jsunpack.unpack(new_pack)
                        for new_url in re.findall('<source src="([^"]+)"',
                                                  new_data_script):
                            new_url = new_url.replace(' ', '')
                            if "google" in new_url:
                                play_links.append(new_url)
            except:
                pass

            try:
                dupes = []
                for gv_url in play_links:
                    if not gv_url in dupes:
                        dupes.append(gv_url)
                        # BUGFIX: keep the gvideo quality in its own variable;
                        # the original overwrote 'quality', so the 'cdn' source
                        # below inherited the last gvideo quality instead of
                        # the page-level one.
                        gv_quality = directstream.googletag(gv_url)[0]['quality']
                        gv_url = client.replaceHTMLCodes(gv_url)
                        gv_url = gv_url.encode('utf-8')
                        sources.append({
                            'source': 'gvideo',
                            'quality': gv_quality,
                            'provider': 'Moviego',
                            'url': gv_url,
                            'direct': True,
                            'debridonly': False
                        })
            except:
                pass

            try:
                # Optional direct file link embedded in the page itself.
                file_url = re.findall('file:\s+"([^"]+)"', link)[0]
                sources.append({
                    'source': 'cdn',
                    'quality': quality,
                    'provider': 'Moviego',
                    'url': file_url,
                    'direct': True,
                    'debridonly': False
                })
            except:
                pass

            return sources
        except:
            return sources
예제 #22
0
def resolve(url):
    """Resolve a finecast.tv channel link to a playable rtmp/http URL.

    *url* carries 'u' (channel id) and optionally 'referer' in its query
    string. Two strategies are tried: a direct file: assignment in the
    embed page, then a JS-assembled rtmp URL reconstructed from variable
    and element-id lookups. Returns the playable URL, or None on failure.
    """
    try:
        try:
            referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        except:
            referer=url

        # NOTE(review): 'id' shadows the builtin; kept as-is to preserve bytes.
        id = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        url = 'http://www.finecast.tv/embed4.php?u=%s&vw=640&vh=450'%id
        result = client.request(url, referer=referer, mobile = True)
        # Unpack every p.a.c.k.e.d line and append it to the page body so the
        # regexes below can see the deobfuscated script.
        unpacked = ''
        packed = result.split('\n')
        for i in packed: 
            try: unpacked += jsunpack.unpack(i)
            except: pass
        result += unpacked
   
        # All "var name = value;" assignments; used to substitute variable
        # names appearing inside the file: expression below.
        var = re.compile('var\s(.+?)\s*=\s*(?:\'|\"|\s*)(.+?)(?:\'|\"|\s*);').findall(result)
        
        try:
            # Strategy 1: file: expression made of quoted fragments and vars.
            url = re.compile('file\s*:\s*(.+?)\n').findall(result)

            # Split on quotes, drop concatenation glue, substitute known vars,
            # and rejoin into candidate stream URLs.
            url = [i.split("'") for i in url]
            url = [[x.replace('+','').replace(',','') for x in i if not x == ''] for i in url]
            url = [[x.replace(x,[v[1] for v in var if v[0] == x][0]) if len([v[1] for v in var if v[0] == x]) > 0 else x for x in i] for i in url]
            url = [''.join(i) for i in url]
            
            url = [i for i in url if i.startswith('rtmp') or '.m3u8' in i]
            url = random.choice(url)
          
            # NOTE(review): '\2020' in this literal is an octal escape
            # (chr(0x82) + '0'), not a literal backslash -- presumably meant
            # r'WIN\2020'; confirm against what the server expects.
            dummy = ' swfUrl=http://www.finecast.tv/player6/jwplayer.flash.swf flashver=WIN\2020,0,0,228 live=1 timeout=14 swfVfy=1 pageUrl=http://www.finecast.tv/'
            if url.startswith('rtmp://'):
                url = 'rtmp://play.finecast.tv:1935/live/?'+url.split('?')[1]+ str(dummy)
            else:
                url = 'http://play.finecast.tv/live/'+url.split('live/')[1]+ str(dummy)
                pass
            return url
        
        except:
            pass
        # Strategy 2: reconstruct the rtmp URL from two ["..."].join("")
        # arrays, an auth variable, and a hidden element's innerHTML.
        ids = re.findall('id=(.+?)>(.+?)<',result)
        result = re.sub(r"'(.+?)'", r'\1', result)
        x = re.findall('\[(.+?)\].join\(""\)',result)
        auth, auth2 = re.findall('\[.+?\].join\(""\).+?\+\s*(.+?).join\(""\).+?document.getElementById\("(.+?)"\).innerHTML\);',result)[0]
        for v in var:
            if v[0] == auth:
                auth = re.findall('\[(.+?)\]',v[1])[0]
        for v in ids:
            if v[0] == auth2:
                auth2 = v[1]
        rtmp, file = x[0], x[1]
        rtmp = rtmp.replace('"','').replace(',','') + auth.replace('"','').replace(',','') + auth2.replace('"','').replace(',','')
        file = file.replace('"','').replace(',','')
        # NOTE(review): self-assignment below is a no-op; kept byte-identical.
        rtmp = rtmp 
        rtmp = rtmp.replace(r'\/','/')
        url = rtmp + '/' + file + ' swfUrl=http://www.finecast.tv/player6/jwplayer.flash.swf flashver=WIN\2020,0,0,228 live=1 timeout=14 swfVfy=1 pageUrl=http://www.finecast.tv/'

        
        return url
    except:
        return
예제 #23
0
    def sources(self, url, hostDict, hostprDict):
        """Gather German ('de') stream sources for the given page.

        Same contract as before: returns a list of source dicts and
        swallows every error, returning whatever was collected so far.
        """
        sources = []

        try:
            if not url:
                return sources

            page_url = urlparse.urljoin(self.base_link, url)
            html = client.request(page_url)

            # Walk the player tab bar and keep only tabs flagged as German.
            tab_items = dom_parser.parse_dom(html, 'nav', attrs={'class': 'player'})
            tab_items = dom_parser.parse_dom(tab_items, 'ul', attrs={'class': 'idTabs'})
            tab_items = dom_parser.parse_dom(tab_items, 'li')

            tab_ids = []
            for item in tab_items:
                anchors = dom_parser.parse_dom(item, 'a', attrs={'class': 'options'}, req='href')
                flags = dom_parser.parse_dom(item, 'img', req='src')
                if not anchors or not flags:
                    continue
                lang = re.findall('\/flags\/(\w+)\.png$', flags[0].attrs['src'])
                if lang and lang[0].lower() == 'de':
                    tab_ids.append(anchors[0].attrs['href'][1:])

            # For every tab pane take the scripted link if present, otherwise
            # fall back to its metaframe iframe src.
            candidates = []
            for pane in [dom_parser.parse_dom(html, 'div', attrs={'id': t}) for t in tab_ids]:
                scripted = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', pane[0].content)
                frames = dom_parser.parse_dom(pane, 'iframe', attrs={'class': 'metaframe'}, req='src')
                if scripted:
                    candidates.append(scripted[0])
                elif frames:
                    candidates.append(frames[0].attrs['src'])

            for candidate in candidates:
                try:
                    # Strip BBCode-style tags and decode HTML entities.
                    candidate = re.sub('\[.+?\]|\[/.+?\]', '', candidate)
                    candidate = client.replaceHTMLCodes(candidate)

                    if self.base_link not in candidate:
                        # Foreign hoster: classify and resolve where possible.
                        try:
                            valid, host = source_utils.is_host_valid(candidate, hostDict)
                            if not valid:
                                continue

                            urls = []
                            if 'google' in candidate:
                                host = 'gvideo'; direct = True
                                urls = directstream.google(candidate)
                            if 'google' in candidate and not urls and directstream.googletag(candidate):
                                host = 'gvideo'; direct = True
                                urls = [{'quality': directstream.googletag(candidate)[0]['quality'], 'url': candidate}]
                            elif 'ok.ru' in candidate:
                                host = 'vk'; direct = True
                                urls = directstream.odnoklassniki(candidate)
                            elif 'vk.com' in candidate:
                                host = 'vk'; direct = True
                                urls = directstream.vk(candidate)
                            else:
                                direct = False
                                urls = [{'quality': 'SD', 'url': candidate}]

                            for entry in urls:
                                sources.append({'source': host, 'quality': entry['quality'],
                                                'language': 'de', 'url': entry['url'],
                                                'direct': direct, 'debridonly': False})
                        except:
                            pass
                        continue

                    # Own-domain embed: unpack p.a.c.k.e.d player scripts, then
                    # treat every file":"..." occurrence as a Google video url.
                    body = client.request(candidate, referer=page_url)

                    for packed in re.compile('(eval\(function.*?)</script>', re.DOTALL).findall(body):
                        try:
                            body += jsunpack.unpack(packed)
                        except:
                            pass

                    for stream in re.findall('file"?\s*:\s*"(.+?)"', body):
                        try:
                            stream = stream.replace('\\/', '/').replace('\/', '/')
                            stream = client.replaceHTMLCodes(stream).encode('utf-8')
                            sources.append({'source': 'gvideo',
                                            'quality': directstream.googletag(stream)[0]['quality'],
                                            'language': 'de', 'url': stream,
                                            'direct': True, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #24
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (
                    self.base_link, cleantitle.geturl(data['tvshowtitle']),
                    int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url == None: raise Exception()

                r = client.request(url)

                y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0]

                y = re.findall('(\d{4})', y)[0]
                if not y == year: raise Exception()

            else:
                url = client.request(url, output='geturl')
                if url == None: raise Exception()

                r = client.request(url)

            try:
                result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                r = re.findall('"file"\s*:\s*"(.+?)"', result)

                for url in r:
                    try:
                        url = url.replace('\\', '')
                        url = directstream.googletag(url)[0]
                        sources.append({
                            'source': 'gvideo',
                            'quality': url['quality'],
                            'language': 'en',
                            'url': url['url'],
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

            links = client.parseDOM(r, 'iframe', ret='src')

            for link in links:
                try:
                    if 'openload.io' in link or 'openload.co' in link or 'oload.tv' in link:
                        sources.append({
                            'source': 'openload.co',
                            'quality': 'SD',
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })
                        raise Exception()
                    elif 'putstream' in link:
                        r = client.request(link)
                        r = re.findall(r'({"file.*?})', r)
                        for i in r:
                            try:
                                i = json.loads(i)
                                url = i['file']
                                q = source_utils.label_to_quality(i['label'])
                                if 'google' in url:
                                    valid, hoster = source_utils.is_host_valid(
                                        url, hostDict)
                                    urls, host, direct = source_utils.check_directstreams(
                                        url, hoster)
                                    for x in urls:
                                        sources.append({
                                            'source': host,
                                            'quality': x['quality'],
                                            'language': 'en',
                                            'url': x['url'],
                                            'direct': direct,
                                            'debridonly': False
                                        })

                                else:
                                    valid, hoster = source_utils.is_host_valid(
                                        url, hostDict)
                                    if not valid:
                                        if 'blogspot' in hoster or 'vidushare' in hoster:
                                            sources.append({
                                                'source': 'CDN',
                                                'quality': q,
                                                'language': 'en',
                                                'url': url,
                                                'direct': True,
                                                'debridonly': False
                                            })
                                            continue
                                        else:
                                            continue
                                    sources.append({
                                        'source': hoster,
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': False,
                                        'debridonly': False
                                    })

                            except:
                                pass

                except:
                    pass

                try:
                    url = link.replace('\/', '/')
                    url = client.replaceHTMLCodes(url)
                    url = 'http:' + url if url.startswith('//') else url
                    url = url.encode('utf-8')

                    if not '/play/' in url: raise Exception()

                    r = client.request(url, timeout='10')

                    s = re.compile(
                        '<script type="text/javascript">(.+?)</script>',
                        re.DOTALL).findall(r)

                    for i in s:
                        try:
                            r += jsunpack.unpack(i)
                        except:
                            pass

                    try:
                        result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                        r = re.findall('"file"\s*:\s*"(.+?)"', result)

                        for url in r:
                            try:
                                url = url.replace('\\', '')
                                url = directstream.googletag(url)[0]
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': url['quality'],
                                    'language': 'en',
                                    'url': url['url'],
                                    'direct': True,
                                    'debridonly': False
                                })
                            except:
                                pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #25
0
def evaluate(host):
    """Turn a raw host/embed URL into a final playable URL.

    Known hosters are handled branch by branch; anything unrecognised is
    handed to ``resolveurl``. Direct links get '|User-Agent=...' (and in
    one case '&Referer=...') header hints appended for the Kodi player.
    On any failure the current value of ``host`` is returned unchanged.
    """
    try:
        #xbmc.log('@#@HOST:%s' % host, xbmc.LOGNOTICE)
        if 'animeshd' in host:
            # animeshd URLs redirect; follow to the final location.
            host = client.request(host, output='geturl')

        elif 'server.pelisplus' in host:
            # The page wraps the real player in an iframe.
            host = client.request(host)
            host = client.parseDOM(host, 'iframe', ret='src')[0]

        else:
            host = host
        #xbmc.log('@#@HOST-FINAL:%s' % host, xbmc.LOGNOTICE)
        if 'openload' in host:
            try:
                from resources.lib.modules import openload
                oplink = openload.get_video_openload(host)
                # NOTE(review): keeps oplink when non-empty and only calls
                # resolveurl for an empty string — confirm the condition is
                # not inverted.
                host = resolveurl.resolve(oplink) if oplink == '' else oplink
            except BaseException:
                host = resolveurl.resolve(host)
            return host

        elif 'animehdpro' in host:
            data = client.request(host)
            host = re.compile('''file['"]:['"]([^'"]+)''',
                              re.DOTALL).findall(data)[0]
            # The extracted file URL redirects; the Location header holds
            # the actual stream.
            host = requests.get(host).headers['location']
            #xbmc.log('@#@ANIMEHDPRO:%s' % host, xbmc.LOGNOTICE)
            return host + '|User-Agent=%s' % urllib.quote(client.agent())

        elif 'tiwi' in host:
            from resources.lib.modules import jsunpack
            data = client.request(host)
            if jsunpack.detect(data):
                # Player config is packed; unpack then pull the file URL.
                data = jsunpack.unpack(data)
                link = re.compile('''\{file:['"]([^'"]+)''',
                                  re.DOTALL).findall(data)[0]
                #xbmc.log('@#@HDPRO:%s' % link, xbmc.LOGNOTICE)
            else:
                #link = re.compile('''video\/mp4.+?src:['"](.+?)['"]''', re.DOTALL).findall(data)[0]
                link = re.compile('''dash\+xml.+?src:['"](.+?)['"]''',
                                  re.DOTALL).findall(data)[0]
                #xbmc.log('@#@HDPRO:%s' % link, xbmc.LOGNOTICE)
            return link + '|User-Agent=%s&Referer=%s' % (urllib.quote(
                client.agent()), host)

        elif 'pelishd.tv' in host:
            res_quality = []   # Kodi-formatted quality labels for the dialog
            stream_url = []    # stream URL parallel to res_quality
            from resources.lib.modules import unjuice
            import json
            data = client.request(host)
            if unjuice.test(data):
                # Page hides its sources behind JuicyCodes obfuscation.
                juice = unjuice.run(data)
                links = json.loads(re.findall('sources:(\[.+?\])', juice)[0])
                for stream in links:
                    url = stream['file']
                    qual = '[COLORlime][B]%s Calidad[/B][/COLOR]' % stream[
                        'label']
                    res_quality.append(qual)
                    stream_url.append(url)
                if len(res_quality) > 1:
                    # Several qualities: let the user pick.
                    dialog = xbmcgui.Dialog()
                    ret = dialog.select('[COLORgold][B]Ver en[/B][/COLOR]',
                                        res_quality)
                    if ret == -1:
                        return
                    elif ret > -1:
                        host = stream_url[ret]
                        #xbmc.log('@#@HDPRO:%s' % host, xbmc.LOGNOTICE)
                        return host + '|User-Agent=%s' % urllib.quote(
                            client.agent())
                    else:
                        return
                else:
                    # Single stream: use it directly (IndexError when the
                    # list is empty is absorbed by the outer except).
                    host = stream_url[0]
                    return host + '|User-Agent=%s' % urllib.quote(
                        client.agent())

        elif 'www.pelisplus.net' in host:
            res_quality = []
            stream_url = []

            # The site's source API requires the Cloudflare __cfduid cookie
            # obtained from a first page request.
            headers = {'User-Agent': client.agent(), 'Referer': host}
            cj = requests.get(host, headers=headers).cookies
            cj = '__cfduid=%s' % str(cj['__cfduid'])
            vid_id = host.split('/')[-1]
            headers['Cookie'] = cj
            data = requests.post('https://www.pelisplus.net/api/source/%s' %
                                 vid_id,
                                 headers=headers).json()
            streams = data['data']
            for stream in streams:
                url = stream['file']
                # Relative file paths are rooted at the site.
                url = 'https://www.pelisplus.net' + url if url.startswith(
                    '/') else url
                qual = '[COLORlime][B]%s Calidad[/B][/COLOR]' % stream['label']
                res_quality.append(qual)
                stream_url.append(url)
            if len(res_quality) > 1:
                dialog = xbmcgui.Dialog()
                ret = dialog.select('[COLORgold][B]Ver en[/B][/COLOR]',
                                    res_quality)
                if ret == -1:
                    return
                elif ret > -1:
                    host = stream_url[ret]
                    #xbmc.log('@#@HDPRO:%s' % host, xbmc.LOGNOTICE)
                    return host + '|User-Agent=%s' % urllib.quote(
                        client.agent())
                else:
                    return

            else:
                host = stream_url[0]
                return host + '|User-Agent=%s' % urllib.quote(client.agent())

        else:
            # Unknown host: defer to the generic resolver.
            host = resolveurl.resolve(host)
            return host
    except:
        return host
예제 #26
0
    def sources(self, url, hostDict, hostprDict):
        """Collect playable video sources for a Moviego movie page.

        Parameters:
            url: the movie page URL.
            hostDict / hostprDict: host lists required by the provider
                interface (unused by this scraper).

        Returns:
            A list of source dicts; on any failure the sources gathered
            so far (possibly an empty list) are returned.

        Fixes over the previous version: the body mixed tabs and spaces
        (a SyntaxError on Python 3), and the page-level ``quality`` was
        clobbered inside the gvideo loop, so the CDN fallback entry got
        the last gvideo quality instead of the page quality.
        """
        sources = []
        try:
            alt_links = []    # ids of "Alternative N" mirror players
            play_links = []   # google-video stream URLs found in players

            link = client.request(url)

            # Quality badge on the poster; kept separate because it is only
            # used for the CDN fallback (gvideo links carry their own).
            film_quality = re.findall('<div class="poster-qulabel">(.*?)</div>', link)[0]
            if "1080" in film_quality:
                page_quality = "1080p"
            elif "720" in film_quality:
                page_quality = "HD"
            else:
                page_quality = "SD"

            try:
                iframe = re.findall('<iframe src="([^"]+)"', link)[0]
                if iframe:
                    original_frame = iframe
                    videourl = client.request(iframe)

                    # The player hides its markup in packed <script> blobs.
                    scripts_packs = re.compile('<script>(.+?)</script>', re.DOTALL).findall(videourl)
                    for pack in scripts_packs:
                        if jsunpack.detect(pack):
                            data_script = jsunpack.unpack(pack)
                            try:
                                alternative_links = re.findall('Alternative (\d+)<', data_script)
                                for alts in alternative_links:
                                    alt_links.append(alts)
                            except:
                                pass
                            video_src = re.findall('<source src="([^"]+)"', data_script)
                            for src in video_src:
                                src = src.replace(' ', '')
                                if "google" in src:
                                    play_links.append(src)
            except:
                pass

            try:
                # Each alternative mirror is the same iframe with ?source=aN.
                for ids in alt_links:
                    newframes = original_frame + "?source=a" + ids
                    newurl = client.request(newframes)
                    scripts_newpacks = re.compile('<script>(.+?)</script>', re.DOTALL).findall(newurl)
                    for new_pack in scripts_newpacks:
                        if jsunpack.detect(new_pack):
                            new_data_script = jsunpack.unpack(new_pack)
                            new_video_src = re.findall('<source src="([^"]+)"', new_data_script)
                            for new_url in new_video_src:
                                new_url = new_url.replace(' ', '')
                                if "google" in new_url:
                                    play_links.append(new_url)
            except:
                pass

            try:
                # De-duplicate while preserving discovery order.
                seen = []
                for play_url in play_links:
                    if play_url not in seen:
                        seen.append(play_url)
                        gquality = directstream.googletag(play_url)[0]['quality']
                        play_url = client.replaceHTMLCodes(play_url)
                        play_url = play_url.encode('utf-8')
                        sources.append({'source': 'gvideo', 'quality': gquality, 'provider': 'Moviego', 'url': play_url, 'direct': True, 'debridonly': False})
            except:
                pass

            try:
                # Direct CDN file embedded on the page itself.
                cdn_url = re.findall('file:\s+"([^"]+)"', link)[0]
                sources.append({'source': 'cdn', 'quality': page_quality, 'provider': 'Moviego', 'url': cdn_url, 'direct': True, 'debridonly': False})
            except:
                pass

            return sources
        except:
            return sources
예제 #27
0
    def sources(self, url, hostDict, locDict):
        """Find stream sources through the site's streamdor.co embed.

        Parameters:
            url: urlencoded query string describing the title (title or
                tvshowtitle, aliases, year, and optionally season/episode).
            hostDict: known hoster domains used for validation.
            locDict: unused; kept for the provider interface.

        Returns:
            A list of source dicts (possibly empty).

        Fixes over the previous version: ``year`` was never assigned (the
        year filter always raised NameError and silently fell back to the
        alias-only match); the ``urlparseF`` typo crashed on relative
        URLs; and the google check tested the stale page ``url`` instead
        of the candidate stream itself.
        """
        sources = []

        try:
            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            aliases = eval(data['aliases'])
            # FIX: `year` was undefined; take it from the query data so the
            # (title, year) match below can actually succeed.
            year = data['year'] if 'year' in data else None
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)
            try:
                if 'episode' in data:
                    # TV: match "<title> - Season <n>" entries.
                    r = client.parseDOM(result,
                                        'div',
                                        attrs={'class': 'ml-item'})
                    r = zip(client.parseDOM(r, 'a', ret='href'),
                            client.parseDOM(r, 'a', ret='title'))
                    r = [(i[0], i[1],
                          re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1]))
                         for i in r]
                    r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
                    url = [
                        i[0] for i in r if self.matchAlias(i[2][0], aliases)
                        and i[2][1] == data['season']
                    ][0]
                    url = '%swatch' % url
                else:
                    # Movies: prefer an exact (title, year) match, then fall
                    # back to a title/alias-only match.
                    r = client.parseDOM(result,
                                        'div',
                                        attrs={'class': 'ml-item'})
                    r = zip(client.parseDOM(r, 'a', ret='href'),
                            client.parseDOM(r, 'a', ret='title'))
                    results = [(i[0], i[1], re.findall('\((\d{4})', i[1]))
                               for i in r]
                    try:
                        r = [(i[0], i[1], i[2][0]) for i in results
                             if len(i[2]) > 0]
                        url = [
                            i[0] for i in r
                            if self.matchAlias(i[1], aliases) and (
                                year == i[2])
                        ][0]
                    except:
                        url = None

                    if (url == None):
                        url = [
                            i[0] for i in results
                            if self.matchAlias(i[1], aliases)
                        ][0]
                    url = '%s/watch' % url

                url = client.request(url, output='geturl')
                if url == None: raise Exception()

            except:
                return sources

            # FIX: `urlparseF` typo crashed whenever a relative URL came back.
            url = url if 'http' in url else urlparse.urljoin(
                self.base_link, url)
            result = client.request(url)
            src = re.findall('src\s*=\s*"(.*streamdor.co/video/\d+)"',
                             result)[0]
            if src.startswith('//'):
                src = 'http:' + src
            episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]
            p = client.request(src, referer=url)
            try:
                # The embed hides its player config behind JuicyCodes
                # (base64 + jsunpack obfuscation); strip and decode it.
                p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p,
                               re.IGNORECASE)[0]
                p = re.sub(r'\"\s*\+\s*\"', '', p)
                p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
                p = base64.b64decode(p)
                p = jsunpack.unpack(p)
                p = unicode(p, 'utf-8')

                # Token is required to build the HLS URL below.
                post = {'id': episodeId}
                p2 = client.request('https://embed.streamdor.co/token.php?v=5',
                                    post=post,
                                    referer=src,
                                    XHR=True)
                js = json.loads(p2)
                tok = js['token']
                quali = 'SD'
                try:
                    quali = re.findall(r'label:"(.*?)"', p)[0]
                except:
                    pass
                p = re.findall(r'var\s+episode=({[^}]+});', p)[0]
                js = json.loads(p)
                ss = []

                if 'fileEmbed' in js and js['fileEmbed'] != '':
                    ss.append([js['fileEmbed'], quali])
                if 'fileHLS' in js and js['fileHLS'] != '':
                    ss.append([
                        'https://hls.streamdor.co/%s%s' % (tok, js['fileHLS']),
                        quali
                    ])
            except:
                return sources

            for link in ss:
                try:
                    # FIX: was testing 'google' against the stale page `url`
                    # instead of the candidate stream URL itself.
                    if 'google' in link[0]:
                        valid, hoster = source_utils.is_host_valid(
                            link[0], hostDict)
                        urls, host, direct = source_utils.check_directstreams(
                            link[0], hoster)
                        for x in urls:
                            sources.append({
                                'source': host,
                                'quality': x['quality'],
                                'language': 'en',
                                'url': x['url'],
                                'direct': direct,
                                'debridonly': False
                            })

                    else:
                        try:
                            valid, hoster = source_utils.is_host_valid(
                                link[0], hostDict)
                            direct = False
                            if not valid:
                                # Unknown host: treat as a direct CDN link.
                                hoster = 'CDN'
                                direct = True
                            sources.append({
                                'source': hoster,
                                'quality': link[1],
                                'language': 'en',
                                'url': link[0],
                                'direct': direct,
                                'debridonly': False
                            })
                        except:
                            pass

                except:
                    pass

            return sources
        except Exception as e:
            return sources
예제 #28
0
def evaluate(host):
    """Resolve a raw host/embed URL into a final playable URL.

    Known hosters are handled explicitly; anything else goes through
    ``resolveurl``. Direct links get a '|User-Agent=...' (and in one case
    '&Referer=...') header hint appended for the Kodi player. On any
    failure the current value of ``host`` is returned unchanged.

    Fix over the previous version: in the pelisplus.net branch a single
    available stream fell through the ``len(res_quality) > 1`` check and
    the function returned None; it is now returned directly.
    """
    try:
        if 'animeshd' in host:
            # animeshd URLs redirect; follow to the real location.
            host = client.request(host, output='geturl')

        else:
            host = host
        xbmc.log('@#@HOST:%s' % host, xbmc.LOGNOTICE)
        if 'openload' in host:
            from resources.lib.modules import openload
            if openload.test_video(host):
                host = openload.get_video_openload(host)
            else:
                host = resolveurl.resolve(host)
            return host

        elif 'animehdpro' in host:
            data = client.request(host)
            host = re.compile('''file['"]:['"]([^'"]+)''',
                              re.DOTALL).findall(data)[0]
            # The extracted file URL redirects; the Location header holds
            # the actual stream.
            host = requests.get(host).headers['location']
            xbmc.log('@#@HDPRO:%s' % host, xbmc.LOGNOTICE)
            return host + '|User-Agent=%s' % urllib.quote(client.agent())

        elif 'tiwi' in host:
            from resources.lib.modules import jsunpack
            data = client.request(host)
            if jsunpack.detect(data):
                # Packed player config: unpack then pull the file URL.
                data = jsunpack.unpack(data)
                link = re.compile('''\{file:['"]([^'"]+)''',
                                  re.DOTALL).findall(data)[0]
                xbmc.log('@#@HDPRO:%s' % link, xbmc.LOGNOTICE)
            else:
                #link = re.compile('''video\/mp4.+?src:['"](.+?)['"]''', re.DOTALL).findall(data)[0]
                link = re.compile('''dash\+xml.+?src:['"](.+?)['"]''',
                                  re.DOTALL).findall(data)[0]
                xbmc.log('@#@HDPRO:%s' % link, xbmc.LOGNOTICE)
            return link + '|User-Agent=%s&Referer=%s' % (urllib.quote(
                client.agent()), host)

        elif 'www.pelisplus.net' in host:
            res_quality = []   # quality labels for the selection dialog
            stream_url = []    # stream URLs parallel to res_quality

            # The site's source API requires the Cloudflare __cfduid cookie
            # obtained from a first page request.
            headers = {'User-Agent': client.agent(), 'Referer': host}
            cj = requests.get(host, headers=headers).cookies
            cj = '__cfduid=%s' % str(cj['__cfduid'])
            vid_id = host.split('/')[-1]
            headers['Cookie'] = cj
            data = requests.post('https://www.pelisplus.net/api/sources/%s' %
                                 vid_id,
                                 headers=headers).json()
            streams = data['data']
            for stream in streams:
                url = stream['file']
                qual = stream['label']
                res_quality.append(qual)
                stream_url.append(url)
            if len(res_quality) > 1:
                # Several qualities: let the user pick.
                dialog = xbmcgui.Dialog()
                ret = dialog.select('Ver en', res_quality)
                if ret == -1:
                    return
                elif ret > -1:
                    host = stream_url[ret]
                    xbmc.log('@#@HDPRO:%s' % host, xbmc.LOGNOTICE)
                    return host + '|User-Agent=%s' % urllib.quote(
                        client.agent())
            else:
                # FIX: a single available stream previously fell through and
                # the function returned None; return it directly (an empty
                # list raises IndexError, absorbed by the outer except).
                host = stream_url[0]
                return host + '|User-Agent=%s' % urllib.quote(client.agent())

        else:
            # Unknown host: defer to the generic resolver.
            host = resolveurl.resolve(host)
            return host
    except:
        return host
예제 #29
0
def gamato_links(url, name, poster):  # 12
    """Scrape a Gamato movie page and add a playable directory entry.

    Tries several known player layouts in order (Playerjs mp4, inline
    file/poster pair, jwplayer ?source= URL, then an iframe / cloud
    embed) and registers the result via ``addDir``. On any failure the
    function returns silently without adding an entry.

    Parameters:
        url: movie page URL.
        name: display title for the directory entry.
        poster: poster image URL used as the entry's thumbnail.
    """
    try:
        url = quote(url, ':/.')
        data = requests.get(url).text
        # xbmc.log('DATA: {}'.format(str(data)))
        try:
            desc = client.parseDOM(data,
                                   'div',
                                   attrs={'itemprop': 'description'})[0]
            desc = clear_Title(desc)
        except IndexError:
            desc = 'N/A'

        try:
            # Layout 1: Playerjs config with a direct .mp4 file, e.g.
            # Playerjs({id:"playerjs14892",file:"https://gamato1.com/s/Aladdin%20and%20the%20King%20of%20Thieves%201996.mp4"})
            link = re.findall(
                r'''Playerjs\(\{.+?file\s*:\s*['"](.+?\.mp4)['"]\}''', data,
                re.DOTALL)[0]
            link = quote(link, ':/.')
            # link += '|User-Agent={}&Referer={}'.format(quote(client.agent()), quote(url))
            # xbmc.log('FRAME1: {}'.format(link))
        except IndexError:
            try:
                try:
                    # Layout 2a: inline file/poster pair in the page script.
                    match = re.findall(
                        r'''file\s*:\s*['"](.+?)['"],poster\s*:\s*['"](.+?)['"]\}''',
                        data, re.DOTALL)[0]
                    link, _poster = match[0], match[1]
                except IndexError:
                    # Layout 2b: jwplayer wrapper with the stream in ?source=
                    # 'http://gamatotv2.com/kids/jwplayer/?source=http%3A%2F%2F161.97.109.217%2FSonic%2520%2520%2520-%2520Gamato%2520%2520.mp4&id=16449&type=mp4
                    link = re.findall(r'''/jwplayer/.+source=(.+?)&id=''',
                                      data, re.DOTALL)[0]

                # xbmc.log('FRAME2: {}'.format(link))
            except IndexError:
                # Layout 3: iframe inside an "option-N" tab.
                frame = client.parseDOM(data,
                                        'div',
                                        attrs={'id': r'option-\d+'})[0]
                frame = client.parseDOM(frame, 'iframe', ret='src')[0]
                # xbmc.log('FRAME3: {}'.format(frame))

                if 'cloud' in frame:
                    # Cloud embed, e.g.
                    # sources: ["http://cloudb.me/4fogdt6l4qprgjzd2j6hymoifdsky3tfskthk76ewqbtgq4aml3ior7bdjda/v.mp4"],
                    match = client.request(frame, referer=url)
                    # xbmc.log('MATCH3: {}'.format(match))
                    if 'meta name="robots"' in match:
                        # Blocked first attempt: retry via the cloudb2 mirror.
                        cloudid = frame.split('html?')[-1].split('=')[0]
                        cloud = 'http://cloudb2.me/embed-{}.html?auto=1&referer={}'.format(
                            cloudid, url)
                        match = client.request(cloud)
                    try:
                        from resources.lib.modules import jsunpack
                        if jsunpack.detect(match):
                            match = jsunpack.unpack(match)
                        match = re.findall(r'sources:\s*\[[\'"](.+?)[\'"]\]',
                                           match, re.DOTALL)[0]
                        # match += '|User-Agent=%s&Referer=%s' % (quote(client.agent()), frame)
                    except IndexError:
                        from resources.lib.modules import jsunpack as jsun
                        if jsun.detect(match):
                            match = jsun.unpack(match)
                            match = re.findall(
                                r'sources:\s*\[[\'"](.+?)[\'"]\]', match,
                                re.DOTALL)[0]
                            # match += '|User-Agent=%s&Referer=%s' % (quote(client.agent()), frame)
                else:
                    match = frame
                link, _poster = match, poster

        try:
            fanart = client.parseDOM(data, 'div', attrs={'class': 'g-item'})[0]
            fanart = client.parseDOM(fanart, 'a', ret='href')[0]
        except IndexError:
            fanart = FANART
        try:
            # Add a trailer entry when a YouTube iframe is present.
            # NOTE(review): `iconimage` is not defined in this function —
            # presumably a module-level global; confirm it is not a bug
            # for `poster`.
            trailer = client.parseDOM(data, 'iframe', ret='src')
            trailer = [i for i in trailer if 'youtube' in i][0]
            addDir('[B][COLOR lime]Trailer[/COLOR][/B]', trailer, 100,
                   iconimage, fanart, str(desc))
        except IndexError:
            pass

        addDir(name, link, 100, poster, fanart, str(desc))
    except BaseException:
        return
    # Only reached on success (the except above returns early).
    views.selectView('movies', 'movie-view')
예제 #30
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape stream sources for a movie/episode via streamdor.co embeds.

        ``url`` is the urlencoded query produced by movie()/episode()
        (title/tvshowtitle, aliases, and year or premiered/season/episode).
        ``hostDict`` lists known hoster domains for validation;
        ``hostprDict`` is unused here.

        Returns a list of source dicts; on any failure the sources
        gathered so far (possibly an empty list) are returned.
        """
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                # Episode request: year is taken from the air date.
                year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(data['premiered'])[0][0]
                episode = '%01d' % int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)

            else:
                episode = None
                year = data['year']
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            referer = url
            r = client.request(url)
            if episode == None:
                # Movies only: verify the release year to reject bad matches.
                y = re.findall('Released\s*:\s*.+?\s*(\d{4})', r)[0]
                if not year == y: raise Exception()

            # Collect the page's episode/link entries.
            r = client.parseDOM(r, 'div', attrs = {'class': 'sli-name'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))

            if not episode == None:
                # Accept both "Episode 05:" and "Episode 5:" label styles.
                r = [i[0] for i in r if i[1].lower().startswith('episode %02d:' % int(data['episode'])) or i[1].lower().startswith('episode %d:' % int(data['episode']))]
            else:
                r = [i[0] for i in r]

            for u in r:
                try:
                    p = client.request(u, referer=referer, timeout='10')
                    quality = re.findall(r'Quality:\s*<.*?>([^<]+)',p)[0]
                    quality = quality if quality in ['HD', 'SD'] else source_utils.label_to_quality(quality)
                    src = re.findall('src\s*=\s*"(.*streamdor.co/video/\d+)"', p)[0]
                    if src.startswith('//'):
                        src = 'http:'+src
                    episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]
                    p = client.request(src, referer=u)
                    try:
                        # Deobfuscate the JuicyCodes-packed player config
                        # (base64 + jsunpack).
                        p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
                        p = re.sub(r'\"\s*\+\s*\"','', p)
                        p = re.sub(r'[^A-Za-z0-9+\\/=]','', p)
                        p = base64.b64decode(p)
                        p = jsunpack.unpack(p)
                        p = unicode(p, 'utf-8')
                    except:
                        continue

                    try:

                        fl = re.findall(r'file"\s*:\s*"([^"]+)',p)
                        if len(fl) > 0:
                            # Old-style config: resolve sources through the
                            # site's API using the embedded file reference.
                            fl = fl[0]
                            post = {'episodeID': episodeId, 'file': fl, 'subtitle': 'false', 'referer': urllib.quote_plus(u)}
                            p = client.request(self.source_link, post=post, referer=src, XHR=True)
                            js = json.loads(p)
                            src = js['sources']
                            p = client.request('http:'+src, referer=src)
                            js = json.loads(p)[0]
                            ss = js['sources']
                            ss = [(i['file'], i['label']) for i in ss if 'file' in i]

                        else:
                            try:
                                # New-style config: fetch a token, then read
                                # the episode JSON embedded in the player.
                                post = {'id': episodeId}
                                p2 = client.request('https://embed.streamdor.co/token.php?v=5', post=post, referer=src, XHR=True)
                                js = json.loads(p2)
                                tok = js['token']
                                p = re.findall(r'var\s+episode=({[^}]+});',p)[0]
                                js = json.loads(p)
                                ss = []
                                if 'eName' in js and js['eName'] != '':
                                    quality = source_utils.label_to_quality(js['eName'])
                                if 'fileEmbed' in js and js['fileEmbed'] != '':
                                    ss.append([js['fileEmbed'], quality])
                                if 'fileHLS' in js and js['fileHLS'] != '':
                                    ss.append(['https://hls.streamdor.co/%s%s'%(tok, js['fileHLS']), quality])
                            except:
                                pass

                        # NOTE(review): if both paths above fail, `ss` is
                        # undefined and the NameError falls into the except
                        # below — that appears to be the intended route to
                        # the embedURL fallback.
                        for i in ss:
                            try:
                                valid, hoster = source_utils.is_host_valid(i[0], hostDict)
                                direct = False
                                if not valid:
                                    # Unknown host: treat as a direct CDN link.
                                    hoster = 'CDN'
                                    direct = True
                                sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': i[0], 'direct': direct, 'debridonly': False})
                            except: pass

                    except:
                        # Fallback: a hoster iframe advertised in the page JSON.
                        url = re.findall(r'embedURL"\s*:\s*"([^"]+)',p)[0]
                        valid, hoster = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue
                        urls, host, direct = source_utils.check_directstreams(url, hoster)
                        for x in urls:
                            sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})

                except:
                    pass

            return sources
        except:
            return sources
예제 #31
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape German-language stream sources from a title's player page.

        url -- relative path of the title page under self.base_link.
        hostDict / hostprDict -- known free / premium hoster domains.
        Returns a list of source dicts; on any error returns whatever has
        been collected so far (possibly the empty list).
        """
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            # extended output: r[0] = page body, r[2]/r[3] look like response
            # and request headers (r[2] carries Set-Cookie) — assumed from
            # usage here; verify against client.request.
            r = client.request(url, output='extended')

            headers = r[3]
            headers.update({
                'Cookie': r[2].get('Set-Cookie'),
                'Referer': self.base_link
            })
            r = r[0]

            # The player nav lists one tab (<li>) per mirror: an anchor whose
            # href ('#<id>') points at a content div, plus a language flag img.
            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i,
                                          'a',
                                          attrs={'class': 'options'},
                                          req='href'),
                     dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            # (div id without the leading '#', [flag code from the image name])
            rels = [(i[0][0].attrs['href'][1:],
                     re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src']))
                    for i in rels if i[0] and i[1]]
            # keep only German-flagged tabs
            rels = [
                i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'
            ]

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            # Candidate links come from three places: inline JS link/file
            # values, "metaframe" iframes, and raw <source> elements.
            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                               ''.join([i[0].content for i in r]))
            links += [
                l.attrs['src'] for i in r for l in dom_parser.parse_dom(
                    i, 'iframe', attrs={'class': 'metaframe'}, req='src')
            ]
            links += [
                l.attrs['src'] for i in r
                for l in dom_parser.parse_dom(i, 'source', req='src')
            ]

            for i in links:
                try:
                    # strip BB-code style tags and HTML entities
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if '/play/' in i: i = urlparse.urljoin(self.base_link, i)

                    if self.domains[0] in i:
                        # self-hosted player page: fetch it and append any
                        # JS it hides behind base64 and/or p.a.c.k.e.r so the
                        # file/label regex below can see the stream entries.
                        i = client.request(i, headers=headers, referer=url)

                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try:
                                i += jsunpack.unpack(
                                    base64.decodestring(
                                        re.sub('"\s*\+\s*"', '',
                                               x))).replace('\\', '')
                            except:
                                pass

                        for x in re.findall('(eval\s*\(function.*?)</script>',
                                            i, re.DOTALL):
                            try:
                                i += jsunpack.unpack(x).replace('\\', '')
                            except:
                                pass

                        # (stream url, quality derived from its label)
                        links = [(match[0], match[1]) for match in re.findall(
                            '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                            i, re.DOTALL)]
                        links = [(x[0].replace('\/', '/'),
                                  source_utils.label_to_quality(x[1]))
                                 for x in links if '/no-video.mp4' not in x[0]]

                        # youtu.be ids here are really Google Drive doc ids;
                        # resolve them through directstream.google
                        doc_links = [
                            directstream.google(
                                'https://drive.google.com/file/d/%s/view' %
                                match)
                            for match in re.findall(
                                '''file:\s*["'](?:[^"']+youtu.be/([^"']+))''',
                                i, re.DOTALL)
                        ]
                        doc_links = [(u['url'], u['quality'])
                                     for x in doc_links if x for u in x]
                        links += doc_links

                        for url, quality in links:
                            # same-origin streams need the referer appended
                            # in Kodi's url|headers form
                            if self.base_link in url:
                                url = url + '|Referer=' + self.base_link

                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'language': 'de',
                                'url': url,
                                'direct': True,
                                'debridonly': False
                            })
                    else:
                        try:
                            # as long as URLResolver get no Update for this URL (So just a Temp-Solution)
                            did = re.findall(
                                'youtube.googleapis.com.*?docid=(\w+)', i)
                            if did:
                                i = 'https://drive.google.com/file/d/%s/view' % did[
                                    0]

                            valid, host = source_utils.is_host_valid(
                                i, hostDict)
                            if not valid: continue

                            urls, host, direct = source_utils.check_directstreams(
                                i, host)

                            for x in urls:
                                sources.append({
                                    'source': host,
                                    'quality': x['quality'],
                                    'language': 'de',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #32
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve playable stream URLs for a movie or TV episode.

        ``url`` is either an absolute watch-page URL or a urlencoded query
        string (title/tvshowtitle, year, season, episode) used to locate the
        watch page via the site's alphabetical listing pages.
        Returns a list of source dicts; on any failure returns whatever was
        collected so far (possibly the empty list).
        """
        try:
            sources = []

            if url is None: return sources

            headers = {'User-Agent': client.randomagent()}
            if not str(url).startswith('http'):
                # url is a query string: browse the A-Z listing for the
                # title's first letter and match on the cleaned title slug.
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']

                def searchname(r):
                    # Keep (link, slug) pairs whose cleaned slug equals the
                    # cleaned title; return the first matching link, else [].
                    r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                    r = [
                        i for i in r
                        if cleantitle.get(title) == cleantitle.get(i[1])
                    ]
                    r = [] if r == [] else [i[0] for i in r][0]
                    return r

                if 'tvshowtitle' in data:
                    link = urlparse.urljoin(
                        self.base_link, 'tvshow-%s.html' % title[0].upper())
                    r = client.request(link, headers=headers)
                    pages = dom_parser.parse_dom(
                        r, 'span', attrs={'class': 'break-pagination-2'})
                    pages = dom_parser.parse_dom(pages, 'a', req='href')
                    pages = [(i.attrs['href']) for i in pages]
                    if pages == []:
                        r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                        r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i))
                             for i in r]
                        r = searchname(r)
                    else:
                        # paginated listing: scan each page until a match
                        for page in pages:
                            link = urlparse.urljoin(self.base_link, page)
                            r = client.request(link, headers=headers)
                            r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                            r = [(i,
                                  re.findall('watch-tvshow-(.+?)-\d+\.html',
                                             i)) for i in r]
                            r = searchname(r)
                            if r != []: break
                else:
                    link = urlparse.urljoin(
                        self.base_link, 'movies-%s.html' % title[0].upper())
                    r = client.request(link, headers=headers)
                    pages = dom_parser.parse_dom(
                        r, 'span', attrs={'class': 'break-pagination-2'})
                    pages = dom_parser.parse_dom(pages, 'a', req='href')
                    pages = [(i.attrs['href']) for i in pages]
                    if pages == []:
                        r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                        r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i))
                             for i in r]
                        r = searchname(r)
                    else:
                        for page in pages:
                            link = urlparse.urljoin(self.base_link, page)
                            r = client.request(link, headers=headers)
                            r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                            r = [(i,
                                  re.findall('watch-movie-(.+?)-\d+\.html', i))
                                 for i in r]
                            r = searchname(r)
                            if r != []: break

                u = urlparse.urljoin(self.base_link, r)
                for i in range(3):
                    # the site intermittently serves a 'failed' page; retry
                    r = client.request(u, headers=headers)
                    if not 'failed' in r: break

                if 'season' in data and 'episode' in data:
                    # pick the matching -sXXeYY- episode link off the show page
                    r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
                    r = [
                        i for i in r if '-s%02de%02d-' %
                        (int(data['season']),
                         int(data['episode'])) in i.lower()
                    ][0]

                    r = urlparse.urljoin(self.base_link, r)

                    r = client.request(r, headers=headers)

            else:
                r = urlparse.urljoin(self.base_link, url)

                # BUGFIX: this call previously passed post=post, but `post`
                # was never defined on this path (it only existed in
                # commented-out login code), so every direct-URL request
                # raised NameError that the outer except silently swallowed.
                r = client.request(r, headers=headers)

            quality = 'HD' if '-movie-' in r else 'SD'

            try:
                # The player obfuscates the stream URL: the JS "file" entry
                # names a function whose body concatenates a literal prefix,
                # a JS array variable, and the text of a hidden <span>.
                f = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
                f = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]

                u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
                u = re.findall(
                    '\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)',
                    u)[0]

                a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
                b = client.parseDOM(r, 'span', {'id': u[2]})[0]

                url = u[0] + a + b
                url = url.replace('"', '').replace(',', '').replace('\/', '/')
                url += '|' + urllib.urlencode(headers)
            except:
                try:
                    # fallback 1: the page is packed JS; unpack and use as-is
                    url = r = jsunpack.unpack(r)
                    url = url.replace('"', '')
                except:
                    # fallback 2: "file" names a function that simply returns
                    # a string literal; extract that literal directly
                    url = re.findall(
                        r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)',
                        r, re.DOTALL)[0][1]

            sources.append({
                'source': 'cdn',
                'quality': quality,
                'language': 'en',
                'url': url,
                'direct': True,
                'debridonly': False,
                'autoplay': True
            })

            return sources
        except:
            return sources
예제 #33
0
    def sources(self, url, hostDict, hostprDict):
        """Collect stream links for a movie or TV episode page.

        ``url`` is either a urlencoded query (tvshowtitle, season, episode,
        premiered) used to build an /episodes/ URL, or a direct page URL.
        Returns a list of source dicts; on any error returns whatever has
        been collected so far (possibly the empty list).
        """
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            if 'tvshowtitle' in data:
                # build the episode URL and verify the page's air year
                # matches the expected premiere year
                url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url == None: raise Exception()

                r = client.request(url)

                y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]

                y = re.findall('(\d{4})', y)[0]
                if not y == year: raise Exception()

            else:
                # movie: resolve redirects first, then fetch the final page
                url = client.request(url, output='geturl')
                if url == None: raise Exception()

                r = client.request(url)


            try:
                # inline jwplayer sources: pull every "file" URL and resolve
                # it as a Google video
                result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                r = re.findall('"file"\s*:\s*"(.+?)"', result)

                for url in r:
                    try:
                        url = url.replace('\\', '')
                        url = directstream.googletag(url)[0]
                        sources.append({'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass

            # NOTE(review): if the block above matched, `r` now holds the list
            # of file URLs, not the page HTML, so this iframe scan runs over
            # those strings — looks unintended; verify before relying on it.
            links = client.parseDOM(r, 'iframe', ret='src')

            for link in links:
                try:
                    if 'openload.io' in link or 'openload.co' in link or 'oload.tv' in link:
                        sources.append(
                            {'source': 'openload.co', 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False,
                             'debridonly': False})
                        # raise to skip the putstream branch for this link
                        raise Exception()
                    elif 'putstream' in link:
                        # putstream embeds a JSON-ish {"file": ..., "label": ...}
                        # object per stream
                        r = client.request(link)
                        r = re.findall(r'({"file.*?})',r)
                        for i in r:
                             try:
                                i = json.loads(i)
                                url = i['file']
                                q = source_utils.label_to_quality(i['label'])
                                if 'google' in url:
                                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})

                                else:
                                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                                    if not valid:
                                        # unknown hosts: keep only known CDN-style
                                        # direct links, drop the rest
                                        if 'blogspot' in hoster or 'vidushare' in hoster:
                                            sources.append({'source': 'CDN', 'quality': q, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                                            continue
                                        else: continue
                                    sources.append({'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

                             except:
                                pass

                except:
                    pass

                try:
                    # '/play/' iframes hide their sources in packed JS:
                    # unpack, then extract gvideo file URLs as above
                    url = link.replace('\/', '/')
                    url = client.replaceHTMLCodes(url)
                    url = 'http:' + url if url.startswith('//') else url
                    url = url.encode('utf-8')

                    if not '/play/' in url: raise Exception()

                    r = client.request(url, timeout='10')

                    s = re.compile('<script type="text/javascript">(.+?)</script>', re.DOTALL).findall(r)

                    for i in s:
                        try:
                            r += jsunpack.unpack(i)
                        except:
                            pass

                    try:
                        result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                        r = re.findall('"file"\s*:\s*"(.+?)"', result)

                        for url in r:
                            try:
                                url = url.replace('\\', '')
                                url = directstream.googletag(url)[0]
                                sources.append({'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False})
                            except:
                                pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #34
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape stream links for a movie from the site's search results.

        ``url`` is a urlencoded query string carrying 'title' and 'year'.
        Matching results are followed to their frame URL; vidlink frames are
        resolved to direct HLS streams with a subtitle track.
        Returns a list of source dicts; on failure returns whatever was
        collected so far (possibly the empty list).
        """
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title']
            hdlr = data['year']
            query = '%s %s' % (title, hdlr)
            # strip characters the site's search chokes on
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(query))
            r = client.request(url)
            items = client.parseDOM(r, 'div', attrs={'class': 'cell_container'})

            for item in items:
                try:
                    name = client.parseDOM(item, 'a', ret='title')[0]
                    name = client.replaceHTMLCodes(name)
                    # cut the year/episode tag and everything after it so
                    # bare titles can be compared
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4})[\.|\)|\]|\s]', name)[-1]
                    if not y == hdlr:
                        raise Exception()

                    link = client.parseDOM(item, 'a', ret='href')[0]
                    link = urlparse.urljoin(self.base_link, link) if link.startswith('/') else link
                except:
                    # BUGFIX: previously `return sources`, which aborted the
                    # whole result scan as soon as one entry failed the
                    # title/year check; skip just that entry instead.
                    continue
                try:
                    r = client.request(link)
                    url = re.findall('''frame_url\s*=\s*['"](.+?)['"]\;''', r, re.DOTALL)[0]
                    url = url if url.startswith('http') else urlparse.urljoin('https://', url)

                    if 'vidlink' in url:
                        ua = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1'}
                        html = client.request(url, headers=ua)
                        postID = re.findall("postID\s*=\s*'([^']+)", html)[0]
                        data = {'postID': postID}

                        # registering a view returns packed JS that holds the
                        # master playlist URL (file1=...)
                        rid = client.request('https://vidlink.org/embed/update_views', post=data, headers=ua,
                                             referer=url)
                        from resources.lib.modules import jsunpack
                        rid = jsunpack.unpack(rid)
                        playlist = re.findall('''file1=['"](.+?)['"];''', rid)[0]
                        links = client.request(playlist, headers=ua, referer=url)

                        # prefer the Greek subtitle track, fall back to English
                        try:
                            sub = re.findall('''URI="/sub/vtt/(\d+)/sub.m3u8",LANGUAGE="el"''', links)[0]
                        except IndexError:
                            sub = re.findall('''URI="/sub/vtt/(\d+)/sub.m3u8",LANGUAGE="en"''', links)[0]
                        sub = 'https://opensubtitles.co/sub/{0}.vtt'.format(sub)

                        # one variant per resolution in the master playlist
                        pattern = 'RESOLUTION=\d+x(\d{3,4}),SUBTITLES="subs"\s*(/drive.+?.m3u8)'
                        links = re.findall(pattern, links)
                        for quality, link in links:
                            quality = source_utils.get_release_quality(quality, quality)[0]
                            link = 'https://p2p.vidlink.org/' + link.replace('/drive//hls/', 'drive/hls/')
                            sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': link,
                                            'sub': sub, 'direct': True, 'debridonly': False})

                except:
                    pass

            return sources
        except:
            return sources
예제 #35
0
    def sources(self, url, hostDict, hostprDict):
        """Collect Pelispedia stream links for a title page.

        Follows the page's player iframe, iterates the mirror buttons
        (#botones) hosted on pelispedia domains, and tries several
        extraction strategies per mirror, accumulating into ``links``
        before converting them to source dicts.  On any error returns
        whatever has been collected so far (possibly the empty list).
        """
        try:
            sources = []

            if url == None: return sources

            r = urlparse.urljoin(self.base_link, url)

            result = client.request(r, timeout='10')

            # the actual player lives in the first iframe whose src
            # contains 'iframe'
            f = client.parseDOM(result, 'iframe', ret='src')
            f = [i for i in f if 'iframe' in i][0]

            result = client.request(f, headers={'Referer': r}, timeout='10')

            # mirror buttons: (url, hostname) pairs
            r = client.parseDOM(result, 'div', attrs = {'id': 'botones'})[0]
            r = client.parseDOM(r, 'a', ret='href')
            r = [(i, urlparse.urlparse(i).netloc) for i in r]

            links = []

            for u, h in r:
                # only pelispedia-hosted mirrors are handled
                if not 'pelispedia' in h: continue

                result = client.request(u, headers={'Referer': f}, timeout='10')

                try:
                    # NOTE(review): unreachable — the loop above skips anything
                    # without 'pelispedia' in h, so this raise always fires and
                    # the 720p extraction below is dead code; presumably
                    # disabled deliberately. Verify before re-enabling.
                    if 'pelispedia' in h: raise Exception()

                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,\s*label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)
                    url = [i[0] for i in url if '720' in i[1]][0]

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': False})
                except:
                    pass

                try:
                    # strategy 1: inline jwplayer sources resolved as gvideo
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)

                    for i in url:
                        try:
                            links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i, 'direct': True})
                        except:
                            pass
                except:
                    pass

                try:
                    # strategy 2: gkpluginsphp backend — POST the embedded
                    # link and read the JSON 'link' back
                    post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                    post = urllib.urlencode({'link': post})

                    url = urlparse.urljoin(self.base_link, '/Pe_flsh/plugins/gkpluginsphp.php')
                    url = client.request(url, post=post, XHR=True, referer=u, timeout='10')
                    url = json.loads(url)['link']

                    links.append({'source': 'gvideo', 'quality': 'HD', 'url': url, 'direct': True})
                except:
                    pass

                try:
                    # strategy 3: protected.php backend keyed by the 'pic'
                    # query parameter of var parametros
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]

                    post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                    post = urllib.urlencode({'sou': 'pic', 'fv': '23', 'url': post})

                    url = urlparse.urljoin(self.base_link, '/Pe_Player_Html5/pk/pk_2/plugins/protected.php')
                    url = client.request(url, post=post, XHR=True, timeout='10')
                    url = json.loads(url)[0]['url']

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': True})
                except:
                    pass

                try:
                    # strategy 4: page is packed JS — unpack, pull the file
                    # URLs, then resolve each via a redirect-following request
                    if not jsunpack.detect(result): raise Exception()

                    result = jsunpack.unpack(result)
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*.*?\'(.+?)\'', url)
                    for i in url:
                        try:
                            i = client.request(i, headers={'Referer': f}, output='geturl', timeout='10')
                            links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i,
                                          'direct': True})
                        except:
                            pass
                except:
                    pass

                try:
                    # strategy 5: newer protected.php on player_link that also
                    # requires a (hard-coded) token alongside the pic value
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]

                    post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                    token = 'eyJjdCI6InZGS3QySm9KRWRwU0k4SzZoZHZKL2c9PSIsIml2IjoiNDRkNmMwMWE0ZjVkODk4YThlYmE2MzU0NDliYzQ5YWEiLCJzIjoiNWU4MGUwN2UwMjMxNDYxOCJ9'
                    post = urllib.urlencode({'sou': 'pic', 'fv': '0', 'url': post, 'token': token})

                    url = urlparse.urljoin(self.player_link, '/template/protected.php')
                    url = client.request(url, post=post, XHR=True, timeout='10')
                    js = json.loads(url)
                    url = [i['url'] for i in js]
                    for i in url:
                        try:
                            i = client.request(i, headers={'Referer': f}, output='geturl', timeout='10')
                            links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i,
                                          'direct': True})
                        except:
                            pass
                except:
                    pass

            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Pelispedia', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})

            return sources
        except:
            return sources
예제 #36
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve playable stream URLs for a movie or TV episode.

        ``url`` is either an absolute watch-page URL or a urlencoded query
        string (title/tvshowtitle, year, season, episode) used to locate the
        watch page via the site's alphabetical listing pages.
        Returns a list of source dicts; on any failure returns whatever was
        collected so far (possibly the empty list).
        """
        try:
            sources = []

            if url is None: return sources

            headers = {'User-Agent': client.randomagent()}
            if not str(url).startswith('http'):
                # url is a query string: browse the A-Z listing for the
                # title's first letter and match on the cleaned title slug.
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                def searchname(r):
                    # Keep (link, slug) pairs whose cleaned slug equals the
                    # cleaned title; return the first matching link, else [].
                    r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                    r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                    r = [] if r == [] else [i[0] for i in r][0]
                    return r

                if 'tvshowtitle' in data:
                    link = urlparse.urljoin(self.base_link, 'tvshow-%s.html' % title[0].upper())
                    r = client.request(link, headers=headers)
                    pages = dom_parser.parse_dom(r, 'span', attrs={'class': 'break-pagination-2'})
                    pages = dom_parser.parse_dom(pages, 'a', req='href')
                    pages = [(i.attrs['href']) for i in pages]
                    if pages == []:
                        r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                        r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                        r = searchname(r)
                    else:
                        # paginated listing: scan each page until a match
                        for page in pages:
                            link = urlparse.urljoin(self.base_link, page)
                            r = client.request(link, headers=headers)
                            r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                            r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                            r = searchname(r)
                            if r != []: break
                else:
                    link = urlparse.urljoin(self.base_link, 'movies-%s.html' % title[0].upper())
                    r = client.request(link, headers=headers)
                    pages = dom_parser.parse_dom(r, 'span', attrs={'class': 'break-pagination-2'})
                    pages = dom_parser.parse_dom(pages, 'a', req='href')
                    pages = [(i.attrs['href']) for i in pages]
                    if pages == []:
                        r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                        r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]
                        r = searchname(r)
                    else:
                        for page in pages:
                            link = urlparse.urljoin(self.base_link, page)
                            r = client.request(link, headers=headers)
                            r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                            r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]
                            r = searchname(r)
                            if r != []: break

                u = urlparse.urljoin(self.base_link, r)
                for i in range(3):
                    # the site intermittently serves a 'failed' page; retry
                    r = client.request(u, headers=headers)
                    if not 'failed' in r: break

                if 'season' in data and 'episode' in data:
                    # pick the matching -sXXeYY- episode link off the show page
                    r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
                    r = [i for i in r if '-s%02de%02d-' % (int(data['season']), int(data['episode'])) in i.lower()][0]

                    r = urlparse.urljoin(self.base_link, r)

                    r = client.request(r, headers=headers)

            else:
                r = urlparse.urljoin(self.base_link, url)

                # BUGFIX: this call previously passed post=post, but `post`
                # was never defined on this path (it only existed in
                # commented-out login code), so every direct-URL request
                # raised NameError that the outer except silently swallowed.
                r = client.request(r, headers=headers)

            quality = 'HD' if '-movie-' in r else 'SD'

            try:
                # The player obfuscates the stream URL: the JS "file" entry
                # names a function whose body concatenates a literal prefix,
                # a JS array variable, and the text of a hidden <span>.
                f = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
                f = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]

                u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
                u = re.findall('\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)', u)[0]

                a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
                b = client.parseDOM(r, 'span', {'id': u[2]})[0]

                url = u[0] + a + b
                url = url.replace('"', '').replace(',', '').replace('\/', '/')
                url += '|' + urllib.urlencode(headers)
            except:
                try:
                    # fallback 1: the page is packed JS; unpack and use as-is
                    url = r = jsunpack.unpack(r)
                    url = url.replace('"', '')
                except:
                    # fallback 2: "file" names a function that simply returns
                    # a string literal; extract that literal directly
                    url = re.findall(r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)', r, re.DOTALL)[0][1]

            sources.append({'source': 'cdn', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False, 'autoplay': True})

            return sources
        except:
            return sources
예제 #37
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve playable hoster links for the given encoded url.

        Walks the episode/movie page, follows each '/server-' mirror to its
        streamdor embed, decodes the JuicyCodes payload and collects every
        valid hoster URL found inside. Best-effort: failures skip the
        current mirror and the collected list is always returned.
        """
        try:
            sources = []

            if url == None: return sources

            # Decode the query-string payload into a plain dict of strings.
            params = urlparse.parse_qs(url)
            params = dict([(k, params[k][0]) if params[k] else (k, '') for k in params])
            aliases = eval(params['aliases'])
            headers = {}

            if 'tvshowtitle' in params:
                # Episodes: take the year from the premiere date, then try
                # the season URL with 1-digit and 2-digit season numbers
                # before falling back to a site search.
                year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(params['premiered'])[0][0]
                episode = '%01d' % int(params['episode'])
                url = '%s/tv-series/%s-season-%01d/watch/' % (self.base_link, cleantitle.geturl(params['tvshowtitle']), int(params['season']))
                url = client.request(url, headers=headers, timeout='10', output='geturl')
                if url == None or url == self.base_link+'/':
                    url = '%s/tv-series/%s-season-%02d/watch/' % (self.base_link, cleantitle.geturl(params['tvshowtitle']), int(params['season']))
                    url = client.request(url, headers=headers, timeout='10', output='geturl')
                if url == None:
                    url = self.searchShow(params['tvshowtitle'], params['season'], aliases, headers)
            else:
                episode = None
                year = params['year']
                url = self.searchMovie(params['title'], params['year'], aliases, headers)

            referer = url
            html = client.request(url, headers=headers)

            # Bail out when the page's release year disagrees with the request.
            y = re.findall('Release\s*:\s*.+?\s*(\d{4})', html)[0]
            if not year == y: raise Exception()

            block = client.parseDOM(html, 'div', attrs = {'class': 'les-content'})
            pairs = zip(client.parseDOM(block, 'a', ret='href'), client.parseDOM(block, 'a'))
            pairs = [(href, ''.join(re.findall('(\d+)', label)[:1])) for href, label in pairs]

            # Keep only the requested episode's links (all links for movies),
            # then restrict to the '/server-' mirror pages.
            if not episode == None:
                candidates = [href for href, num in pairs if '%01d' % int(num) == episode]
            else:
                candidates = [href for href, num in pairs]

            candidates = [href for href in candidates if '/server-' in href]

            for server in candidates:
                try:
                    page = client.request(server, headers=headers, referer=referer, timeout='10')
                    src = re.findall('embed_src\s*:\s*"(.+?)"', page)[0]
                    if src.startswith('//'):
                        src = 'http:'+src
                    # Only streamdor embeds are handled by this decoder.
                    if not 'streamdor.co' in src: raise Exception()
                    episodeId = re.findall('streamdor.co.*/video/(.+?)"', page)[0]
                    page = client.request(src, referer=server)
                    try:
                        # The player is wrapped in JuicyCodes: strip the
                        # string concatenation, base64-decode, unpack the JS.
                        page = re.findall(r'JuicyCodes.Run\(([^\)]+)', page, re.IGNORECASE)[0]
                        page = re.sub(r'\"\s*\+\s*\"','', page)
                        page = re.sub(r'[^A-Za-z0-9+\\/=]','', page)
                        page = base64.b64decode(page)
                        page = jsunpack.unpack(page)
                        page = unicode(page, 'utf-8')
                    except:
                        continue

                    try:
                        url = re.findall(r'embedURL"\s*:\s*"([^"]+)', page)[0]
                        valid, hoster = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue
                        urls, host, direct = source_utils.check_directstreams(url, hoster)
                        for entry in urls:
                            sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': entry['url'], 'direct': direct, 'debridonly': False})
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #38
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape stream links from a pelispedia-style player page.

        Follows the title page's player iframe, walks the mirror buttons in
        the '#botones' div and runs several known extraction methods against
        each mirror (labelled jwplayer sources, plain sources, gkpluginsphp,
        protected.php, packed scripts). Each method is best-effort; failures
        are swallowed and the next method is tried. Returns a list of
        source dicts.
        """
        try:
            sources = []

            if url == None: return sources

            r = urlparse.urljoin(self.base_link, url)

            result = client.request(r, timeout='10')

            # The player is embedded via an iframe on the title page.
            f = client.parseDOM(result, 'iframe', ret='src')
            f = [i for i in f if 'iframe' in i][0]

            result = client.request(f, headers={'Referer': r}, timeout='10')

            # One anchor per mirror inside '#botones'; keep (href, host).
            r = client.parseDOM(result, 'div', attrs = {'id': 'botones'})[0]
            r = client.parseDOM(r, 'a', ret='href')
            r = [(i, urlparse.urlparse(i).netloc) for i in r]

            links = []

            for u, h in r:
                # Only pelispedia-hosted mirrors are handled.
                if not 'pelispedia' in h: continue

                result = client.request(u, headers={'Referer': f}, timeout='10')

                # Method 1: labelled jwplayer sources, 720p entry only.
                # NOTE(review): every host reaching this point contains
                # 'pelispedia' (see the filter above), so the guard below
                # always raises and this branch looks dead — confirm intent
                # (a sibling variant in this file also accepts
                # 'thevideos.tv' mirrors here).
                try:
                    if 'pelispedia' in h: raise Exception()

                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,\s*label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)
                    url = [i[0] for i in url if '720' in i[1]][0]

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': False})
                except:
                    pass

                # Method 2: unlabelled jwplayer sources -> google video links.
                try:
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)

                    for i in url:
                        try:
                            links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i, 'direct': True})
                        except:
                            pass
                except:
                    pass

                # Method 3: gkpluginsphp resolver (POST the player link).
                try:
                    post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                    post = urllib.urlencode({'link': post})

                    url = urlparse.urljoin(self.base_link, '/gkphp_flv/plugins/gkpluginsphp.php')
                    url = client.request(url, post=post, XHR=True, referer=u, timeout='10')
                    url = json.loads(url)['link']

                    links.append({'source': 'gvideo', 'quality': 'HD', 'url': url, 'direct': True})
                except:
                    pass

                # Method 4: protected.php resolver fed with the 'pic' value
                # taken from the page's 'parametros' query string.
                try:
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]

                    post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                    post = urllib.urlencode({'sou': 'pic', 'fv': '23', 'url': post})

                    url = urlparse.urljoin(self.base_link, '/Pe_Player_Html5/pk/pk_2/plugins/protected.php')
                    url = client.request(url, post=post, XHR=True, timeout='10')
                    url = json.loads(url)[0]['url']

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': True})
                except:
                    pass

                # Method 5: packed (p,a,c,k,e,d) player script; unpack and
                # resolve each file URL to its final location.
                try:
                    if not jsunpack.detect(result): raise Exception()

                    result = jsunpack.unpack(result)
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*.*?\'(.+?)\'', url)
                    for i in url:
                        try:
                            # Follow redirects to get the final stream URL.
                            i = client.request(i, headers={'Referer': f}, output='geturl', timeout='10')
                            links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i,
                                          'direct': True})
                        except:
                            pass
                except:
                    pass

                # Method 6: player_link protected.php with a fixed token;
                # returns a JSON list of stream URLs.
                try:
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]

                    post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                    token = 'eyJjdCI6InZGS3QySm9KRWRwU0k4SzZoZHZKL2c9PSIsIml2IjoiNDRkNmMwMWE0ZjVkODk4YThlYmE2MzU0NDliYzQ5YWEiLCJzIjoiNWU4MGUwN2UwMjMxNDYxOCJ9'
                    post = urllib.urlencode({'sou': 'pic', 'fv': '0', 'url': post, 'token': token})

                    url = urlparse.urljoin(self.player_link, '/template/protected.php')
                    url = client.request(url, post=post, XHR=True, timeout='10')
                    js = json.loads(url)
                    url = [i['url'] for i in js]
                    for i in url:
                        try:
                            i = client.request(i, headers={'Referer': f}, output='geturl', timeout='10')
                            links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i,
                                          'direct': True})
                        except:
                            pass
                except:
                    pass

            # Normalise collected links into the provider's source format.
            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})

            return sources
        except:
            return sources
예제 #39
0
파일: watch32.py 프로젝트: helloman37/repo
    def sources(self, url, hostDict, hostprDict):
        """Find vidlink streams (with subtitles) for a searched title.

        Searches the site, matches results by cleaned title and year, then
        follows each result's frame_url. For vidlink frames it unpacks the
        player script, downloads the HLS master playlist and emits one
        source per quality variant, attaching a Greek (or English) subtitle
        track.
        """
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title']

            # Release year is used as the match key against search results.
            hdlr = data['year']

            # Strip characters the site's search chokes on.
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', title)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

            posts = client.parseDOM(r, 'div', attrs={'class': 'video_title'})

            items = []

            for post in posts:
                try:
                    data = dom_parser2.parse_dom(post, 'a', req=['href', 'title'])[0]
                    t = data.content
                    # The anchor title looks like 'Name (YYYY) - Quality'.
                    y = re.findall('\((\d{4})\)', data.attrs['title'])[0]
                    qual = data.attrs['title'].split('-')[1]
                    link = data.attrs['href']

                    # Keep only exact title + year matches.
                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
                    if not y == hdlr: raise Exception()

                    items += [(link, qual)]

                except :
                    pass
            for item in items:
                try:
                    r = client.request(item[0]) if item[0].startswith('http') else client.request(urlparse.urljoin(self.base_link, item[0]))

                    url = re.findall('''frame_url\s*=\s*["']([^']+)['"];''', r, re.DOTALL)[0]
                    # NOTE(review): urljoin with a bare 'https://' base does
                    # not produce a normal absolute URL — verify that
                    # scheme-less frame_url values are actually handled.
                    url = url if url.startswith('http') else urlparse.urljoin('https://', url)

                    if 'vidlink' in url:
                        ua = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1'}
                        html = client.request(url, headers=ua)
                        postID = re.findall("postID\s*=\s*'([^']+)", html)[0]
                        data = {'postID': postID}

                        # update_views responds with a packed script that
                        # contains the master playlist URL (file1).
                        rid = client.request('https://vidlink.org/embed/update_views', post=data, headers=ua,
                                             referer=url)
                        from resources.lib.modules import jsunpack
                        rid = jsunpack.unpack(rid)
                        playlist = re.findall('''file1=['"](.+?)['"];''', rid)[0]
                        links = client.request(playlist, headers=ua, referer=url)

                        # Prefer Greek subtitles, fall back to English.
                        try:
                            sub = re.findall('''URI="/sub/vtt/(\d+)/sub.m3u8",LANGUAGE="el"''', links)[0]
                        except IndexError:
                            sub = re.findall('''URI="/sub/vtt/(\d+)/sub.m3u8",LANGUAGE="en"''', links)[0]
                        sub = 'https://opensubtitles.co/sub/{0}.vtt'.format(sub)

                        # One stream per resolution variant in the playlist.
                        pattern = 'RESOLUTION=\d+x(\d{3,4}),SUBTITLES="subs"\s*(/drive.+?.m3u8)'
                        links = re.findall(pattern, links)
                        for quality, link in links:
                            quality = source_utils.get_release_quality(quality, quality)[0]
                            link = 'https://p2p.vidlink.org/' + link.replace('/drive//hls/', 'drive/hls/')
                            sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': link,
                                            'sub': sub, 'direct': True, 'debridonly': False})

                except :
                    pass

            return sources
        except :
            return sources
예제 #40
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape stream links from a pelispedia / thevideos.tv player page.

        Follows the title page's player iframe, walks the mirror buttons in
        the '#botones' div and runs several known extraction methods against
        each mirror (labelled jwplayer sources, plain sources, the
        gkpluginsphp resolver, the protected.php resolver and packed
        scripts). Every method is best-effort: failures are swallowed and
        the next method is tried. Returns a list of source dicts.
        """
        try:
            sources = []

            if url == None: return sources

            r = urlparse.urljoin(self.base_link, url)

            result = client.request(r)

            # The player is embedded via an iframe on the title page.
            f = client.parseDOM(result, 'iframe', ret='src')
            f = [i for i in f if 'iframe' in i][0]

            result = client.request(f, headers={'Referer': r})

            # One anchor per mirror inside '#botones'; keep (href, host).
            r = client.parseDOM(result, 'div', attrs={'id': 'botones'})[0]
            r = client.parseDOM(r, 'a', ret='href')
            r = [(i, urlparse.urlparse(i).netloc) for i in r]

            links = []

            for u, h in r:
                # Only pelispedia and thevideos.tv mirrors are handled.
                if not 'pelispedia' in h and not 'thevideos.tv' in h: continue

                result = client.request(u, headers={'Referer': f})

                # Method 1: labelled jwplayer sources, 720p entry only
                # (skipped for pelispedia mirrors, which use the resolvers
                # below instead).
                try:
                    if 'pelispedia' in h: raise Exception()

                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall(
                        'file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,\s*label\s*:\s*(?:\"|\')(.+?)(?:\"|\')',
                        url)
                    url = [i[0] for i in url if '720' in i[1]][0]

                    links.append({
                        'source': 'cdn',
                        'quality': 'HD',
                        'url': url,
                        'direct': False
                    })
                except:
                    pass

                # Method 2: unlabelled jwplayer sources -> google video links.
                try:
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)

                    for i in url:
                        try:
                            links.append({
                                'source':
                                'gvideo',
                                'quality':
                                directstream.googletag(i)[0]['quality'],
                                'url':
                                i,
                                'direct':
                                True
                            })
                        except:
                            pass
                except:
                    pass

                # Method 3: gkpluginsphp resolver (POST the player link).
                try:
                    post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)',
                                      result)[0]
                    post = urllib.urlencode({'link': post})

                    url = urlparse.urljoin(
                        self.base_link, '/Pe_flsh/plugins/gkpluginsphp.php')
                    url = client.request(url, post=post, XHR=True, referer=u)
                    url = json.loads(url)['link']

                    links.append({
                        'source': 'gvideo',
                        'quality': 'HD',
                        'url': url,
                        'direct': True
                    })
                except:
                    pass

                # Method 4: protected.php resolver fed with the 'pic' value
                # taken from the page's 'parametros' query string.
                try:
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)',
                                      result)[0]

                    post = urlparse.parse_qs(
                        urlparse.urlparse(post).query)['pic'][0]
                    post = urllib.urlencode({
                        'sou': 'pic',
                        'fv': '23',
                        'url': post
                    })

                    url = urlparse.urljoin(
                        self.base_link,
                        '/Pe_Player_Html5/pk/pk_2/plugins/protected.php')
                    url = client.request(url, post=post, XHR=True)
                    url = json.loads(url)[0]['url']

                    links.append({
                        'source': 'cdn',
                        'quality': 'HD',
                        'url': url,
                        'direct': True
                    })
                except:
                    pass

                # Method 5: packed (p,a,c,k,e,d) player script; unpack and
                # resolve each file URL to its final location.
                try:
                    if not jsunpack.detect(result): raise Exception()

                    result = jsunpack.unpack(result)
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*.*?\'(.+?)\'', url)
                    for i in url:
                        try:
                            i = client.request(i,
                                               headers={'Referer': f},
                                               output='geturl')
                            links.append({
                                'source':
                                'gvideo',
                                'quality':
                                directstream.googletag(i)[0]['quality'],
                                'url':
                                i,
                                'direct':
                                True
                            })
                        except:
                            pass
                except:
                    pass

            # Normalise collected links into the provider's source format.
            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'language': 'en',
                    'url': i['url'],
                    'direct': i['direct'],
                    'debridonly': False
                })

            return sources
        except:
            return sources
예제 #41
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve streams from streamdor embeds for a searched title.

        Finds the show/movie page via searchShow/searchMovie, picks the
        links for the requested episode (all links for movies), decodes
        each streamdor embed's JuicyCodes payload, then either resolves
        direct CDN sources through self.source_link or falls back to
        treating the embedURL as a generic hoster link.
        """
        try:
            sources = []

            if url == None: return sources

            # Decode the query-string payload into a plain dict of strings.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                # Episode requests: year comes from the premiere date.
                year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(
                    data['premiered'])[0][0]
                episode = '%01d' % int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)

            else:
                episode = None
                year = data['year']
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            referer = url
            r = client.request(url)
            # Movies only: verify the page's release year matches.
            if episode == None:
                y = re.findall('Released\s*:\s*.+?\s*(\d{4})', r)[0]
                if not year == y: raise Exception()

            r = client.parseDOM(r, 'div', attrs={'class': 'sli-name'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))

            # Keep the requested episode's links; movies keep everything.
            if not episode == None:
                r = [
                    i[0] for i in r
                    if i[1].lower().startswith('episode %02d:' %
                                               int(data['episode']))
                ]
            else:
                r = [i[0] for i in r]

            for u in r:
                try:
                    p = client.request(u, referer=referer, timeout='10')
                    src = re.findall('src\s*=\s*"(.*streamdor.co/video/\d+)"',
                                     p)[0]
                    if src.startswith('//'):
                        src = 'http:' + src
                    episodeId = re.findall('.*streamdor.co/video/(\d+)',
                                           src)[0]
                    p = client.request(src, referer=u)
                    # The player is wrapped in JuicyCodes: strip the string
                    # concatenation, base64-decode, then unpack the JS.
                    try:
                        p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p,
                                       re.IGNORECASE)[0]
                        p = re.sub(r'\"\s*\+\s*\"', '', p)
                        p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
                        p = base64.b64decode(p)
                        p = jsunpack.unpack(p)
                        p = unicode(p, 'utf-8')
                    except:
                        continue

                    try:
                        # Preferred path: POST the embedded file reference to
                        # self.source_link to obtain direct CDN sources.
                        fl = re.findall(r'file"\s*:\s*"([^"]+)', p)[0]
                        post = {
                            'episodeID': episodeId,
                            'file': fl,
                            'subtitle': 'false',
                            'referer': urllib.quote_plus(u)
                        }
                        p = client.request(self.source_link,
                                           post=post,
                                           referer=src,
                                           XHR=True)
                        js = json.loads(p)
                        src = js['sources']
                        p = client.request('http:' + src, referer=src)
                        js = json.loads(p)[0]

                        try:
                            # Emit one source per labelled file entry.
                            ss = js['sources']
                            ss = [(i['file'], i['label']) for i in ss
                                  if 'file' in i]

                            for i in ss:
                                try:
                                    sources.append({
                                        'source':
                                        'CDN',
                                        'quality':
                                        source_utils.label_to_quality(i[1]),
                                        'language':
                                        'en',
                                        'url':
                                        i[0],
                                        'direct':
                                        True,
                                        'debridonly':
                                        False
                                    })
                                except:
                                    pass
                        except:
                            pass
                    except:
                        # Fallback: treat the embedURL as a hoster link.
                        url = re.findall(r'embedURL"\s*:\s*"([^"]+)', p)[0]
                        valid, hoster = source_utils.is_host_valid(
                            url, hostDict)
                        if not valid: continue
                        urls, host, direct = source_utils.check_directstreams(
                            url, hoster)
                        for x in urls:
                            sources.append({
                                'source': host,
                                'quality': 'SD',
                                'language': 'en',
                                'url': x['url'],
                                'direct': direct,
                                'debridonly': False
                            })

                except:
                    pass

            return sources
        except:
            return sources
예제 #42
0
    def sources(self, url, hostDict, hostprDict):
        """Collect German-language streams from the provider's player tabs.

        Loads the page (keeping its cookies), keeps only the player tabs
        whose flag icon is 'de', gathers file/iframe/source links from the
        matching tab panes, unpacks packed scripts for same-domain hosts
        and emits gvideo or external-hoster sources.
        """
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            # 'extended' output: r[0]=body, r[2]=cookies, r[3]=headers
            # (NOTE(review): assumed from the usage below — confirm against
            # client.request's contract).
            r = client.request(url, output='extended')

            headers = r[3]
            headers.update({'Cookie': r[2].get('Set-Cookie'), 'Referer': self.base_link})
            r = r[0]

            # Player tabs: pair each tab's target id with its flag icon and
            # keep only the German ('de') ones.
            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:], re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
            rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            # Candidate links: inline link/file values plus iframe and
            # <source> tags inside the selected tab panes.
            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i[0].content for i in r]))
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')]
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in links:
                try:
                    # Strip BBCode-style tags and decode HTML entities.
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if '/play/' in i: i = urlparse.urljoin(self.base_link, i)

                    if self.domains[0] in i:
                        # Same-domain player page: fetch it and unpack its
                        # scripts so the real file/label pairs are visible.
                        i = client.request(i, headers=headers, referer=url)

                        # Base64-wrapped packed payloads inside call syntax.
                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try: i += jsunpack.unpack(base64.decodestring(re.sub('"\s*\+\s*"', '', x))).replace('\\', '')
                            except: pass

                        # Plain (p,a,c,k,e,d) eval blocks.
                        for x in re.findall('(eval\s*\(function.*?)</script>', i, re.DOTALL):
                            try: i += jsunpack.unpack(x).replace('\\', '')
                            except: pass

                        links = [(match[0], match[1]) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', i, re.DOTALL)]
                        links = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in links if '/no-video.mp4' not in x[0]]

                        # youtu.be ids on these pages map to Google Drive docs.
                        doc_links = [directstream.google('https://drive.google.com/file/d/%s/view' % match) for match in re.findall('''file:\s*["'](?:[^"']+youtu.be/([^"']+))''', i, re.DOTALL)]
                        doc_links = [(u['url'], u['quality']) for x in doc_links if x for u in x]
                        links += doc_links

                        for url, quality in links:
                            # Same-site streams need the referer appended.
                            if self.base_link in url:
                                url = url + '|Referer=' + self.base_link

                            sources.append({'source': 'gvideo', 'quality': quality, 'language': 'de', 'url': url, 'direct': True, 'debridonly': False})
                    else:
                        try:
                            # as long as resolveurl get no Update for this URL (So just a Temp-Solution)
                            did = re.findall('youtube.googleapis.com.*?docid=(\w+)', i)
                            if did: i = 'https://drive.google.com/file/d/%s/view' % did[0]

                            valid, host = source_utils.is_host_valid(i, hostDict)
                            if not valid: continue

                            urls, host, direct = source_utils.check_directstreams(i, host)

                            for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #43
0
    def sources(self, url, hostDict, hostprDict):
        """Collect google-video stream links for a Moviego title page.

        Rejects CAM/TS quality pages, unpacks any packed player scripts on
        the embed page and on each alternate-source page, and returns the
        de-duplicated list of direct streams.
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            html = client.request(url)

            # Skip low-quality (cam/telesync) releases outright.
            label = client.parseDOM(html, 'div', attrs={'class': 'poster-qulabel'})[0].lower()
            if 'cam' in label or 'ts' in label: raise Exception()

            ref = client.parseDOM(html, 'iframe', ret='src')[0]
            html = client.request(ref, referer=url)

            # Append the unpacked form of every inline script so the plain
            # <source> tags become visible to parseDOM.
            for packed in re.compile('<script>(.+?)</script>', re.DOTALL).findall(html):
                try:
                    html += jsunpack.unpack(packed)
                except:
                    pass

            streams = client.parseDOM(html, 'source', ret='src')

            # Alternate player sources are wired up via onclick handlers.
            handlers = client.parseDOM(html, 'li', ret='onclick')
            handlers = [re.findall('\'(.+?\d+)', h) for h in handlers]
            handlers = [h[0] for h in handlers if h]

            for handler in handlers:
                try:
                    alt = ref + "?source=%s" % handler
                    page = client.request(alt, referer=alt)
                    for packed in re.compile('<script>(.+?)</script>', re.DOTALL).findall(page):
                        try:
                            page += jsunpack.unpack(packed)
                        except:
                            pass
                    streams += client.parseDOM(page, 'source', ret='src')
                except:
                    pass

            # Drop duplicates while keeping first-seen order.
            deduped = []
            for stream in streams:
                if stream not in deduped:
                    deduped.append(stream)

            for stream in deduped:
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality': directstream.googletag(stream)[0]['quality'],
                        'provider': 'Moviego',
                        'url': stream,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
예제 #44
0
    def sources(self, url, hostDict, locDict):
        """Scrape playable stream sources for the item described by *url*.

        *url* is a query string carrying item metadata (title /
        tvshowtitle, aliases, year, and season/episode for TV).  The
        site search is scraped for a matching entry, the embedded
        streamdor player is decoded (JuicyCodes base64 + jsunpack) and
        the resulting embed / HLS links are emitted as source dicts.
        Returns the (possibly empty) list gathered so far on failure.
        """
        sources = []

        try:
            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            # FIX: 'year' was read below but never defined, so the
            # exact-year match always raised NameError and silently fell
            # through to the alias-only fallback.
            year = data['year'] if 'year' in data else ''
            # NOTE(review): eval() on serialized metadata -- upstream is
            # trusted here, but ast.literal_eval would be safer.
            aliases = eval(data['aliases'])
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)
            try:

                if 'episode' in data:
                    # TV: match "<show> - Season <n>" results against the
                    # aliases, then pick the episode link off /watch.
                    r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
                    r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
                    r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
                    r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
                    url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == data['season']][0]

                    url = '%swatch' % url
                    result = client.request(url)

                    url = re.findall('a href=\"(.+?)\" class=\"btn-eps first-ep \">Episode %02d' % int(data['episode']), result)[0]

                else:
                    # Movie: prefer an alias + exact-year match, fall
                    # back to alias-only when none is found.
                    r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
                    r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
                    results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
                    try:
                        r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                        url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
                    except:
                        url = None

                    if (url == None):
                        url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
                    url = '%s/watch' % url

                url = client.request(url, output='geturl')
                if url == None: raise Exception()

            except:
                return sources

            url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
            result = client.request(url)
            src = re.findall('src\s*=\s*"(.*streamdor.co\/video\/\d+)"', result)[0]
            if src.startswith('//'):
                src = 'http:' + src
            episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]
            p = client.request(src, referer=url)
            try:
                # Undo the JuicyCodes wrapper: strip the '"+"' splits and
                # stray characters, base64-decode, then unpack the
                # p.a.c.k.e.d JavaScript.
                p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
                p = re.sub(r'\"\s*\+\s*\"', '', p)
                p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
                p = base64.b64decode(p)
                p = jsunpack.unpack(p)
                p = unicode(p, 'utf-8')

                # A per-episode token is required to build the HLS URL.
                post = {'id': episodeId}
                p2 = client.request('https://embed.streamdor.co/token.php?v=5', post=post, referer=src, XHR=True)
                js = json.loads(p2)
                tok = js['token']
                quali = 'SD'
                try:
                    quali = re.findall(r'label:"(.*?)"', p)[0]
                except:
                    pass
                p = re.findall(r'var\s+episode=({[^}]+});', p)[0]
                js = json.loads(p)
                ss = []

                if 'fileEmbed' in js and js['fileEmbed'] != '':
                    ss.append([js['fileEmbed'], quali])
                if 'fileHLS' in js and js['fileHLS'] != '':
                    ss.append(['https://hls.streamdor.co/%s%s' % (tok, js['fileHLS']), quali])
            except:
                return sources

            for link in ss:

                try:
                    # NOTE(review): this tests the page 'url', not
                    # link[0] -- looks suspicious but preserved as-is.
                    if 'google' in url:
                        valid, hoster = source_utils.is_host_valid(url, hostDict)
                        urls, host, direct = source_utils.check_directstreams(url, hoster)
                        for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})

                    else:
                        try:
                            valid, hoster = source_utils.is_host_valid(link[0], hostDict)
                            direct = False
                            if not valid:
                                # Unknown host: treat as a direct CDN link.
                                hoster = 'CDN'
                                direct = True
                            sources.append({'source': hoster, 'quality': link[1], 'language': 'en', 'url': link[0], 'direct': direct, 'debridonly': False})
                        except: pass

                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('CMoviesHD - Exception: \n' + str(failure))
            return sources
예제 #45
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape stream sources for a movie.

        *url* is a query string with at least 'title' and 'year'.  The
        site search is scraped for an exact title/year match, then each
        result page's vidlink.org embed is unpacked to yield an
        openload source plus direct m3u8 streams per resolution.
        Returns the (possibly empty) list gathered so far on failure.
        """
        try:
            sources = []

            if url is None:
                return sources

            # Decode the metadata query string into a flat dict.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']

            # Release year, used to disambiguate same-titled movies.
            hdlr = data['year']

            # Strip characters the site's search chokes on.
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', title)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

            posts = client.parseDOM(r, 'div', attrs={'class': 'video_title'})

            items = []

            # Keep only results whose cleaned title and year both match.
            for post in posts:
                try:
                    data = dom_parser2.parse_dom(post,
                                                 'a',
                                                 req=['href', 'title'])[0]
                    t = data.content
                    y = re.findall('\((\d{4})\)', data.attrs['title'])[0]
                    qual = data.attrs['title'].split('-')[1]
                    link = data.attrs['href']

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()
                    if not y == hdlr:
                        raise Exception()

                    items += [(link, qual)]

                except Exception:
                    pass
            for item in items:
                try:
                    r = client.request(item[0]) if item[0].startswith(
                        'http') else client.request(
                            urlparse.urljoin(self.base_link, item[0]))

                    # NOTE(review): 'qual' is re-read here but never used
                    # afterwards (quality comes from the stream list).
                    qual = client.parseDOM(r, 'h1')[0]
                    # quality = source_utils.get_release_quality(item[1], qual)[0]

                    url = re.findall('''frame_url\s*=\s*["']([^']+)['"]\;''',
                                     r, re.DOTALL)[0]
                    # Handles protocol-relative '//host/...' embed URLs.
                    url = url if url.startswith('http') else urlparse.urljoin(
                        'https://', url)

                    ua = {
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
                    }

                    # vidlink requires this view-count POST before it
                    # reveals the packed player script.
                    postID = url.split('/embed/')[1]
                    post_link = 'https://vidlink.org/embed/update_views'
                    payload = {'postID': postID}
                    headers = ua
                    headers['X-Requested-With'] = 'XMLHttpRequest'
                    headers['Referer'] = url

                    ihtml = client.request(post_link,
                                           post=payload,
                                           headers=headers)
                    # Unpack the p.a.c.k.e.d player JS into plain text.
                    linkcode = jsunpack.unpack(ihtml).replace('\\', '')
                    try:
                        extra_link = re.findall(r'var oploadID="(.+?)"',
                                                linkcode)[0]
                        oload = 'https://openload.co/embed/' + extra_link
                        sources.append({
                            'source': 'openload.co',
                            'quality': '1080p',
                            'language': 'en',
                            'url': oload,
                            'direct': False,
                            'debridonly': False
                        })

                    except Exception:
                        pass

                    # Direct HLS: derive the playlist base URL, then emit
                    # one source per advertised resolution.
                    give_me = re.findall(r'var file1="(.+?)"', linkcode)[0]
                    stream_link = give_me.split('/pl/')[0]
                    headers = {
                        'Referer':
                        'https://vidlink.org/',
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
                    }
                    r = client.request(give_me, headers=headers)
                    my_links = re.findall(
                        r'[A-Z]{10}=\d+x(\d+)\W[A-Z]+=\"\w+\"\s+\/(.+?)\.', r)
                    for quality_bitches, link in my_links:

                        # Map the advertised height to a quality label;
                        # anything at or below 480 counts as SD.
                        if '1080' in quality_bitches:
                            quality = '1080p'
                        elif '720' in quality_bitches:
                            quality = '720p'
                        elif '480' in quality_bitches:
                            quality = 'SD'
                        elif '360' in quality_bitches:
                            quality = 'SD'
                        else:
                            quality = 'SD'

                        final = stream_link + '/' + link + '.m3u8'
                        sources.append({
                            'source': 'GVIDEO',
                            'quality': quality,
                            'language': 'en',
                            'url': final,
                            'direct': True,
                            'debridonly': False
                        })

                except Exception:
                    pass

            return sources
        except Exception:
            return sources
예제 #46
0
    def sources(self, url, hostDict, hostprDict):
        """Collect stream sources for an episode or movie page.

        For TV shows the episode URL is built from the show title,
        season and episode, then validated against the premiere year;
        movies use data['url'] directly.  Harvests inline jwplayer file
        lists, openload iframes and the site's own packed /play/ pages.
        Returns the (possibly empty) list gathered so far on failure.
        """
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            # NOTE(review): eval() on the serialized headers dict -- the
            # caller is trusted, but ast.literal_eval would be safer.
            headers = eval(data['headers'])

            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, headers=headers, output='geturl')
                if url == None: raise Exception()

                r = client.request(url, headers=headers)

                # Reject the page unless its displayed date matches the
                # expected premiere year.
                y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]

                y = re.findall('(\d{4})', y)[0]
                if not y == year: raise Exception()

            else:
                url = data['url']
                url = client.request(url, headers=headers, output='geturl')
                if url == None: raise Exception()

                r = client.request(url, headers=headers)


            try:
                # Inline jwplayer sources embedded in the page.
                # NOTE(review): this rebinds 'r' (the page HTML) to a
                # list of file URLs, so the iframe scan below then runs
                # on that list -- confirm client.parseDOM accepts lists.
                result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                r = re.findall('"file"\s*:\s*"(.+?)"', result)

                for url in r:
                    try:
                        url = url.replace('\\', '')
                        url = directstream.googletag(url)[0]
                        sources.append({'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass

            links = client.parseDOM(r, 'iframe', ret='src')

            for link in links:
                try:
                    if 'openload.io' in link or 'openload.co' in link or 'oload.tv' in link:
                        sources.append(
                            {'source': 'openload.co', 'quality': 'HD', 'language': 'en', 'url': link, 'direct': False,
                             'debridonly': False})
                        # Deliberate: break out of this try; the next
                        # handler still sees the same link.
                        raise Exception()
                except:
                    pass

                try:
                    url = link.replace('\/', '/')
                    url = client.replaceHTMLCodes(url)
                    url = 'http:' + url if url.startswith('//') else url
                    url = url.encode('utf-8')

                    # Only the site's own /play/ pages are handled here.
                    if not '/play/' in url: raise Exception()

                    r = client.request(url, headers=headers, timeout='10')

                    # Unpack any p.a.c.k.e.d scripts and append the plain
                    # JS so the regexes below can see the file list.
                    s = re.compile('<script type="text/javascript">(.+?)</script>', re.DOTALL).findall(r)

                    for i in s:
                        try:
                            r += jsunpack.unpack(i)
                        except:
                            pass

                    try:
                        result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                        r = re.findall('"file"\s*:\s*"(.+?)"', result)

                        for url in r:
                            try:
                                url = url.replace('\\', '')
                                url = directstream.googletag(url)[0]
                                sources.append({'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False})
                            except:
                                pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #47
0
    def sources(self, url, hostDict, hostprDict):
        """Collect stream sources for an episode or movie page.

        For TV shows the episode page URL is built from the show title,
        season and episode and validated against the premiere year; for
        movies *url* is resolved directly.  Harvests inline jwplayer
        file lists, openload iframes, the site's AJAX player endpoint
        and packed /play/ scripts.  Returns the (possibly empty) list
        gathered so far on failure.
        """
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (
                    self.base_link, cleantitle.geturl(data['tvshowtitle']),
                    int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url == None: raise Exception()
                # FIX: 'ref' was only assigned on the movie branch, so
                # the AJAX request below raised NameError for episodes.
                ref = url

                r = client.request(url)

                # Reject the page unless its displayed date matches the
                # expected premiere year.
                y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0]

                y = re.findall('(\d{4})', y)[0]
                if not y == year: raise Exception()

            else:
                url = client.request(url, output='geturl')
                if url == None: raise Exception()
                ref = url
                r = client.request(url)

            try:
                # Inline jwplayer sources embedded in the page.
                # FIX: the file list previously overwrote 'r' (the page
                # HTML), breaking the iframe/quality scraping below
                # whenever inline sources were present.
                result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                files = re.findall('"file"\s*:\s*"(.+?)"', result)

                for url in files:
                    try:
                        url = url.replace('\\', '')
                        url = directstream.googletag(url)[0]
                        sources.append({
                            'source': 'gvideo',
                            'quality': url['quality'],
                            'language': 'en',
                            'url': url['url'],
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

            links = client.parseDOM(r, 'iframe', ret='src')
            # Page-level quality badge; default to SD when absent.
            q = re.findall(r'class="qualityx">([^<]+)', r)[0] if re.search(
                r'class="qualityx">([^<]+)', r) != None else 'SD'
            q = source_utils.get_release_quality(q)[0]

            for link in links:
                try:
                    if 'openload.io' in link or 'openload.co' in link or 'oload.tv' in link:
                        sources.append({
                            'source': 'openload.co',
                            'quality': 'SD',
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })
                        # Deliberate: skip the AJAX lookup for this link.
                        raise Exception()
                    if re.search(r'^((?!youtube).)*embed.*$', link) == None:
                        # Non-embed link: ask the site's AJAX endpoint
                        # for video info using the page nonce/server/ids.
                        values = re.findall(
                            r'nonces":{"ajax_get_video_info":"(\w+)".*?data-servers="(\d+)"\s+data-ids="([^"]+)',
                            r, re.DOTALL)
                        post = urllib.urlencode({
                            'action': 'ajax_get_video_info',
                            'ids': values[0][2],
                            'server': values[0][1],
                            'nonce': values[0][0]
                        })
                        r = client.request(
                            urlparse.urljoin(self.base_link, self.post_link),
                            post=post,
                            headers={
                                'Referer': ref,
                                'X-Requested-With': 'XMLHttpRequest',
                                'Accept-Encoding': 'gzip, deflate'
                            })
                    else:
                        r = client.request(link)

                    links = re.findall(
                        r'((?:{"file.*?})|(?:\/embed\/[^\']+))\'\s+id="(\d+)',
                        r)
                    strm_urls = re.findall(r'(https?.*-)\d+\.mp\w+', r)

                    for i in links:
                        try:
                            try:
                                # JSON entry with explicit file/label.
                                i = json.loads(i[0])
                                url = i['file']
                                q = source_utils.label_to_quality(i['label'])
                            except:
                                # Otherwise rebuild the mp4 URL from the
                                # stream base plus the entry id.
                                url = '%s%s.mp4' % (strm_urls[0], i[1])
                                q = source_utils.label_to_quality(i[1])

                            if 'google' in url:
                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                urls, host, direct = source_utils.check_directstreams(
                                    url, hoster)
                                for x in urls:
                                    sources.append({
                                        'source': host,
                                        'quality': x['quality'],
                                        'language': 'en',
                                        'url': x['url'],
                                        'direct': direct,
                                        'debridonly': False
                                    })

                            else:
                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                if not valid:
                                    # Unknown host: assume a direct CDN
                                    # link.
                                    sources.append({
                                        'source': 'CDN',
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                    continue
                                else:
                                    sources.append({
                                        'source': hoster,
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': False,
                                        'debridonly': False
                                    })

                        except:
                            pass

                except:
                    pass

                try:
                    url = link.replace('\/', '/')
                    url = client.replaceHTMLCodes(url)
                    url = 'http:' + url if url.startswith('//') else url
                    url = url.encode('utf-8')

                    # Only the site's own /play/ pages are handled here.
                    if not '/play/' in url: raise Exception()

                    r = client.request(url, timeout='10')

                    # Unpack any p.a.c.k.e.d scripts and append the plain
                    # JS so the regexes below can see the file list.
                    s = re.compile(
                        '<script type="text/javascript">(.+?)</script>',
                        re.DOTALL).findall(r)

                    for i in s:
                        try:
                            r += jsunpack.unpack(i)
                        except:
                            pass

                    try:
                        result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                        files = re.findall('"file"\s*:\s*"(.+?)"', result)

                        for url in files:
                            try:
                                url = url.replace('\\', '')
                                url = directstream.googletag(url)[0]
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': url['quality'],
                                    'language': 'en',
                                    'url': url['url'],
                                    'direct': True,
                                    'debridonly': False
                                })
                            except:
                                pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #48
0
    def sources(self, url, hostDict, hostprDict):
        """Collect German-language stream sources for a page URL.

        Scrapes the player tab list for German-flagged tabs, pulls
        link/file URLs, metaframe iframes and <source> tags out of each
        tab pane, resolves same-domain hosted players (packed JS ->
        file/label pairs) to direct gvideo links, and maps every other
        link to its hoster.  Returns the (possibly empty) list gathered
        so far on failure.
        """
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            # Collect the ids of tabs whose flag icon is German ('de').
            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:], re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
            rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            # Candidate links: inline link/file JS values, metaframe
            # iframes, and <source> tags inside the selected panes.
            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i[0].content for i in r]))
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')]
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in links:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if self.domains[0] in i:
                        # Same-domain player page: unpack its scripts and
                        # harvest file/label pairs as direct gvideo links.
                        i = client.request(i, referer=url)

                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try: i += jsunpack.unpack(base64.decodestring(re.sub('"\s*\+\s*"', '', x)))
                            except: pass

                        s = re.compile('(eval\(function.*?)</script>', re.DOTALL).findall(i)

                        for x in s:
                            try: i += jsunpack.unpack(x)
                            except: pass

                        i = [(match[0], match[1]) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', i, re.DOTALL)]
                        i = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i if '/no-video.mp4' not in x[0]]

                        for url, quality in i:
                            sources.append({'source': 'gvideo', 'quality': quality, 'language': 'de', 'url': url, 'direct': True, 'debridonly': False})
                    else:
                        try:
                            valid, host = source_utils.is_host_valid(i, hostDict)
                            if not valid: continue

                            # FIX: the google branch used to be a
                            # separate 'if' ahead of this chain, so
                            # successfully resolved google URLs were
                            # always clobbered by the final 'else'
                            # (a single SD, indirect raw link).
                            if 'google' in i:
                                host = 'gvideo'
                                direct = True
                                urls = directstream.google(i)
                                if not urls and directstream.googletag(i):
                                    urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                            elif 'ok.ru' in i:
                                host = 'vk'
                                direct = True
                                urls = directstream.odnoklassniki(i)
                            elif 'vk.com' in i:
                                host = 'vk'
                                direct = True
                                urls = directstream.vk(i)
                            else:
                                direct = False
                                urls = [{'quality': 'SD', 'url': i}]

                            for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
예제 #49
0
    def sources(self, url, hostDict, hostprDict):
        """Collect stream sources for a pelispedia-style page.

        Loads the page's player iframe, enumerates the server buttons,
        forwards non-pelispedia links to their hosters, and tries a
        series of extraction strategies on pelispedia-hosted players
        (inline jwplayer lists, gkpluginsphp POST, 'parametros' protect
        endpoints, and packed-JS unpacking).  Returns the (possibly
        empty) list gathered so far on failure.
        """
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = dom_parser.parse_dom(r, 'div', {'class': 'repro'})

            # The player lives in an iframe; 'f' is its URL and becomes
            # the Referer for all follow-up requests.
            r = dom_parser.parse_dom(r[0].content, 'iframe', req='src')
            f = r[0].attrs['src']

            # Each server button is an (href, hostname) pair.
            r = client.request(f)
            r = dom_parser.parse_dom(r, 'div', {'id': 'botones'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], urlparse.urlparse(i.attrs['href']).netloc)
                 for i in r]

            links = []

            for u, h in r:
                # External hosters are emitted as-is and skipped below.
                if not 'pelispedia' in h:
                    valid, host = source_utils.is_host_valid(u, hostDict)
                    if not valid: continue

                    links.append({
                        'source': host,
                        'quality': 'SD',
                        'url': u,
                        'direct': False
                    })
                    continue

                result = client.request(u,
                                        headers={'Referer': f},
                                        timeout='10')

                try:
                    # NOTE(review): at this point 'pelispedia' is always
                    # in h (see the loop guard above), so this raise
                    # always fires and the 720p extraction below is
                    # effectively dead code -- confirm before removing.
                    if 'pelispedia' in h: raise Exception()

                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall(
                        'file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,\s*label\s*:\s*(?:\"|\')(.+?)(?:\"|\')',
                        url)
                    url = [i[0] for i in url if '720' in i[1]][0]

                    links.append({
                        'source': 'cdn',
                        'quality': 'HD',
                        'url': url,
                        'direct': False
                    })
                except:
                    pass

                try:
                    # Inline jwplayer file list -> direct gvideo links.
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)

                    for i in url:
                        try:
                            links.append({
                                'source':
                                'gvideo',
                                'quality':
                                directstream.googletag(i)[0]['quality'],
                                'url':
                                i,
                                'direct':
                                True
                            })
                        except:
                            pass
                except:
                    pass

                try:
                    # gkpluginsphp resolver: POST the embedded link and
                    # read the JSON 'link' back.
                    post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)',
                                      result)[0]
                    post = urllib.urlencode({'link': post})

                    url = urlparse.urljoin(
                        self.base_link, '/gkphp_flv/plugins/gkpluginsphp.php')
                    url = client.request(url,
                                         post=post,
                                         XHR=True,
                                         referer=u,
                                         timeout='10')
                    url = json.loads(url)['link']

                    links.append({
                        'source': 'gvideo',
                        'quality': 'HD',
                        'url': url,
                        'direct': True
                    })
                except:
                    pass

                try:
                    # 'parametros' protect endpoint, fv=25 variant:
                    # returns a single URL object.
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)',
                                      result)[0]

                    post = urlparse.parse_qs(
                        urlparse.urlparse(post).query)['pic'][0]
                    post = urllib.urlencode({
                        'sou': 'pic',
                        'fv': '25',
                        'url': post
                    })

                    url = client.request(self.protect_link,
                                         post=post,
                                         XHR=True,
                                         timeout='10')
                    url = json.loads(url)[0]['url']

                    links.append({
                        'source': 'cdn',
                        'quality': 'HD',
                        'url': url,
                        'direct': True
                    })
                except:
                    pass

                try:
                    # Packed-JS player: unpack, then resolve each file
                    # URL (following redirects) to a gvideo link.
                    if not jsunpack.detect(result): raise Exception()

                    result = jsunpack.unpack(result)
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*.*?\'(.+?)\'', url)
                    for i in url:
                        try:
                            i = client.request(i,
                                               headers={'Referer': f},
                                               output='geturl',
                                               timeout='10')
                            links.append({
                                'source':
                                'gvideo',
                                'quality':
                                directstream.googletag(i)[0]['quality'],
                                'url':
                                i,
                                'direct':
                                True
                            })
                        except:
                            pass
                except:
                    pass

                try:
                    # 'parametros' protect endpoint, fv=0 + fixed token
                    # variant: returns a list of URL objects.
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)',
                                      result)[0]

                    post = urlparse.parse_qs(
                        urlparse.urlparse(post).query)['pic'][0]
                    token = 'eyJjdCI6InZGS3QySm9KRWRwU0k4SzZoZHZKL2c9PSIsIml2IjoiNDRkNmMwMWE0ZjVkODk4YThlYmE2MzU0NDliYzQ5YWEiLCJzIjoiNWU4MGUwN2UwMjMxNDYxOCJ9'
                    post = urllib.urlencode({
                        'sou': 'pic',
                        'fv': '0',
                        'url': post,
                        'token': token
                    })

                    url = client.request(self.protect_link,
                                         post=post,
                                         XHR=True,
                                         timeout='10')
                    js = json.loads(url)
                    url = [i['url'] for i in js]
                    for i in url:
                        try:
                            i = client.request(i,
                                               headers={'Referer': f},
                                               output='geturl',
                                               timeout='10')
                            links.append({
                                'source':
                                'gvideo',
                                'quality':
                                directstream.googletag(i)[0]['quality'],
                                'url':
                                i,
                                'direct':
                                True
                            })
                        except:
                            pass
                except:
                    pass

            # Normalize the collected links into source dicts.
            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'language': 'en',
                    'url': i['url'],
                    'direct': i['direct'],
                    'debridonly': False
                })

            return sources
        except:
            return sources