Code Example #1
	def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
		try:
			sources = []
			if control.setting('Provider-%s' % name) == False:
				log('INFO','get_sources','Provider Disabled by User')
				log('INFO', 'get_sources', 'Completed')
				return sources

			if url == None: 
				log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
				log('INFO', 'get_sources', 'Completed')
				return sources

			links_m = []
			TYPES_QUAL = {'SD':'480p', '3D SD':'480p', '3D FullHD':'1080p'}
			TYPES_RIP = {'SD':'BRRIP', '3D SD':'3D-BRRIP', '3D FullHD':'3D-BRRIP'}
			
			for data_j in url:
				try:
					file = data_j['file']
					page = data_j['page']
					poster = data_j['poster']
					label = data_j['label']
					sub_url = data_j['srt']
					qual = '480p'
					riptype = 'BRRIP'
					if label in TYPES_QUAL.keys():
						qual = TYPES_QUAL[label]
						riptype = TYPES_RIP[label]
				
					headers = {'Referer': page, 'User-Agent': self.user_agent}
					try:
						l = resolvers.createMeta(file, self.name, self.logo, qual, [], key, riptype, testing=testing, sub_url=sub_url, headers=headers, poster=poster)
						for ll in l:
							if ll != None and 'key' in ll.keys():
								links_m.append(ll)
					except:
						pass
					
					if testing == True and len(links_m) > 0:
						break
				except:
					pass
					
			for l in links_m:
				if l != None and 'key' in l.keys():
					sources.append(l)

			if len(sources) == 0:
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
			else:
				log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
				
			log('INFO', 'get_sources', 'Completed')
			
			return sources
		except Exception as e:
			log('ERROR', 'get_sources', '%s' % e)
			log('INFO', 'get_sources', 'Completed')
			return sources
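
All of these examples share the same provider contract: get_sources always returns a list (possibly empty) of source dicts, each carrying a 'key' field, and the outer try/except means it effectively never raises. A minimal sketch of a caller that relies on that contract (collect_sources and the wiring are hypothetical; the real plugin drives providers through its own source manager):

def collect_sources(providers, url, key, testing=False):
    # hypothetical driver; each provider returns [] on failure rather than raising
    all_sources = []
    for provider in providers:
        srcs = provider.get_sources(url, key=key, testing=testing)
        all_sources.extend(srcs or [])
    return all_sources
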
Code Example #2
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                return sources

            links_m = []
            for data in url:
                try:
                    links_m = resolvers.createMeta(data['link'],
                                                   self.name,
                                                   self.logo,
                                                   data['qual'],
                                                   links_m,
                                                   key,
                                                   poster=data['poster'],
                                                   riptype=data['rip'],
                                                   testing=testing)
                    if testing == True and len(links_m) > 0:
                        break
                except:
                    pass

            for l in links_m:
                sources.append(l)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
                return sources

            log('SUCCESS',
                'get_sources',
                '%s sources : %s' %
                (cleantitle.title_from_key(key), len(sources)),
                dolog=not testing)
            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
            return sources
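
Unlike Example #1, which calls resolvers.createMeta with a fresh empty list and appends the results itself, this version threads the running links_m list through createMeta and takes the return value as the new accumulator. The shape of that idiom (create_meta here is a hypothetical stand-in, not the plugin's resolver):

def create_meta(link, links, quality='480p'):
    # stand-in for resolvers.createMeta: extend the running list and return it
    if link:
        links.append({'url': link, 'quality': quality, 'key': 'demo'})
    return links

links_m = []
for data in [{'link': 'http://example.com/a.mp4'}, {'link': None}]:
    links_m = create_meta(data['link'], links_m)
print(len(links_m))  # -> 1
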
Code Example #3
File: __init__.py Project: Jtl12/FMoviesPlus.bundle
	def executeThreads(self, key):
		
		try:
			title = cleantitle.title_from_key(key)
			log(type='INFO', err='Starting threads : %s' % title)
			while key in self.threadSlots:
				for s in self.threadSlots[key]:
					active = 0
					done = 0
					idle = 0
					for s1 in self.threadSlots[key]:
						if s1['status'] == 'active':
							active += 1
						if s1['status'] == 'done':
							done += 1
						if s1['status'] == 'idle':
							idle += 1
							
					if done == len(self.threadSlots[key]):
						log(type='INFO', err='Threads completed ! : %s' % title)
						control.savePermStore()
						return
							
					if s['status'] == 'idle' and active < int(control.setting('control_concurrent_src_threads')):
						s['thread'].start()
						s['status'] = 'active'
						s['s_time'] = time.time()
						log(type='INFO', err='Starting thread : %s > %s' % (title, s['source']))
					
					time.sleep(0.1)
				time.sleep(1.0)
		except Exception as e:
			log(type='ERROR', err='Thread Title %s - %s' % (title,e))
		control.savePermStore()
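
The scheduling loop above keeps a fixed pool of pre-built threads per key and starts idle ones while the 'active' count stays below the control_concurrent_src_threads setting. A self-contained sketch of the same pattern (make_slot/run_slots are illustrative; in the plugin, workers update their slot status elsewhere):

import threading
import time

def make_slot(fn):
    # one slot per worker: {'status': 'idle'|'active'|'done', 'thread': Thread}
    slot = {'status': 'idle'}
    def wrapper():
        fn()
        slot['status'] = 'done'  # the worker marks its own slot when it finishes
    slot['thread'] = threading.Thread(target=wrapper)
    return slot

def run_slots(slots, max_active=2):
    # start idle slots while the active count stays under the cap
    while any(s['status'] != 'done' for s in slots):
        active = sum(1 for s in slots if s['status'] == 'active')
        for s in slots:
            if s['status'] == 'idle' and active < max_active:
                s['status'] = 'active'  # mark before start() to avoid racing a fast worker
                s['thread'].start()
                active += 1
        time.sleep(0.1)

slots = [make_slot(lambda: time.sleep(0.3)) for _ in range(5)]
run_slots(slots)
print(all(s['status'] == 'done' for s in slots))  # -> True

Note the sketch marks the slot 'active' before calling start(); the loop above does it the other way around, which can briefly overwrite a fast worker's 'done' status if workers update their own slots on completion (not shown in these excerpts).
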
Code Example #4
	def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
		try:
			sources = []
			if control.setting('Provider-%s' % name) == False:
				log('INFO','get_sources','Provider Disabled by User')
				log('INFO', 'get_sources', 'Completed')
				return sources
			if url == None: 
				log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
				log('INFO', 'get_sources', 'Completed')
				return sources

			links_m = []
			data = self.flix.get_sources(url)
			for d in data:
				vidurl = d['url']
				quality = d['quality']
				poster = d['poster']
				headers = {'Referer':url}
				try:
					if client.geturlhost(vidurl) not in self.avoidHosts:
						l = resolvers.createMeta(vidurl, self.name, self.logo, quality, [], key, poster=poster, testing=testing, headers=headers)
						for ll in l:
							if ll != None and 'key' in ll.keys():
								links_m.append(ll)
						
						if testing == True:
							break
				except Exception as e:
					log('ERROR', 'get_sources-0', '%s' % e, dolog=not testing)	
					
			for l in links_m:
				if l != None and 'key' in l.keys():
					sources.append(l)
			
			if len(sources) == 0:
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
			else:
				log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
				
			log('INFO', 'get_sources', 'Completed')
			
			return sources
		except Exception as e:
			log('ERROR', 'get_sources', '%s' % e)
			log('INFO', 'get_sources', 'Completed')
			return sources
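
This example filters candidate hosts against self.avoidHosts via client.geturlhost before resolving. A rough standard-library equivalent (AVOID_HOSTS and the URLs are made up, and client.geturlhost's exact host normalization may differ):

from urllib.parse import urlparse  # the plugin uses its own client.geturlhost helper

AVOID_HOSTS = ['slowcdn.example']  # hypothetical blocklist, like self.avoidHosts

def host_allowed(url):
    return urlparse(url).netloc not in AVOID_HOSTS

print(host_allowed('http://fastcdn.example/v.mp4'))  # -> True
print(host_allowed('http://slowcdn.example/v.mp4'))  # -> False
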
Code Example #5
 def executeThreadsStatus(self, key, thread):
     tuid = control.id_generator(16)
     try:
         title = cleantitle.title_from_key(key)
         control.AddThread('executeThreadsStatus',
                           'Provider Search Manage Thread: %s' % title,
                           time.time(), '1', False, tuid, thread)
         while thread != None and thread.isAlive():
             time.sleep(1.0)
     except Exception as e:
         log(type='ERROR-CRITICAL', err='executeThreadsStatus - %s' % e)
     control.RemoveThread(tuid)
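
This is a watchdog: register the search thread in a global registry, poll until it dies, then unregister it. The same shape with a plain dict as the registry (watch and 'demo-tuid' are made up; isAlive() is the Python 2 spelling of is_alive()):

import threading
import time

def watch(thread, registry, tuid):
    # register, poll until the thread dies, then always unregister
    registry[tuid] = thread
    try:
        while thread is not None and thread.is_alive():  # isAlive() in Python 2, as above
            time.sleep(0.5)
    finally:
        registry.pop(tuid, None)

reg = {}
t = threading.Thread(target=lambda: time.sleep(0.2))
t.start()
watch(t, reg, 'demo-tuid')
print(reg)  # -> {}
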
Code Example #6
    def executeThreads(self, key):
        try:
            title = cleantitle.title_from_key(key)
            log(type='SUCCESS', err='Starting Threads ! : %s' % title)
            while key in self.threadSlots:
                for s in self.threadSlots[key]:
                    active = 0
                    done = 0
                    idle = 0
                    for s1 in self.threadSlots[key]:
                        if s1['status'] == 'active':
                            active += 1
                        if s1['status'] == 'idle':
                            idle += 1
                        if s1['status'] == 'done-marked':
                            log(type='SUCCESS',
                                err='Completed Thread: %s > %s in %ss.' %
                                (title, s1['source'],
                                 round(s1['e_time'] - s1['s_time'], 2)))
                            control.RemoveThread(s1['tuid'])
                            s1['status'] = 'done'
                        if s1['status'] == 'done':
                            done += 1

                    if done == len(self.threadSlots[key]):
                        log(type='SUCCESS',
                            err='Completed Threads ! : %s with %s sources' %
                            (title, len(self.sourcesFilter(key=key))))
                        control.savePermStore()
                        return

                    if s['status'] == 'idle' and active < int(
                            control.setting('control_concurrent_src_threads')):
                        log(type='SUCCESS',
                            err='Starting Thread: %s > %s' %
                            (title, s['source']))
                        s['status'] = 'active'
                        s['s_time'] = time.time()
                        tuid2 = control.id_generator(16)
                        control.AddThread(
                            'executeThreads',
                            'Provider Search Thread: %s > %s' %
                            (title, s['source']), time.time(), '4', False,
                            tuid2, s['thread'])
                        s['tuid'] = tuid2
                        s['thread'].start()

                    time.sleep(1.0)
                time.sleep(1.0)
        except Exception as e:
            log(type='ERROR-CRITICAL', err='Thread Title %s - %s' % (title, e))
        control.savePermStore()
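
Compared with Example #3, this variant marks a slot 'active' and registers it with control.AddThread before calling start(), and the intermediate 'done-marked' status ensures each finished thread is logged and removed from the thread registry exactly once before being counted as 'done'.
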
Code Example #7
    def purgeSourcesKey(self, key=None, maxcachetimeallowed=0):
        try:
            purged = False  # avoid shadowing the built-in name 'bool'
            filtered = []
            purgedItems = []
            curr_time = time.time()
            if key == None:
                return purged
            else:
                # if cache time < 2min; then get the sources from last 2min. otherwise it will always return 0 sources
                if maxcachetimeallowed < 2 * 60:
                    maxcachetimeallowed = 2 * 60
                for i in self.sources[:]:  # iterate over a copy; removing from the live list skips entries
                    if (i['ts'] + float(maxcachetimeallowed)) >= curr_time:
                        pass
                    else:
                        self.sources.remove(i)
                        purged = True

                if self.checkKeyInThread(key) == True and self.checkProgress(
                        key) == 100:
                    purgedItems.append(key)
                    del self.threads[key]
                    del self.threadSlots[key]
                    purged = True

            if len(purgedItems) > 0 or len(
                    filtered) > 0 or control.debug == True:
                log(type='INFO',
                    err='purgeSourcesKey performed at %s' % time.strftime(
                        '%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
                log(type='INFO',
                    err='purgeSourcesKey purged items %s' % (', '.join(
                        cleantitle.title_from_key(x) for x in purgedItems)))
        except Exception as e:
            log(type='ERROR', err='purgeSourcesKey : %s' % e)
            purged = False

        return purged
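
Example #8 below avoids removing from self.sources while iterating it by rebuilding a filtered list instead; the same TTL check can be written as a single comprehension (purge_expired and the demo data are illustrative):

import time

def purge_expired(sources, ttl):
    # keep entries whose timestamp is still inside the allowed window
    now = time.time()
    return [s for s in sources if s['ts'] + float(ttl) >= now]

sources = [{'ts': time.time()}, {'ts': time.time() - 9999}]
print(len(purge_expired(sources, ttl=2 * 60)))  # -> 1
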
Code Example #8
    def purgeSources(self, maxcachetimeallowed=0, override=False):
        try:
            filtered = []
            purgedItems = []
            maxcachetimeallowed = float(maxcachetimeallowed)
            curr_time = time.time()
            if override == True:
                pass
            else:
                # if cache time < 5min, keep only the sources from the last 5min; otherwise it would always return 0 sources
                if maxcachetimeallowed < 5 * 60:
                    maxcachetimeallowed = 5 * 60
                for i in self.sources:
                    if (i['ts'] + float(maxcachetimeallowed)) >= curr_time:
                        filtered.append(i)
                for k in list(self.threads):  # copy the keys; deleting from a dict while iterating it raises RuntimeError
                    if self.checkKeyInThread(k) == True and self.checkProgress(
                            k) == 100:
                        purgedItems.append(k)
                        del self.threads[k]
                        del self.threadSlots[k]

            del self.sources[:]
            for i in filtered:
                self.sources.append(i)

            if len(purgedItems) > 0 or len(
                    filtered) > 0 or control.debug == True:
                log(type='INFO',
                    err='purgeSources performed at %s' % time.strftime(
                        '%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
                log(type='INFO',
                    err='purgeSources purged items %s' % (', '.join(
                        cleantitle.title_from_key(x) for x in purgedItems)))
        except Exception as e:
            log(type='ERROR', err='purgeSources : %s' % e)
Code Example #9
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                return sources

            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                return sources

            links_m = []
            TYPES_QUAL = {
                'SD': '480p',
                '3D SD': '480p',
                'HD': '1080p',
                '3D FullHD': '1080p'
            }
            #TYPES_RIP = {'SD':'BRRIP', '3D SD':'3D-BRRIP', 'HD':'3D-BRRIP', '3D FullHD':'3D-BRRIP'}

            for data_j in url:
                try:
                    file = data_j['file']
                    src_file = data_j['src_file']
                    page = data_j['page']
                    label = data_j['label']
                    sub_url = data_j['srt']
                    poster = data_j['poster']
                    qual = '480p'
                    riptype = '3D-BRRIP'

                    data_j['file'] = urlparse.urljoin(self.base_link, file)

                    if label in TYPES_QUAL.keys():
                        qual = TYPES_QUAL[label]
                    data_j['label'] = qual

                    file_data = urllib.urlencode(data_j)

                    links_m = resolvers.createMeta(file_data,
                                                   self.name,
                                                   self.logo,
                                                   qual,
                                                   links_m,
                                                   key,
                                                   riptype,
                                                   testing=testing,
                                                   sub_url=sub_url,
                                                   urlhost=client.geturlhost(
                                                       self.base_link),
                                                   poster=poster)

                    if testing == True and len(links_m) > 0:
                        break
                except:
                    pass

            for l in links_m:
                sources.append(l)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
                return sources

            log('SUCCESS',
                'get_sources',
                '%s sources : %s' %
                (cleantitle.title_from_key(key), len(sources)),
                dolog=not testing)
            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
            return sources
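
Here the whole data_j dict is serialized with urllib.urlencode and handed to the resolver as file_data. The round trip looks like this (Python 3 spelling shown; the plugin itself uses Python 2's urllib.urlencode and urlparse.parse_qs):

from urllib.parse import urlencode, parse_qs

meta = {'file': '/movie/abc.mp4', 'label': '1080p', 'srt': ''}
packed = urlencode(meta)
unpacked = {k: v[0] for k, v in parse_qs(packed, keep_blank_values=True).items()}
print(unpacked == meta)  # -> True
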
Code Example #10
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        #try:
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                return sources

            links_m = []
            trailers = []
            headers = {'Referer': self.base_link}
            sub_url = None

            u = url[0]
            ep = url[1]
            #r = client.request(u, headers=headers IPv4=True)
            r = proxies.request(u,
                                headers=self.headers,
                                IPv4=True,
                                proxy_options=proxy_options,
                                use_web_proxy=self.proxyrequired)

            if testing == False:
                try:
                    #regex = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
                    #matches = re.finditer(regex, r, re.MULTILINE)
                    matches = re.compile(
                        'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
                    ).findall(r)
                    for match in matches:
                        try:
                            #print match
                            if 'youtube.com' in match:
                                match = match.replace('embed/', 'watch?v=')
                                trailers.append(match)
                        except:
                            pass
                except Exception as e:
                    pass

                for trailer in trailers:
                    links_m = resolvers.createMeta(trailer,
                                                   self.name,
                                                   self.logo,
                                                   '720p',
                                                   links_m,
                                                   key,
                                                   vidtype='Trailer',
                                                   testing=testing)

            try:
                if ep == None:
                    srcs = client.parseDOM(r, 'a', ret='player-data')
                else:
                    srcs = client.parseDOM(r,
                                           'a',
                                           ret='player-data',
                                           attrs={'episode-data': str(ep)})

                try:
                    elem = client.parseDOM(r,
                                           'span',
                                           attrs={'class': 'quality'})[0]
                    qual = source_utils.check_sd_url(elem)
                    riptype = source_utils.check_sd_url_rip(elem)
                except Exception as e:
                    qual = '480p'
                    riptype = 'BRRIP'

                try:
                    poster = client.parseDOM(r,
                                             'div',
                                             attrs={'class': 'dm-thumb'})[0]
                    poster = client.parseDOM(poster, 'img', ret='src')[0]
                except:
                    poster = None

                for s in srcs:
                    try:
                        if s.startswith('//'):
                            s = 'https:%s' % s
                        links_m = resolvers.createMeta(s,
                                                       self.name,
                                                       self.logo,
                                                       qual,
                                                       links_m,
                                                       key,
                                                       poster=poster,
                                                       riptype=riptype,
                                                       vidtype='Movie',
                                                       sub_url=sub_url,
                                                       testing=testing)
                        if testing == True and len(links_m) > 0:
                            break
                    except:
                        pass
            except:
                pass

            sources += [l for l in links_m]

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
                return sources

            log('SUCCESS',
                'get_sources',
                '%s sources : %s' %
                (cleantitle.title_from_key(key), len(sources)),
                dolog=not testing)
            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
            return sources
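
The trailer harvesting in this and several other examples is just a broad URL regex plus a youtube.com filter, with embed links rewritten to watch URLs. Isolated (the sample HTML is made up):

import re

# the same URL pattern the snippet compiles inline
URL_RE = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')

def find_trailers(html):
    out = []
    for m in URL_RE.findall(html):
        if 'youtube.com' in m:
            out.append(m.replace('embed/', 'watch?v='))  # normalize embeds to watch URLs
    return out

print(find_trailers('<iframe src="https://www.youtube.com/embed/abc123">'))
# -> ['https://www.youtube.com/watch?v=abc123']
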
Code Example #11
	def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
		try:
			sources = []
			if control.setting('Provider-%s' % name) == False:
				log('INFO','get_sources','Provider Disabled by User')
				return sources
			if url == None: 
				log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
				return sources
			
			myts = str(((int(time.time())/3600)*3600))
			log('INFO','get_sources-1', 'url: %s' % url, dolog=False)
			token_error = False
			urls = []
			sub_url = None
			page_url = None
			
			if not str(url).startswith('http'):
				try:
					data = urlparse.parse_qs(url)
					data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

					title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

					try:
						year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
					except:
						try:
							year = data['year']
						except:
							year = None
					try: episode = data['episode']
					except: pass

					query = {'keyword': title}
					search_url = urlparse.urljoin(self.base_link, '/search')
					search_url = search_url + '?' + urllib.urlencode(query)
					result = proxies.request(search_url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
					
					log('INFO','get_sources-2', '%s' % search_url, dolog=False)
					
					rs = client.parseDOM(result, 'div', attrs = {'class': '[^"]*movie-list[^"]*'})[0]
					r = client.parseDOM(rs, 'div', attrs = {'class': 'item'})
					r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs = {'class': 'name'})) for i in r]
					r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and  len(i[1]) > 0]
					r = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in r]
					
					if 'season' in data:
						r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r]
						
						possible_hits = []
						for i in r:
							if cleantitle.get(title).lower() == cleantitle.get(i[1]).lower():
								possible_hits.append((i[0], [[i[1], u'1']]))
							
						#title += '%01d' % int(data['season'])
						url = [(i[0], re.findall('(.+?) (\d+)$', i[1])) for i in r]

						for i in possible_hits:
							url.append(i)
							
						url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0]
						
						url = [i for i in url if cleantitle.get(title) in cleantitle.get(i[1])]

						url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
						
						if len(url) == 0:
							url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
						if len(url) == 0:
							url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1] + str(data['season']))]  # 'season' itself is only assigned further down; use data['season'] here
					else:
						url = [i for i in r if cleantitle.get(title) in cleantitle.get(i[1])]
					
					if len(url) == 0:
						log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
						return sources
					
					for urli in url:
						url = urli[0]
						url = urlparse.urljoin(self.base_link, url)
						urls.append(url)
					
				except Exception as e:
					raise Exception(e)

			vidtype = 'Movie'
			for url in urls:
				try:
					try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
					except: pass
					
					log('INFO','get_sources-3', url, dolog=False)

					referer = url
					page_url = url
					
					result = resultT = proxies.request(url, headers=self.headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
					
					if 'data-type="series"' not in result:
						raise Exception('Not a TV-Series')
					
					vidtype = 'Show'
					alina = client.parseDOM(result, 'title')[0]

					atr = [i for i in client.parseDOM(result, 'title') if len(re.findall('(\d{4})', i)) > 0][-1]

					if 'season' in data:
						try: season = data['season']
						except: pass
						
						years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '%s' % str(int(year) + int(season)), '%s' % str(int(year) - int(season))]
						mychk = False
						for y in years:
							if y in atr: 
								mychk = True
						result = result if mychk == True else None
						if mychk == True:
							break
					else:
						result = result if year in atr else None
						
					if result != None:
						break
				except Exception as e:
					log('FAIL','get_sources-3', '%s : %s' % (url,e), dolog=False)
					
			if result == None:
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
				return sources

			try:
				myts = re.findall(r'data-ts="(.*?)"', result)[0]
			except:
				log('INFO','get_sources-3', 'could not parse ts ! will use generated one : %s' % myts, dolog=False)
				
			trailers = []
			links_m = []
			
			if testing == False:
				try:
					matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(result)
					for match in matches:
						try:
							if 'youtube.com' in match:
								match = match.replace('embed/','watch?v=')
								trailers.append(match)
						except:
							pass
				except Exception as e:
					pass
					
				for trailer in trailers:
					links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer', testing=testing)
			
			riptype = None
			try: quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
			except: quality = 'hd'
			if quality == 'cam' or quality == 'ts': 
				quality = '480p'
				riptype = 'CAM'
			elif quality == 'hd' or 'hd ' in quality: 
				quality = '720p'
				riptype = 'BRRIP'
			else: 
				quality = '480p'
				riptype = 'BRRIP'

			result_servers = self.get_servers(url, proxy_options=proxy_options)
			result_servers = client.parseDOM(result_servers, 'ul', attrs = {'data-range-id':"0"})
			#print result_servers
			#result_servers = []
			#servers = client.parseDOM(result, 'li', attrs = {'data-type': 'direct'})
			result_servers = zip(client.parseDOM(result_servers, 'a', ret='data-id'), client.parseDOM(result_servers, 'a'))
			#print result_servers
			
			result_servers = [(i[0], re.findall('(\d+)', i[1])) for i in result_servers]
			
			servers = [(i[0], ''.join(i[1][:1])) for i in result_servers]
			
			try: servers = [i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode)]
			except: pass
			
			for s in servers[:len(servers)]:
				try:
					video_url = None
					#quality = '360p'
					if '1080' in s[1]: 
						quality = '1080p'
						#riptype = 'BRRIP'
					elif '720' in s[1] or 'hd' in s[1].lower(): 
						quality = '720p'
						#riptype = 'BRRIP'
					elif '480' in s[1]: 
						quality = '480p'
						#riptype = 'BRRIP'
					elif 'cam' in s[1].lower() or 'ts' in s[1].lower(): 
						quality = '480p'
						#riptype = 'CAM'
					else:
						quality = '480p'
						#riptype = 'CAM'
				
					if video_url == None:
						headers = {'X-Requested-With': 'XMLHttpRequest'}
						hash_url = urlparse.urljoin(self.base_link, self.hash_link)
						
						query = {'ts': myts, 'id': s[0], 'update': '0', 'server':'36'}
						
						query.update(self.__get_token(query))
						hash_url = hash_url + '?' + urllib.urlencode(query)
						headers['Referer'] = urlparse.urljoin(url, s[0])
						headers['Cookie'] = self.headers['Cookie']
						log('INFO','get_sources-4.b', '%s' % hash_url, dolog=False)
						result = proxies.request(hash_url, headers=headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
						result = json.loads(result)
						
						if 'error' in result and result['error'] == True:
							token_error = True
							query.update(self.__get_token(query, token_error=token_error))
							hash_url = hash_url + '?' + urllib.urlencode(query)
							result = proxies.request(hash_url, headers=headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
							result = json.loads(result)
							
							query = {'id': s[0], 'update': '0'}
							query.update(self.__get_token(query, token_error=token_error))
						else:
							token_error = False
							queryx = {'id': s[0], 'update': '0'}
							query.update(self.__get_token(queryx))
						
						url = url + '?' + urllib.urlencode(query)
						#result = client2.http_get(url, headers=headers)
					
						log('INFO','get_sources-5', result, dolog=False)
						
						if result['target'] != "":
							pass
						else:
							grabber = result['grabber']
							grab_data = grabber

							grabber_url = urlparse.urljoin(self.base_link, self.grabber_api)
							
							if '?' in grabber:
								grab_data = grab_data.split('?')
								grabber_url = grab_data[0]
								grab_data = grab_data[1]
								
							grab_server = str(urlparse.parse_qs(grab_data)['server'][0])
							
							b, resp = self.decode_t(result['params']['token'], -18)
							if b == False:
								raise Exception(resp)
							token = resp
							b, resp = self.decode_t(result['params']['options'], -18)
							if b == False:
								raise Exception(resp)
							options = resp
							
							grab_query = {'ts':myts, grabber_url:'','id':result['params']['id'],'server':grab_server,'mobile':'0','token':token,'options':options}
							tk = self.__get_token(grab_query, token_error)

							if tk == None:
								raise Exception('video token algo')
							grab_info = {'token':token,'options':options}
							del query['server']
							query.update(grab_info)
							query.update(tk)
							
							sub_url = result['subtitle']
							if sub_url==None or len(sub_url) == 0:
								sub_url = None
							
							if '?' in grabber:
								grabber += '&' + urllib.urlencode(query)
							else:
								grabber += '?' + urllib.urlencode(query)
						
							if grabber!=None and not grabber.startswith('http'):
								grabber = 'http:'+grabber
								
							log('INFO','get_sources-6', grabber, dolog=False)

							result = proxies.request(grabber, headers=headers, referer=url, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)

							result = json.loads(result)
						
						if 'data' in result.keys():
							result = [i['file'] for i in result['data'] if 'file' in i]
							
							for i in result:
								video_url = i
								links_m = resolvers.createMeta(i, self.name, self.logo, quality, links_m, key, riptype, vidtype=vidtype, sub_url=sub_url, testing=testing)
						else:
							target = result['target']
							b, resp = self.decode_t(target, -18)
							if b == False:
								raise Exception(resp)
							target = resp
							sub_url = result['subtitle']
							if sub_url==None or len(sub_url) == 0:
								sub_url = None
							
							if target!=None and not target.startswith('http'):
								target = 'http:' + target
								
							video_url = target
							links_m = resolvers.createMeta(target, self.name, self.logo, quality, links_m, key, riptype, vidtype=vidtype, sub_url=sub_url, testing=testing)

				except Exception as e:
					log('FAIL', 'get_sources-7','%s' % e, dolog=False)
					
				try:
					if video_url == None and USE_PHANTOMJS == True and control.setting('use_phantomjs') != control.phantomjs_choices[0]:
						vx_url = '%s/%s' % (page_url,s[0])
						log(type='INFO',method='get_sources-4.a.1', err=u'trying phantomjs method: %s' % vx_url)
						try:
							v_url, ok = phantomjs.decode(vx_url, js='fmovies.js')  # avoid shadowing the built-in 'bool'
							if ok == False:
								ret_error = v_url
								raise Exception(ret_error)
							else:
								video_url = v_url
								ret_error = ''
								log(type='SUCCESS',method='get_sources-4.a.2', err=u'*PhantomJS* method is working: %s' % vx_url)
								links_m = resolvers.createMeta(video_url, self.name, self.logo, quality, links_m, key, riptype, vidtype=vidtype, sub_url=sub_url, testing=testing)
						except:
							raise Exception('phantomjs not working')
					else:
						raise Exception('phantomjs is disabled')
				except Exception as e:
					log(type='FAIL',method='get_sources-4.a.3', err=u'%s' % e)

			sources += [l for l in links_m]
			
			if len(sources) == 0:
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
				return sources
			
			log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
			return sources
		except Exception as e:
			log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
			return sources
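
The dict([(i, data[i][0]) ...]) idiom near the top of this example flattens parse_qs output, which maps every key to a list, into plain strings. In isolation (Python 3 names; the plugin uses urlparse.parse_qs):

from urllib.parse import parse_qs

qs = 'title=Heat&year=1995&episode='
data = parse_qs(qs, keep_blank_values=True)
data = {k: (v[0] if v else '') for k, v in data.items()}  # flatten one-element lists
print(data)  # -> {'title': 'Heat', 'year': '1995', 'episode': ''}
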
Code Example #12
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                return sources

            url = urlparse.urljoin(self.base_link, url)

            #result = proxies.request(url, 'choose_tabs', proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
            result = proxies.request(url,
                                     proxy_options=proxy_options,
                                     use_web_proxy=self.proxyrequired)

            links_m = []
            trailers = []
            if testing == False:
                try:
                    matches = re.findall(r'\"(http[s]?://www.youtube.*?)\"',
                                         result)
                    for match in matches:
                        try:
                            #print match
                            if 'youtube.com' in match and '"' not in match:
                                match = match.replace('embed/', 'watch?v=')
                                trailers.append(match)
                        except:
                            pass
                except Exception as e:
                    pass

                for trailer in trailers:
                    links_m = resolvers.createMeta(trailer,
                                                   self.name,
                                                   self.logo,
                                                   '720p',
                                                   links_m,
                                                   key,
                                                   vidtype='Trailer',
                                                   testing=testing)

            links = client.parseDOM(result, 'tbody')
            try:
                riptypex = client.parseDOM(result,
                                           'div',
                                           attrs={'class':
                                                  'warning_message'})[0]
            except:
                riptypex = 'BRRIP'

            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')[0]
                    try:
                        url = urlparse.parse_qs(
                            urlparse.urlparse(url).query)['u'][0]
                    except:
                        pass
                    try:
                        url = urlparse.parse_qs(
                            urlparse.urlparse(url).query)['q'][0]
                    except:
                        pass
                    url = urlparse.parse_qs(
                        urlparse.urlparse(url).query)['url'][0]

                    url = base64.b64decode(url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if 'http' not in url:
                        raise Exception()
                    for u in AVOID_DOMAINS:
                        if u in url:
                            raise Exception()

                    quality = client.parseDOM(i, 'span', ret='class')[0]

                    if quality == 'quality_cam' or quality == 'quality_ts':  # quality_ts
                        quality = '480p'
                        riptype = 'CAM'
                    elif quality == 'quality_dvd':
                        quality = '720p'
                        riptype = 'BRRIP'
                    else:
                        riptype = riptypex
                        quality = '480p'

                    links_m = resolvers.createMeta(url,
                                                   self.name,
                                                   self.logo,
                                                   quality,
                                                   links_m,
                                                   key,
                                                   riptype=riptype,
                                                   testing=testing)
                except:
                    pass

            for l in links_m:
                sources.append(l)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
                return sources

            log('SUCCESS',
                'get_sources',
                '%s sources : %s' %
                (cleantitle.title_from_key(key), len(sources)),
                dolog=not testing)
            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
            return sources
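
The link-unwrapping chain in this example peels optional 'u' and 'q' redirect parameters and then base64-decodes the inner 'url' parameter. A compact equivalent (unwrap and the wrapped demo URL are illustrative; Python 3 spelling):

import base64
from urllib.parse import urlparse, parse_qs

def unwrap(link):
    # peel optional redirect layers, then decode the base64 'url' parameter
    for param in ('u', 'q'):
        try:
            link = parse_qs(urlparse(link).query)[param][0]
        except Exception:
            pass
    link = parse_qs(urlparse(link).query)['url'][0]
    return base64.b64decode(link).decode('utf-8')

wrapped = 'http://x.example/out?url=' + base64.b64encode(b'http://host.example/v.mp4').decode()
print(unwrap(wrapped))  # -> http://host.example/v.mp4
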
Code Example #13
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                log('INFO', 'get_sources', 'Completed')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                log('INFO', 'get_sources', 'Completed')
                return sources

            # get IMDb item page
            result = proxies.request(url,
                                     proxy_options=proxy_options,
                                     use_web_proxy=self.proxyrequired,
                                     IPv4=True)
            r = client.parseDOM(result,
                                'div',
                                attrs={'class': 'aux-content-widget-3'})[1]

            # get types of videos available
            types = {
                'content_type-trailer': 'Trailer',
                'content_type-clip': 'Clip',
                'content_type-interview': 'Interviews',
                'content_type-other': 'Misc.',
                'content_type-featurette': 'Featurette'
            }
            re_map_types = {
                'Featurette': 'Featurette',
                'Clip': 'Trailer',
                'Trailer': 'Trailer',
                'Interviews': 'Interviews',
                'Misc.': 'Misc.'
            }

            r1 = client.parseDOM(r, 'a', ret='href')

            types_map = {}

            for r1_url in r1:
                type = 'Trailer'
                for t in types.keys():
                    if t in r1_url:
                        type = types[t]
                        break

                if type not in types_map.keys():
                    types_map[type] = []

                result_r1 = proxies.request(urlparse.urljoin(
                    self.base_link, r1_url),
                                            proxy_options=proxy_options,
                                            use_web_proxy=self.proxyrequired,
                                            IPv4=True)

                r2 = client.parseDOM(result_r1,
                                     'div',
                                     attrs={'class': 'search-results'})[0]
                r2a = client.parseDOM(r2, 'a', ret='href')

                for r2a1 in r2a:
                    if 'ref_' in r2a1:
                        types_map[type].append(
                            urlparse.urljoin(self.base_link, r2a1))

            links = []
            quality = u'720p'
            selection_map = {}

            for vidtype in types_map.keys():
                page_links = types_map[vidtype]
                for page_link in page_links:
                    try:
                        res = proxies.request(page_link,
                                              proxy_options=proxy_options,
                                              use_web_proxy=self.proxyrequired,
                                              IPv4=True)
                        vidurls = re.findall(r'encodings\":(.*?\])', res)[0]
                        poster = client.parseDOM(res,
                                                 'meta',
                                                 attrs={'itemprop': 'image'},
                                                 ret='content')[0]
                        vidurls_json = json.loads(vidurls)
                        txt = re.findall(r'<title>(.*?)</title>', res)[0]
                        txt = txt.replace('&quot;', '')

                        for viddata in vidurls_json:
                            try:
                                vidurl = viddata['videoUrl']
                                if '.mp4' in vidurl:
                                    if txt not in selection_map.keys():
                                        selection_map[txt] = {}
                                    quality = viddata['definition']
                                    vidtype = re_map_types[vidtype]
                                    try:
                                        l = resolvers.createMeta(
                                            vidurl,
                                            self.name,
                                            self.logo,
                                            quality, [],
                                            key,
                                            vidtype=vidtype,
                                            testing=testing,
                                            txt=txt,
                                            poster=poster)
                                        l = l[0]
                                        if l['quality'] in selection_map[
                                                txt].keys():
                                            selection_map[txt][
                                                l['quality']].append({
                                                    'fs':
                                                    int(l['fs']),
                                                    'src':
                                                    l
                                                })
                                        else:
                                            selection_map[txt][
                                                l['quality']] = [{
                                                    'fs':
                                                    int(l['fs']),
                                                    'src':
                                                    l
                                                }]
                                        if testing == True:
                                            links.append(l)
                                            break
                                    except Exception as e:
                                        log('ERROR',
                                            'get_sources-0',
                                            '%s' % e,
                                            dolog=not testing)
                            except Exception as e:
                                log('ERROR',
                                    'get_sources-1',
                                    '%s' % e,
                                    dolog=not testing)
                    except Exception as e:
                        log('ERROR',
                            'get_sources-2',
                            '%s' % e,
                            dolog=not testing)
                    if testing == True and len(links) > 0:
                        break
                if testing == True and len(links) > 0:
                    break

            #print selection_map
            for sel_titles in selection_map.keys():
                for sel in selection_map[sel_titles].keys():
                    qls = selection_map[sel_titles][sel]
                    files = sorted(qls, key=lambda k: k['fs'], reverse=True)
                    file = files[0]
                    links.append(file['src'])

            for link in links:
                if link != None and 'key' in link.keys():
                    sources.append(link)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
            else:
                log(
                    'SUCCESS', 'get_sources', '%s sources : %s' %
                    (cleantitle.title_from_key(key), len(sources)))

            log('INFO', 'get_sources', 'Completed')

            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e)
            log('INFO', 'get_sources', 'Completed')
            return sources
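
selection_map groups candidate files by title and quality so that only the largest file per quality survives (sort by the 'fs' field descending, take the first). Reduced to its core (best_per_quality and the demo dict are illustrative):

def best_per_quality(selection_map):
    # selection_map: {title: {quality: [{'fs': int, 'src': dict}, ...]}}
    picks = []
    for title in selection_map:
        for quality, candidates in selection_map[title].items():
            best = sorted(candidates, key=lambda c: c['fs'], reverse=True)[0]
            picks.append(best['src'])
    return picks

demo = {'Trailer 1': {'720p': [{'fs': 10, 'src': {'id': 'small'}},
                               {'fs': 99, 'src': {'id': 'big'}}]}}
print(best_per_quality(demo))  # -> [{'id': 'big'}]
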
Code Example #14
	def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
		try:
			sources = []
			if control.setting('Provider-%s' % name) == False:
				log('INFO','get_sources','Provider Disabled by User')
				return sources
			if url == None: 
				log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
				return sources

			url = urlparse.urljoin(self.base_link, url)
			
			#r = client.request(url)
			req = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)

			r = client.parseDOM(req, 'iframe', ret='src')
			try:
				r2 = re.findall('data-video=\"(.*?)\"', req)
				for r2_i in r2:
					r.append(r2_i)
			except:
				pass
				
			links = []

			for u in r:
				try:
					if 'http' not in u:
						u = 'http:' + u

					if u.startswith('http') == True:
						if 'vidstreaming' in u:
							#url = client.request(u)
							url = proxies.request(u, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
							
							url = client.parseDOM(url, 'source', ret='src')
						else:
							url = [u]

						for i in url:
							#try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
							#except: pass
							
							try:
								qualityt = client.googletag(i)[0]['quality']
							except:
								qualityt = u'720p'
							try:
								links = resolvers.createMeta(i, self.name, self.logo, qualityt, links, key, vidtype='Show', testing=testing)
							except:
								pass
				except:
					pass
					
			for i in links: sources.append(i)
			
			if len(sources) == 0:
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
				return sources
			
			log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
			return sources
		except Exception as e:
			log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
			return sources
Code Example #15
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                log('INFO', 'get_sources', 'Completed')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                log('INFO', 'get_sources', 'Completed')
                return sources

            REMAP_TYPE = {
                'trailer': 'Trailer',
                'feature_trailer': 'Trailer',
                'theatrical_trailer': 'Trailer',
                'behind_the_scenes': 'Behind the scenes',
                'deleted_scene': 'Deleted Scenes',
                'featurette': 'Featurette',
                'featured_box': 'Featurette',
                'music-video': 'Music Video',
                'clip': 'Misc.'
            }

            year = None
            episode = None
            season = None

            log('INFO', 'get_sources-1', 'data-items: %s' % url, dolog=False)
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.simpletitle(title)
            try:
                year = re.findall(
                    '(\d{4})', data['premiered']
                )[0] if 'tvshowtitle' in data else data['year']
            except:
                try:
                    year = data['year']
                except:
                    year = None

            title_s = title.split(' ')
            queries = []
            for ts in range(len(title_s)):
                titles = ('+'.join(
                    str(x) for x in title_s[:len(title_s) - ts]))
                queries.append('%s+%s' % (titles, year))
                queries.append(titles)
            rs = []

            for q in queries:
                page_count = 1
                search_url = self.base_link + '/movie/results/' + '?lang=hindi&page=' + str(
                    page_count) + '&query=%s' % q
                log('INFO', 'get_sources-2', 'Searching: %s' % search_url)
                r, res = request_einthusan(search_url)

                try:
                    movies = client.parseDOM(res,
                                             'section',
                                             attrs={'id': 'UIMovieSummary'})[0]
                    movies = client.parseDOM(movies, 'li')

                    for block in movies:
                        try:
                            blocka = client.parseDOM(block,
                                                     'div',
                                                     attrs={'class':
                                                            'block1'})[0]
                            loc = self.base_link + client.parseDOM(
                                blocka, 'a', ret='href')[0]
                            poster = "http:" + client.parseDOM(
                                blocka, 'img', ret='src')[0]
                            titlex = client.parseDOM(block, 'h3')[0]
                            yearx = client.parseDOM(block,
                                                    'div',
                                                    attrs={'class': 'info'})[0]
                            yearx = client.parseDOM(yearx, 'p')[0]
                            if str(year) in str(yearx):
                                rs.append([titlex, yearx, loc, poster])
                                log('INFO', 'get_sources-3',
                                    'match-page-url: %s | %s' % (loc, titlex))
                                break
                        except:
                            pass
                    if len(rs) > 0:
                        break
                except:
                    pass

            if len(rs) > 0:
                links_m = []
                vidtype = 'Movie'
                riptype = 'BRRIP'
                quality = '720p'

                for r in rs:
                    video_urls = []
                    trailers = []
                    music_vids = []
                    poster = r[3]
                    page_url = r[2]
                    eindata1, htm = GetEinthusanData(page_url)
                    eindata1 = json.loads(json.dumps(eindata1))

                    log('INFO', 'get_sources-4-A',
                        'GetEinthusanData: %s' % eindata1)

                    video_urls.append(eindata1['MP4Link'])
                    video_urls.append(eindata1['HLSLink'])

                    if testing == False:
                        try:
                            matches = re.compile(
                                'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
                            ).findall(htm)
                            matches = list(set(matches))
                            for match in matches:
                                try:
                                    if 'youtube.com' in match:
                                        match = match.replace(
                                            'embed/', 'watch?v=')
                                        trailers.append(match)
                                        log('INFO', 'get_sources-4-B',
                                            'trailers: %s' % match)
                                except:
                                    pass
                        except Exception as e:
                            log('FAIL', 'get_sources-4', '%s' % e)

                    if testing == False:
                        try:
                            musicblock = client.parseDOM(
                                htm,
                                'section',
                                attrs={'id': 'UICompactMovieClipList'})[0]
                            musicblock = client.parseDOM(musicblock, 'li')
                            music_vids = []
                            locx = None
                            for block in musicblock:
                                try:
                                    music_vids_s = []
                                    locx = self.base_link + client.parseDOM(
                                        block,
                                        'a',
                                        attrs={'class': 'title'},
                                        ret='href')[0]
                                    thumbx = "http:" + client.parseDOM(
                                        block, 'img', ret='src')[0]
                                    titlex = client.parseDOM(
                                        block, 'a', attrs={'class':
                                                           'title'})[0]
                                    titlex = client.parseDOM(titlex, 'h5')[0]
                                    eindata1, htm1 = GetEinthusanData(locx)
                                    eindata1 = json.loads(json.dumps(eindata1))
                                    log('INFO', 'get_sources-4-C',
                                        'GetEinthusanData: %s' % eindata1)
                                    type = eindata1['type']
                                    if type in REMAP_TYPE.keys():
                                        type = REMAP_TYPE[type]
                                    else:
                                        type = REMAP_TYPE['clip']
                                    music_vids_s.append(
                                        [eindata1['MP4Link'], type])
                                    music_vids_s.append(
                                        [eindata1['HLSLink'], type])
                                    music_vids.append(
                                        [titlex, thumbx, music_vids_s, locx])
                                except Exception as e:
                                    log('FAIL', 'get_sources-5A',
                                        '%s : %s' % (e, locx))
                        except Exception as e:
                            log('FAIL', 'get_sources-5B', '%s' % e)

                    for vid in trailers:
                        try:
                            l = resolvers.createMeta(vid,
                                                     self.name,
                                                     self.logo,
                                                     '720p', [],
                                                     key,
                                                     poster=poster,
                                                     vidtype='Trailer',
                                                     testing=testing,
                                                     page_url=page_url)
                            for ll in l:
                                if ll != None and 'key' in ll.keys():
                                    links_m.append(ll)
                        except:
                            log('FAIL', 'get_sources-6',
                                'Could not add: %s' % vid)

                    for vid in music_vids:
                        try:
                            for v in vid[2]:
                                l = resolvers.createMeta(v[0],
                                                         self.name,
                                                         self.logo,
                                                         '720p', [],
                                                         key,
                                                         poster=vid[1],
                                                         vidtype=v[1],
                                                         testing=testing,
                                                         txt=vid[0],
                                                         page_url=vid[3])
                                for ll in l:
                                    if ll != None and 'key' in ll.keys():
                                        links_m.append(ll)
                        except:
                            log('FAIL', 'get_sources-7',
                                'Could not add: %s' % v[0])

                    for vid in video_urls:
                        try:
                            l = resolvers.createMeta(vid,
                                                     self.name,
                                                     self.logo,
                                                     quality, [],
                                                     key,
                                                     poster=poster,
                                                     riptype=riptype,
                                                     vidtype=vidtype,
                                                     testing=testing,
                                                     page_url=page_url)
                            for ll in l:
                                if ll != None and 'key' in ll.keys():
                                    links_m.append(ll)
                        except:
                            log('FAIL', 'get_sources-8',
                                'Could not add: %s' % vid)

                for l in links_m:
                    if l != None and 'key' in l.keys():
                        sources.append(l)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
            else:
                log(
                    'SUCCESS', 'get_sources', '%s sources : %s' %
                    (cleantitle.title_from_key(key), len(sources)))

            log('INFO', 'get_sources', 'Completed')

            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e)
            log('INFO', 'get_sources', 'Completed')
            return sources
Code Example #16
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                log('INFO', 'get_sources', 'Completed')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                log('INFO', 'get_sources', 'Completed')
                return sources

            log('INFO', 'get_sources-1A', url, dolog=False)
            #result = proxies.request(url, 'choose_tabs', proxy_options=proxy_options, use_web_proxy=self.proxyrequired)

            result = proxies.request(url,
                                     proxy_options=proxy_options,
                                     use_web_proxy=self.proxyrequired)
            result = cleantitle.asciiOnly(result)

            try:
                poster1 = client.parseDOM(result,
                                          'div',
                                          attrs={'class': 'movie_thumb'})[0]
                poster = client.parseDOM(poster1, 'img', ret='src')[0]
                if 'www' not in poster:
                    poster = 'http:%s' % poster
            except:
                poster = None

            loc = url.replace(self.base_link + '/', '')
            url = testjs(result, self.base_link, loc)
            vidtype = 'Movie'
            if 'season' in url:
                vidtype = 'Show'
                url = url.replace('=tv-',
                                  '=watch-').replace('/season', '&season')
                url = url.replace('season-',
                                  'season=').replace('-episode-', '&episode=')
            log('INFO', 'get_sources-1B', url, dolog=False)

            result = proxies.request(url,
                                     proxy_options=proxy_options,
                                     use_web_proxy=self.proxyrequired)
            result = cleantitle.asciiOnly(result)

            links_m = []
            trailers = []
            if testing == False:
                try:
                    matches = re.findall(r'\"(//www.youtube.*?)\"', result)
                    for match in matches:
                        try:
                            #print match
                            if 'youtube.com' in match and '"' not in match:
                                match = match.replace('embed/', 'watch?v=')
                                if 'http' not in match:
                                    match = 'http:%s' % match
                                trailers.append(match)
                        except:
                            pass
                except Exception as e:
                    pass

                for trailer in trailers:
                    try:
                        l = resolvers.createMeta(trailer,
                                                 self.name,
                                                 self.logo,
                                                 '720p', [],
                                                 key,
                                                 vidtype='Trailer',
                                                 testing=testing)
                        for ll in l:
                            if ll != None and 'key' in ll.keys():
                                links_m.append(ll)
                    except:
                        pass

            links = client.parseDOM(result, 'tbody')

            try:
                riptypex = client.parseDOM(result,
                                           'div',
                                           attrs={'class':
                                                  'warning_message'})[0]
            except:
                riptypex = 'BRRIP'

            c = 0
            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')[0]
                    url = urlparse.urljoin(self.base_link, url)
                    #print url
                    r = client.request(url)
                    r = cleantitle.asciiOnly(r)
                    links = client.parseDOM(r, 'script')
                    p = False
                    urls_p = []
                    for l in links:
                        if 'eval' in l:
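                            # The stream URL is hidden in doubly-packed JS: unpack twice,
                            # then join the matching host with loc to rebuild it.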
                            unpacked_code = jsunpack.unpack(l)
                            unpacked_code = jsunpack.unpack(unpacked_code)
                            #print unpacked_code
                            host = re.findall(r"var host=\\'(.*?)\\'",
                                              unpacked_code)
                            hosted = re.findall(r"var hosted=\\'(.*?)\\'",
                                                unpacked_code)[0]
                            loc = re.findall(r"var loc=\\'(.*?)\\'",
                                             unpacked_code)[0]
                            if loc != None and len(loc) > 4:
                                for h in host:
                                    if hosted in h:
                                        url = h + loc
                                        #print url
                                        urls_p.append(url)
                                        #url = re.findall(r"'(http.*)'",unpacked_code)[0]
                                        p = True
                                        break
                    if p == False:
                        raise Exception()
                    c = c + 1

                    for url in urls_p:
                        log('INFO',
                            'get_sources-2A-%s: %s' % (c, url),
                            dolog=False)

                        if 'http' not in url:
                            raise Exception()
                        for u in AVOID_DOMAINS:
                            if u in url:
                                raise Exception()

                        quality = client.parseDOM(i, 'span', ret='class')[0]

                        if quality == 'quality_cam' or quality == 'quality_ts':  # quality_ts
                            quality = '480p'
                            riptype = 'CAM'
                        elif quality == 'quality_dvd':
                            quality = '720p'
                            riptype = 'BRRIP'
                        else:
                            riptype = riptypex
                            quality = '480p'

                        try:
                            l = resolvers.createMeta(url,
                                                     self.name,
                                                     self.logo,
                                                     quality, [],
                                                     key,
                                                     vidtype=vidtype,
                                                     poster=poster,
                                                     riptype=riptype,
                                                     testing=testing)
                            for ll in l:
                                if ll != None and 'key' in ll.keys():
                                    links_m.append(ll)
                        except:
                            pass
                except:
                    pass

            for l in links_m:
                if l != None and 'key' in l.keys():
                    sources.append(l)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
            else:
                log(
                    'SUCCESS', 'get_sources', '%s sources : %s' %
                    (cleantitle.title_from_key(key), len(sources)))

            log('INFO', 'get_sources', 'Completed')

            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e)
            log('INFO', 'get_sources', 'Completed')
            return sources
Code Example #17
def createMeta(url,
               provider,
               logo,
               quality,
               links,
               key,
               riptype=None,
               vidtype='Movie',
               lang='en',
               sub_url=None,
               txt='',
               file_ext='.mp4',
               testing=False,
               urlhost=None,
               poster=None,
               headers=None,
               page_url=None):

    if url == None or url == '' or url == 'http:' or url == 'https:' or 'http' not in url:
        log(type='ERROR',
            err="createMeta > Title: %s Provider:%s  url:%s" %
            (cleantitle.title_from_key(key), provider, url))
        return links

    url = url.strip()

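    # Skip URLs already queued in this batch or already registered as an external source.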
    for item in links:
        if url == item['orig_url']:
            log("createMeta > %s has already been processed" % url)
            return links

    for item in control.getExtSource():
        if url == item['orig_url']:
            log("createMeta > %s has already been processed" % url)
            return links

    quality = fixquality(quality)
    links_m = []
    urldata = client.b64encode(json.dumps('', encoding='utf-8'))
    params = client.b64encode(json.dumps('', encoding='utf-8'))

    try:
        if urlhost == None:
            try:
                urlhost = re.findall(
                    '([\w]+[.][\w]+)$',
                    urlparse.urlparse(url.strip().lower()).netloc)[0]
            except:
                urlhost = re.findall(
                    '([\w]+[.][\w]+).*$',
                    urlparse.urlparse(url.strip().lower()).netloc)[0]
                urlhost = urlhost.split('.')[1]

        if urlhost != None:
            if riptype == None:
                riptype_def = 'BRRIP'
            else:
                riptype_def = riptype
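            # Hand the URL off to a matching host-specific resolver plugin when its domain is recognised.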
            for host in sourceHostsCall:
                log("createMeta > Searching %s in host (%s) for provider (%s)"
                    % (urlhost, host['name'], provider),
                    logToControl=False)

                if urlhost in host['host']:
                    log("createMeta > Found %s in host (%s)" %
                        (urlhost, host['name']))
                    return host['call'].createMeta(url,
                                                   provider,
                                                   logo,
                                                   quality,
                                                   links,
                                                   key,
                                                   riptype_def,
                                                   vidtype=vidtype,
                                                   lang=lang,
                                                   sub_url=sub_url,
                                                   txt=txt,
                                                   file_ext=file_ext,
                                                   testing=testing,
                                                   poster=poster,
                                                   headers=headers,
                                                   page_url=page_url)

        log("createMeta > urlhost '%s' not found in host/resolver plugins - creating generic services > provider:%s"
            % (urlhost, provider))
        log("createMeta > url:%s" % (url))

        quality = file_quality(url, quality)

        if riptype == None:
            type = rip_type(url, quality)
        else:
            type = riptype

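        # No host plugin matched: fall back to a generic source record for the built-in player.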
        links_m.append({
            'source': urlhost,
            'maininfo': '',
            'titleinfo': '',
            'quality': quality,
            'vidtype': vidtype,
            'rip': type,
            'provider': provider,
            'orig_url': url,
            'url': url,
            'durl': url,
            'urldata': urldata,
            'params': params,
            'logo': logo,
            'online': 'Unknown',
            'allowsDownload': False,
            'resumeDownload': False,
            'allowsStreaming': True,
            'key': key,
            'enabled': True,
            'fs': int(0),
            'file_ext': file_ext,
            'ts': time.time(),
            'lang': lang,
            'sub_url': sub_url,
            'poster': poster,
            'subdomain': urlhost,
            'page_url': page_url,
            'misc': {
                'player': 'eplayer',
                'gp': False
            },
            'seq': 0
        })
    except Exception as e:
        log(type='ERROR', err="createMeta : %s url: %s" % (e.args, url))

    links += [l for l in links_m]
    return links
Code Example #18
	def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
		try:
			sources = []
			if control.setting('Provider-%s' % name) == False:
				log('INFO','get_sources','Provider Disabled by User')
				log('INFO', 'get_sources', 'Completed')
				return sources
			if url == None: 
				log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
				log('INFO', 'get_sources', 'Completed')
				return sources

			UA = client.agent()
				
			# get TA JSON data from tadata api
			result = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
			resultx = json.loads(str(result))
			ta_url = resultx['url']
			poster = resultx['image'] if 'image' in resultx else None
			
			#print ta_url
			result = proxies.request(ta_url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
			
			# get types of videos available
			types = {'trailer':'Trailer', 'feature_trailer':'Trailer', 'theatrical_trailer':'Trailer', 'behind_the_scenes':'Behind the scenes', 'deleted_scene':'Deleted Scenes', 'featurette':'Featurette', 'featured_box':'Featurette', 'music_video':'Music Video', 'misc_scene':'Misc.'}
			quality_maps = {'4k':'4K','2k':'2K','1080p':'1080p', 'HD':'720p', 'M':'480p', 'S':'360p'}
			
			extras = []
			
			items = client.parseDOM(result, 'div', attrs = {'id':'featured_c'})[0]
			m_title = client.parseDOM(items, 'div', attrs = {'class':'movie_info'})
			#print m_title
			
			fail_bool = False
			for video in m_title:
				try:
					time.sleep(0.1)
					video = video.replace('rttttttttttt','')
					video = video.replace('rtttttttttt','')
					video = video.replace('\r','')
					video = video.replace('\t','')
					video = video.replace('\n','')

					title = client.parseDOM(video, 'a', attrs = {'class':'m_title'})[0]
					
					ta_tage_url = client.parseDOM(video, 'a', ret = 'href')[0]
					if 'http' not in ta_tage_url:
						ta_tage_url = urlparse.urljoin(self.base_link, ta_tage_url)
					
					try:
						vid_date = client.parseDOM(video, 'span', attrs = {'class':'m_date'})[0]
						vid_date = vid_date.replace(',','')
					except:
						vid_date = ''
					
					# Trailers
					if title.lower() == 'trailer':
						extra_type = 'trailer'
					elif title.lower() == 'feature trailer':
						extra_type = 'feature_trailer'
					elif title.lower() == 'theatrical trailer':
						extra_type = 'theatrical_trailer'

					# Behind the scenes
					elif 'behind the scenes' in title.lower():
						extra_type = 'behind_the_scenes'

					# Featurette
					elif 'featurette' in title.lower():
						extra_type = 'featurette'
						
					# Music Video
					elif 'music video' in title.lower():
						extra_type = 'music_video'

					# Interview
					elif 'interview' in title.lower():
						extra_type = 'interview'

						if title.lower().startswith('interview') or title.lower().startswith('generic interview'):
							title = title.split('nterview - ')[-1].split('nterview- ')[-1]

					# Deleted scene
					elif 'deleted scene' in title.lower():
						extra_type = 'deleted_scene'
						
					# Trailers
					elif 'trailer' in title.lower():
						extra_type = 'trailer'
						
					else:
						extra_type = 'misc_scene'

					# process ta_tage_url
					#print ta_tage_url
					result = proxies.request(ta_tage_url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
					
					data = None
					
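					# Prefer the "wise"-packed JS blob (eval(function(w,i,s,e)...)) embedded in the page;
					# otherwise recover the post data via the helper API below.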
					js = re.findall(r'eval\(function\(w,i,s,e\).*;', result)
					
					if len(js) > 0:
						data = js[0]
					else:
						try:
							jsd = re.findall(r'src="/util/client\.js\?c=(.*?)"><', result)[0].strip()
						except:
							try:
								jsd = re.findall(r'</style>rttr<!-- (.*?) -->rrttrtt<div id=\"embed_box\">', result)[0].strip()
							except:
								jsd = re.findall(r'</style>.*<!-- (.*?) -->.*<div id=\"embed_box\">', result, flags=re.DOTALL)[0].strip()
								
						jsd_url = tau % (urllib.quote_plus(jsd), client.b64encode(str(int(time.time()))), client.b64encode(ta_tage_url), client.b64encode(UA), control.setting('ver'), client.b64encode(control.setting('ca')))
						
						data = proxies.request(jsd_url)
						if data == None:
							log('ERROR', 'get_sources-1', '%s' % jsd_url, dolog=True)
					
					if data != None:
						if str(data) == '423':
							fail_bool = True
							raise Exception("Helper site is currently unavailable !")
						try:
							data = unwise2.unwise_process(data)
						except:
							raise Exception("unwise2 could not process data")
					else:
						raise Exception("URL Post Data Unavailable")
						
					files = re.findall(r'source src="([^"]+)"', data)
					quals = re.findall(r'res=\"(.*?)\"', data)
					processed = []
					
					for i in range(0, len(files)):
						v_file = files[i]
						if quals[i] in quality_maps.keys():
							quality = quality_maps[quals[i]]
						else:
							quality = '720p'
						#print extra_type
						if quality not in processed:
							#print v_file
							processed.append(quality)
							extras.append(
								{'etype': extra_type,
								'date': vid_date,
								'type': types[extra_type],
								'url' : v_file,
								'quality': quality,
								'title': title,
								'thumb': poster}
							)
					
					if testing == True and len(extras) > 0:
						break
				except Exception as e:
					log('ERROR', 'get_sources-2', '%s' % e, dolog=True)
					if fail_bool == True:
						raise Exception("%s" % e)

			links = []
			
			#print extras
			for extra in extras:
				links = resolvers.createMeta(extra['url'], self.name, self.logo, extra['quality'], links, key, vidtype=extra['type'], testing=testing, txt=extra['title'], poster=extra['thumb'])
				if testing == True and len(links) > 0:
					break

			for i in links:
				sources.append(i)
				
			if len(sources) == 0:
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
			else:
				log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
				
			log('INFO', 'get_sources', 'Completed')
			
			return sources
		except Exception as e:
			log('ERROR', 'get_sources', '%s' % e)
			log('INFO', 'get_sources', 'Completed')
			return sources
Code Example #19
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            year = data['year']
            aliases = eval(data['aliases'])
            #cookie = '; approve_search=yes'
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            log(type='INFO',
                method='get_sources',
                err='Searching - %s' % query,
                dolog=False,
                logToControl=False,
                doPrint=True)
            result = client.request(query)  #, cookie=cookie)

            links_m = []

            try:
                if 'episode' in data:
                    r = client.parseDOM(result,
                                        'div',
                                        attrs={'class': 'ml-item'})
                    r = zip(client.parseDOM(r, 'a', ret='href'),
                            client.parseDOM(r, 'a', ret='title'))
                    r = [(i[0], i[1],
                          re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1]))
                         for i in r]
                    r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
                    url = [
                        i[0] for i in r if self.matchAlias(i[2][0], aliases)
                        and i[2][1] == data['season']
                    ][0]

                    url = '%swatch' % url
                    result = client.request(url)

                    url = re.findall(
                        'a href=\"(.+?)\" class=\"btn-eps first-ep \">Episode %02d'
                        % int(data['episode']), result)[0]

                else:
                    r = client.parseDOM(result,
                                        'div',
                                        attrs={'class': 'ml-item'})
                    r = zip(client.parseDOM(r, 'a', ret='href'),
                            client.parseDOM(r, 'a', ret='title'),
                            client.parseDOM(r, 'img', ret='data-original'))

                    results = [(i[0], i[1], re.findall(r'images/(.*?)-', i[2]))
                               for i in r]

                    try:
                        r = [(i[0], i[1], i[2][0]) for i in results
                             if len(i[2]) > 0]
                        url = [
                            i[0] for i in r
                            if self.matchAlias(i[1], aliases) and (
                                year == i[2])
                        ][0]
                    except Exception as e:
                        print e
                        url = None

                    if (url == None):
                        url = [
                            i[0] for i in results
                            if self.matchAlias(i[1], aliases)
                        ][0]
                    url = urlparse.urljoin(url, 'watch')

                #url = client.request(url, output='geturl')
                if url == None: raise Exception()
            except Exception as e:
                raise Exception('Step 1 Failed: %s > %s' % (url, e))

            url = url if 'http' in url else urlparse.urljoin(
                self.base_link, url)
            result = client.request(url)
            try:
                poster = client.parseDOM(result,
                                         'img',
                                         attrs={'itemprop': 'image'},
                                         ret='src')[0]
            except:
                poster = None
            src = re.findall('src\s*=\s*"(.*streamdor.co\/video\/\d+)"',
                             result)[0]
            if src.startswith('//'):
                src = 'http:' + src
            episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]
            p = client.request(src, referer=url)

            riptype = 'BRRIP'

            try:
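                # The embed wraps its player config in JuicyCodes.Run("..."+"..."): undo the
                # string concatenation, base64-decode, then unpack the packed JS.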
                p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p,
                               re.IGNORECASE)[0]
                p = re.sub(r'\"\s*\+\s*\"', '', p)
                p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
                p = base64.b64decode(p)
                p = jsunpack.unpack(p)
                p = unicode(p, 'utf-8')

                post = client.encodePostData({'id': episodeId})

                p2 = client.request('https://embed.streamdor.co/token.php?v=5',
                                    post=post,
                                    referer=src,
                                    XHR=True,
                                    timeout=60)

                js = json.loads(p2)
                tok = js['token']
                quali = 'SD'
                try:
                    quali = re.findall(r'label:"(.*?)"', p)[0]
                except:
                    pass

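                # The unpacked JS defines var episode={...}; its fileEmbed/filePlaylist/fileHLS
                # fields point at the actual streams.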
                p = re.findall(r'var\s+episode=({[^}]+});', p)[0]
                js = json.loads(p)
                ss = []

                try:
                    rtype = js['eName']
                    if '0p' in rtype.lower() or 'sd' in rtype.lower(
                    ) or 'hd' in rtype.lower():
                        raise Exception()
                    riptype = rtype
                except:
                    pass

                #print js

                #if 'eName' in js and js['eName'] != '':
                #	quali = source_utils.label_to_quality(js['eName'])
                if 'fileEmbed' in js and js['fileEmbed'] != '':
                    ss.append([js['fileEmbed'], quali])
                if 'filePlaylist' in js and js['filePlaylist'] != '':
                    js_data = client.request(
                        'https://embed.streamdor.co/play/sources?hash=%s&token=%s'
                        % (js['filePlaylist'], tok),
                        referer=src,
                        XHR=True)

                    js = json.loads(js_data)
                    m_srcs = js['playlist'][0]['sources']
                    if 'error' not in m_srcs:
                        for m_src in m_srcs:
                            ss.append([m_src['file'], m_src['label']])
                if 'fileHLS' in js and js['fileHLS'] != '':
                    ss.append([
                        'https://hls.streamdor.co/%s%s' % (tok, js['fileHLS']),
                        quali
                    ])
            except Exception as e:
                raise Exception('Step 2 Failed: %s > %s' % (url, e))

            for link in ss:
                try:
                    if 'google' in url:
                        xs = client.googletag(url)
                        for x in xs:
                            try:
                                links_m = resolvers.createMeta(x['url'],
                                                               self.name,
                                                               self.logo,
                                                               x['quality'],
                                                               links_m,
                                                               key,
                                                               riptype,
                                                               poster=poster,
                                                               testing=testing)
                                if testing == True and len(links_m) > 0:
                                    break
                            except:
                                pass
                    else:
                        try:
                            links_m = resolvers.createMeta(link[0],
                                                           self.name,
                                                           self.logo,
                                                           link[1],
                                                           links_m,
                                                           key,
                                                           riptype,
                                                           poster=poster,
                                                           testing=testing)
                            if testing == True and len(links_m) > 0:
                                break
                        except:
                            pass
                except:
                    pass

            for l in links_m:
                sources.append(l)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
                return sources

            log('SUCCESS',
                'get_sources',
                '%s sources : %s' %
                (cleantitle.title_from_key(key), len(sources)),
                dolog=not testing)
            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
            return sources
Code Example #20
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                log('INFO', 'get_sources', 'Completed')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                log('INFO', 'get_sources', 'Completed')
                return sources

            links_m = []
            page_url = url
            r = client.request(url)
            try:
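                # Every <li><a rel="nofollow"> entry on the page is treated as a candidate host link.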
                match = re.compile(
                    '<li><a href="(.+?)" rel="nofollow">(.+?)<').findall(r)
                for url, check in match:
                    quality = '720p'
                    poster = None
                    riptype = 'BRRIP'
                    vidtype = 'Show'
                    sub_url = None
                    l = resolvers.createMeta(url,
                                             self.name,
                                             self.logo,
                                             quality, [],
                                             key,
                                             poster=poster,
                                             riptype=riptype,
                                             vidtype=vidtype,
                                             sub_url=sub_url,
                                             testing=testing,
                                             page_url=page_url)
                    for ll in l:
                        if ll != None and 'key' in ll.keys():
                            links_m.append(ll)
            except:
                return sources

            for l in links_m:
                if l != None and 'key' in l.keys():
                    sources.append(l)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
            else:
                log(
                    'SUCCESS', 'get_sources', '%s sources : %s' %
                    (cleantitle.title_from_key(key), len(sources)))

            log('INFO', 'get_sources', 'Completed')

            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e)
            log('INFO', 'get_sources', 'Completed')
            return sources
Code Example #21
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                log('INFO', 'get_sources', 'Completed')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                log('INFO', 'get_sources', 'Completed')
                return sources

            year = None
            episode = None
            season = None

            log('INFO', 'get_sources-1', 'data-items: %s' % url, dolog=False)
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            try:
                year = re.findall(
                    '(\d{4})', data['premiered']
                )[0] if 'tvshowtitle' in data else data['year']
            except:
                try:
                    year = data['year']
                except:
                    year = None
            try:
                season = data['season']
            except:
                pass
            try:
                episode = data['episode']
            except:
                pass

            queries = []
            if season != None:
                queries = [{
                    'keyword': '%s %s' % (title, season)
                }, {
                    'keyword': title
                }]
            else:
                queries = [{
                    'keyword': '%s %s' % (title, year)
                }, {
                    'keyword': title
                }]

            rs = []
            for query in queries:
                search_url = urlparse.urljoin(self.base_link, '/search.html')
                search_url = search_url + '?' + urllib.urlencode(query)
                log('INFO',
                    'get_sources-2',
                    'search-url: %s' % search_url,
                    dolog=False)

                result = proxies.request(search_url,
                                         headers=self.headers,
                                         proxy_options=proxy_options,
                                         use_web_proxy=self.proxyrequired,
                                         httpsskip=True)
                rs = client.parseDOM(result,
                                     'ul',
                                     attrs={'class': 'listing items'})
                if len(rs) > 0 and len(rs[0].strip()) > 4:
                    break

            r = [(urlparse.urljoin(self.base_link,
                                   client.parseDOM(i, 'a', ret='href')[0]),
                  client.parseDOM(i, 'div', attrs={'class': 'name'}))
                 for i in rs]
            ux = None
            for s in r:
                ux = s[0]
                result = proxies.request(ux,
                                         headers=self.headers,
                                         proxy_options=proxy_options,
                                         use_web_proxy=self.proxyrequired,
                                         httpsskip=True)
                rs = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'watch infonation'})[0]
                rs = client.parseDOM(rs, 'ul', attrs={'class': 'three'})[0]
                if season != None:
                    break
                if year != None and year in rs:
                    break

            log('INFO',
                'get_sources-3',
                'match-page-url: %s' % ux,
                dolog=False)
            links_m = []
            trailers = []
            poster = None
            vidtype = 'Movie'
            if season != None:
                vidtype = 'Show'

            riptype = 'BRRIP'
            quality = '720p'
            sub_url = None

            try:
                poster1 = client.parseDOM(result,
                                          'div',
                                          attrs={'class': 'picture'})
                poster = client.parseDOM(poster1, 'img', ret='src')[0]
            except:
                pass

            links = client.parseDOM(result,
                                    'li',
                                    attrs={'class': 'child_episode'})

            try:
                if season == None:
                    rip_qual = client.parseDOM(result,
                                               'div',
                                               attrs={'id': 'info_movies'})[0]
                    rip_qual = client.parseDOM(rip_qual,
                                               'div',
                                               attrs={'class': 'right'})[0]
                    rip_qual = client.parseDOM(rip_qual, 'a')[0].strip()
                    rip_qual2 = client.parseDOM(links[0], 'a',
                                                ret='title')[0]

                    if 'HD' not in rip_qual and 'HD' not in rip_qual2:
                        riptype = 'CAM'
                    elif 'CAM' in rip_qual or 'CAM' in rip_qual2:
                        riptype = 'CAM'
                    if riptype == 'CAM':
                        quality = '480p'
                    if '720p' in rip_qual or '720p' in rip_qual2:
                        quality = '720p'
                    elif '1080p' in rip_qual or '1080p' in rip_qual2:
                        quality = '1080p'
            except:
                pass
            mov_url = None

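            # Walk the episode links: movies take the link directly; shows must
            # match the requested episode number parsed from the link title.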
            for l in links:
                try:
                    mov_urlx = urlparse.urljoin(
                        self.base_link,
                        client.parseDOM(l, 'a', ret='href')[0])
                    ep_title = client.parseDOM(l, 'a', ret='title')[0]

                    if season == None:
                        mov_url = mov_urlx
                    else:
                        try:
                            ep_nr = re.findall(r'Episode (.*?) ', ep_title)[0]
                        except:
                            try:
                                ep_nr = re.findall(r'Episode (.*?)-',
                                                   ep_title)[0]
                            except:
                                try:
                                    ep_nr = re.findall(r'Episode (.*?):',
                                                       ep_title)[0]
                                except:
                                    ep_nr = re.findall(r'Episode (.*)',
                                                       ep_title)[0]

                        ep_nr = ep_nr.replace('-',
                                              '').replace(':',
                                                          '').replace(' ', '')
                        ep_nr = filter(lambda x: x.isdigit(), ep_nr)

                        if int(episode) == int(ep_nr):
                            mov_url = mov_urlx
                except Exception as e:
                    log('FAIL',
                        'get_sources-4-A',
                        '%s: %s' % (title, e),
                        dolog=False)

            if mov_url == None:
                raise Exception('No match found !')

            if season == None:
                log('INFO',
                    'get_sources-4',
                    'movie-page-url: %s' % mov_url,
                    dolog=False)
            else:
                log('INFO',
                    'get_sources-4',
                    'show-episode-url: %s' % mov_url,
                    dolog=False)

            page_url = mov_url
            result = proxies.request(mov_url,
                                     headers=self.headers,
                                     proxy_options=proxy_options,
                                     use_web_proxy=self.proxyrequired,
                                     httpsskip=True)

            try:
                sub_url = re.findall(r'\"(.*vtt)\"', result)[0]
            except:
                pass

            if testing == False:
                try:
                    matches = re.compile(
                        'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
                    ).findall(result)
                    for match in matches:
                        try:
                            if 'youtube.com' in match:
                                match = match.replace('embed/', 'watch?v=')
                                trailers.append(match)
                        except:
                            pass
                except Exception as e:
                    pass

                for trailer in trailers:
                    try:
                        l = resolvers.createMeta(trailer,
                                                 self.name,
                                                 self.logo,
                                                 '720p', [],
                                                 key,
                                                 poster=poster,
                                                 vidtype='Trailer',
                                                 testing=testing)
                        for ll in l:
                            if ll != None and 'key' in ll.keys():
                                links_m.append(ll)
                    except:
                        pass

            links = client.parseDOM(result,
                                    'div',
                                    attrs={'class': 'anime_muti_link'})
            links = client.parseDOM(links, 'li', ret='data-video')
            video_urls = []

            for l in links:
                if 'http' not in l:
                    l = 'http:' + l
                video_urls.append(l)

            for video_url in video_urls:
                try:
                    l = resolvers.createMeta(video_url,
                                             self.name,
                                             self.logo,
                                             quality, [],
                                             key,
                                             poster=poster,
                                             riptype=riptype,
                                             vidtype=vidtype,
                                             sub_url=sub_url,
                                             testing=testing,
                                             page_url=page_url)
                    for ll in l:
                        if ll != None and 'key' in ll.keys():
                            links_m.append(ll)
                except:
                    pass

            for l in links_m:
                if l != None and 'key' in l.keys():
                    sources.append(l)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
            else:
                log(
                    'SUCCESS', 'get_sources', '%s sources : %s' %
                    (cleantitle.title_from_key(key), len(sources)))

            log('INFO', 'get_sources', 'Completed')

            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e)
            log('INFO', 'get_sources', 'Completed')
            return sources
Code Example #22
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        #try:
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                return sources

            base_link = self.base_link

            try:
                if url[0].startswith('http'):
                    base_link = url[0]
                mid = re.findall('-(\d+)', url[0])[-1]
            except:
                if url.startswith('http'):
                    base_link = url
                mid = re.findall('-(\d+)', url)[-1]

            try:
                if len(url[1]) > 0:
                    episode = url[1]
                else:
                    episode = None
            except:
                episode = None

            #print mid

            links_m = []
            trailers = []
            headers = {'Referer': self.base_link}

            u = urlparse.urljoin(self.base_link, url[0])
            #print u
            #r = client.request(u, headers=headers, IPv4=True)
            r = proxies.request(u,
                                headers=headers,
                                IPv4=True,
                                proxy_options=proxy_options,
                                use_web_proxy=self.proxyrequired)

            try:
                elem = client.parseDOM(r, 'span', attrs={'class':
                                                         'quality'})[0]
                qual = source_utils.check_sd_url(elem)
                riptype = source_utils.check_sd_url_rip(elem)
            except Exception as e:
                qual = '480p'
                riptype = 'BRRIP'

            try:
                poster = client.parseDOM(r, 'div', attrs={'class':
                                                          'dm-thumb'})[0]
                poster = client.parseDOM(poster, 'img', ret='src')[0]
            except:
                poster = None

            if testing == False:
                try:

                    #regex = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
                    #matches = re.finditer(regex, r, re.MULTILINE)
                    matches = re.compile(
                        'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
                    ).findall(r)
                    for match in matches:
                        try:
                            #print match
                            if 'youtube.com' in match:
                                match = match.replace('embed/', 'watch?v=')
                                trailers.append(match)
                        except:
                            pass
                except Exception as e:
                    pass

                for trailer in trailers:
                    links_m = resolvers.createMeta(trailer,
                                                   self.name,
                                                   self.logo,
                                                   '720p',
                                                   links_m,
                                                   key,
                                                   vidtype='Trailer',
                                                   testing=testing)

            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                #print u
                #r = client.request(u, headers=headers, XHR=True, IPv4=True)
                r = proxies.request(u,
                                    headers=headers,
                                    XHR=True,
                                    IPv4=True,
                                    proxy_options=proxy_options,
                                    use_web_proxy=self.proxyrequired)
                r = json.loads(r)['html']
                r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                ids = client.parseDOM(r, 'li', ret='data-id')
                servers = client.parseDOM(r, 'li', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)

                for eid in r:
                    #print r
                    try:
                        sub_url = None
                        try:
                            ep = re.findall('episode.*?(\d+):.*?',
                                            eid[2].lower())[0]
                        except:
                            ep = 0

                        if (episode is None) or (int(ep) == int(episode)):

                            url = urlparse.urljoin(
                                self.base_link,
                                self.token_link % (eid[0], mid))
                            #script = client.request(url, IPv4=True)
                            script = proxies.request(
                                url,
                                IPv4=True,
                                proxy_options=proxy_options,
                                use_web_proxy=self.proxyrequired)
                            #print script

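                            # The token script comes obfuscated in one of three known schemes;
                            # each decoder recovers the x/y params for the source request.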
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith(
                                    '()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script and '_y=' in script:
                                params = self.uncensored3(script)
                            else:
                                raise Exception()

                            u = urlparse.urljoin(
                                self.base_link, self.sourcelink %
                                (eid[0], params['x'], params['y']))
                            #print u
                            #r = client.request(u, IPv4=True)
                            r = proxies.request(
                                u,
                                IPv4=True,
                                proxy_options=proxy_options,
                                use_web_proxy=self.proxyrequired)

                            if r == None or len(r) == 0:
                                u = urlparse.urljoin(
                                    self.base_link, self.embed_link % (eid[0]))
                                #print u
                                #r = client.request(u, IPv4=True)
                                r = proxies.request(
                                    u,
                                    IPv4=True,
                                    proxy_options=proxy_options,
                                    use_web_proxy=self.proxyrequired)

                            try:
                                url = json.loads(r)['playlist'][0]['sources']
                            except:
                                url = [{'file': json.loads(r)['src']}]

                            try:
                                url = [i['file'] for i in url]
                            except:
                                url = [url['file']]

                            try:
                                sub_url = json.loads(
                                    r)['playlist'][0]['tracks'][0]['file']
                            except:
                                pass

                            vidtype = 'Movie'
                            if int(ep) > 0:
                                vidtype = 'Show'

                            for s in url:
                                links_m = resolvers.createMeta(s,
                                                               self.name,
                                                               self.logo,
                                                               qual,
                                                               links_m,
                                                               key,
                                                               poster=poster,
                                                               riptype=riptype,
                                                               vidtype=vidtype,
                                                               sub_url=sub_url,
                                                               testing=testing)
                    except:
                        pass
            except:
                pass

            sources += links_m

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
                return sources

            log('SUCCESS',
                'get_sources',
                '%s sources : %s' %
                (cleantitle.title_from_key(key), len(sources)),
                dolog=not testing)
            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
            return sources
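The trailer scan above reduces to a small helper: match every URL in the page with a permissive character-class regex, keep the YouTube hits, and rewrite embed links into the watch?v= form the resolvers expect. A minimal standalone sketch (the name extract_youtube_trailers is illustrative, not from the source):

import re

# Permissive URL matcher, copied from the example above.
URL_RE = re.compile(
    r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')

def extract_youtube_trailers(html):
    trailers = []
    for match in URL_RE.findall(html):
        if 'youtube.com' in match:
            # Rewrite embed URLs to the watch?v= form.
            trailers.append(match.replace('embed/', 'watch?v='))
    return trailers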
Code example #23
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if url == None:
                log('FAIL',
                    'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                return sources

            urls = []

            if not str(url).startswith('http'):
                try:
                    data = urlparse.parse_qs(url)
                    data = dict([(i, data[i][0]) if data[i] else (i, '')
                                 for i in data])

                    title = data[
                        'tvshowtitle'] if 'tvshowtitle' in data else data[
                            'title']

                    if 'year' in data:
                        year = data['year']
                    try:
                        episode = data['episode']
                    except:
                        pass

                    query = {'keyword': title}
                    search_url = urlparse.urljoin(self.base_link,
                                                  '/search.html')
                    search_url = search_url + '?' + urllib.urlencode(query)

                    result = proxies.request(search_url,
                                             headers=self.headers,
                                             proxy_options=proxy_options,
                                             use_web_proxy=self.proxyrequired,
                                             httpsskip=True)

                    r = client.parseDOM(result,
                                        'div',
                                        attrs={'class': 'wrapper'})
                    try:
                        r = r[1]
                    except:
                        raise Exception()

                    r1 = client.parseDOM(r, 'figure')
                    r2 = []
                    for res in r1:
                        l = client.parseDOM(res, 'a', ret='href')[0]
                        t = client.parseDOM(res,
                                            'div',
                                            attrs={'class': 'title'})[0]
                        r = (l, t)
                        r2.append(r)

                    r = r2

                    if 'season' in data:
                        r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r]
                        #print r
                        #title += '%01d' % int(data['season'])
                        url = [(i[0], re.findall('(.+?) (\d+)$', i[1]))
                               for i in r]
                        url = [(i[0], i[1][0][0], i[1][0][1]) for i in url
                               if len(i[1]) > 0]
                        url = [
                            i for i in url
                            if cleantitle.get(title) in cleantitle.get(i[1])
                        ]
                        #for i in url:
                        #	print i[2],i[0],i[1]
                        #	print '%01d' % int(data['season']) == '%01d' % int(i[2])

                        url = [
                            i for i in url if '%01d' %
                            int(data['season']) == '%01d' % int(i[2])
                        ]
                        for i in url:
                            urls.append(urlparse.urljoin(self.base_link, i[0]))
                    else:
                        for i in r:
                            if cleantitle.get(title) in cleantitle.get(i[1]):
                                urls.append(
                                    urlparse.urljoin(self.base_link, i[0]))

                except:
                    urls = [self.base_link]  # fall back to the site root

            links_m = []

            page = None
            for url in urls:
                try:
                    page = result = proxies.request(
                        url,
                        headers=self.headers,
                        proxy_options=proxy_options,
                        use_web_proxy=self.proxyrequired,
                        httpsskip=True)

                    quality = '480p'
                    type = 'BRRIP'

                    try:
                        atr = client.parseDOM(result,
                                              'span',
                                              attrs={'class': 'quanlity'})[0]
                        q, t = cleantitle.getQuality(atr)
                        if q != None:
                            quality = q
                            type = t
                    except:
                        try:
                            atr = client.parseDOM(result,
                                                  'span',
                                                  attrs={'class':
                                                         'quality'})[0]
                            q, t = cleantitle.getQuality(atr)
                            if q != None:
                                quality = q
                                type = t
                        except:
                            pass

                    try:
                        atr = client.parseDOM(result,
                                              'span',
                                              attrs={'class': 'year'})[0]
                    except:
                        atr = ''

                    try:
                        atr_release = client.parseDOM(result,
                                                      'div',
                                                      attrs={'class':
                                                             'meta'})[1]
                    except:
                        atr_release = ''

                    if 'season' not in data:
                        resultx = result if str(int(year)) in atr or str(
                            int(year) + 1) in atr or str(int(year) -
                                                         1) in atr else None
                        if resultx == None:
                            resultx = result if str(
                                int(year)) in atr_release or str(
                                    int(year) + 1) in atr_release or str(
                                        int(year) - 1) in atr_release else None
                        if resultx == None:
                            raise Exception()

                    #print result

                    #r = client.parseDOM(result, 'article', attrs = {'class': 'player current'})[0]
                    #r = client.parseDOM(r, 'iframe', ret='src')[0]
                    #r = r.split('?')

                    #print r

                    try:
                        servers = re.findall(r'link_server_.*\"(.*)\";', page)
                        for server in servers:
                            try:
                                if 'http' not in server:
                                    server = 'http:' + server

                                    result = proxies.request(
                                        server,
                                        headers=self.headers,
                                        proxy_options=proxy_options,
                                        use_web_proxy=self.proxyrequired,
                                        httpsskip=True)
                                    server = client.parseDOM(result,
                                                             'iframe',
                                                             ret='src')[0]
                                    if 'http' not in server:
                                        server = 'http:' + server

                                links_m = resolvers.createMeta(server,
                                                               self.name,
                                                               self.logo,
                                                               quality,
                                                               links_m,
                                                               key,
                                                               testing=testing)
                            except Exception as e:
                                pass
                            if testing and len(links_m) > 0:
                                break
                    except Exception as e:
                        pass

                    try:
                        servers = re.findall(r'link_server_.*\'(.*)\';', page)
                        for server in servers:
                            if server != None:
                                if 'http' not in server:
                                    server = 'http:' + server
                                try:
                                    links_m = resolvers.createMeta(
                                        server,
                                        self.name,
                                        self.logo,
                                        quality,
                                        links_m,
                                        key,
                                        testing=testing)
                                except:
                                    pass
                    except:
                        pass
                except:
                    pass

            for link in links_m:
                sources.append(link)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
                return sources

            log('SUCCESS',
                'get_sources',
                '%s sources : %s' %
                (cleantitle.title_from_key(key), len(sources)),
                dolog=not testing)
            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
            return sources
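Both findall passes above scrape server URLs assigned to link_server_* variables in inline JavaScript, one pass per quote style, then prefix protocol-relative '//host/...' values with 'http:'. A hedged sketch that folds the two passes into one pattern (the combined regex is an illustration, not the provider's exact expression):

import re

# Matches both link_server_N = "..."; and link_server_N = '...'; assignments.
SERVER_RE = re.compile('link_server_\\w*\\s*=\\s*["\'](.*?)["\'];')

def collect_servers(page):
    normalized = []
    for s in set(SERVER_RE.findall(page)):  # de-duplicate
        if s and 'http' not in s:
            s = 'http:' + s  # protocol-relative URL
        normalized.append(s)
    return normalized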
Code example #24
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                log('INFO', 'get_sources', 'Completed')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                log('INFO', 'get_sources', 'Completed')
                return sources

            urls = []
            vidtype = 'Movie'

            if not str(url).startswith('http'):
                try:
                    data = urlparse.parse_qs(url)
                    data = dict([(i, data[i][0]) if data[i] else (i, '')
                                 for i in data])

                    title = data[
                        'tvshowtitle'] if 'tvshowtitle' in data else data[
                            'title']

                    if 'year' in data:
                        year = data['year']

                    if 'season' in data:
                        query = {
                            'keyword':
                            '%s %s %s' % (title, 'season', data['season'])
                        }
                    else:
                        query = {'keyword': title}

                    search_url = urlparse.urljoin(self.base_link,
                                                  '/search.html')
                    search_url = search_url + '?' + urllib.urlencode(query)

                    result = proxies.request(search_url,
                                             headers=self.headers,
                                             proxy_options=proxy_options,
                                             use_web_proxy=self.proxyrequired,
                                             httpsskip=True)
                    r = client.parseDOM(result,
                                        'div',
                                        attrs={'class': 'wrapper'})

                    try:
                        r = r[1]
                    except:
                        raise Exception()

                    r1 = client.parseDOM(r, 'figure')
                    r2 = []
                    for res in r1:
                        l = client.parseDOM(res, 'a', ret='href')[0]
                        t = client.parseDOM(res,
                                            'div',
                                            attrs={'class': 'title'})[0]
                        r = (l, t)
                        r2.append(r)

                    r = r2

                    if 'season' in data:
                        vidtype = 'Show'
                        episode = int(data['episode'])

                        r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r]
                        url = [(i[0], re.findall('(.+?) (\d+)$', i[1]))
                               for i in r]
                        url = [(i[0], i[1][0][0], i[1][0][1]) for i in url
                               if len(i[1]) > 0]
                        url = [
                            i for i in url
                            if cleantitle.get(title) in cleantitle.get(i[1])
                        ]
                        url = [
                            i for i in url if '%01d' %
                            int(data['season']) == '%01d' % int(i[2])
                        ]

                        ep_url = []
                        for i in url:
                            result = proxies.request(
                                urlparse.urljoin(self.base_link, i[0]),
                                headers=self.headers,
                                proxy_options=proxy_options,
                                use_web_proxy=self.proxyrequired,
                                httpsskip=True)
                            t = client.parseDOM(result,
                                                'div',
                                                attrs={'class': 'eps'})
                            for tt in t:
                                if 'watch' in tt:
                                    tt = client.parseDOM(
                                        tt, 'div', attrs={'class':
                                                          'server'})[0]
                                    section_links = client.parseDOM(tt,
                                                                    'a',
                                                                    ret='href')
                                    for a_link in section_links:
                                        if episode < 100:
                                            f_key = '-episode-%02d-' % episode
                                        else:
                                            f_key = '-episode-%03d-' % episode
                                        if f_key in a_link:
                                            log('INFO', 'get_sources',
                                                'episode url = %s' % a_link)
                                            ep_url.append(a_link)
                                            break
                        for i in ep_url:
                            urls.append(urlparse.urljoin(self.base_link, i))
                    else:
                        for i in r:
                            if cleantitle.get(title) in cleantitle.get(i[1]):
                                urls.append(
                                    urlparse.urljoin(self.base_link, i[0]))

                except:
                    urls = [self.base_link]  # fall back to the site root

            links_m = []

            page = None
            for url in urls:
                try:
                    log('INFO',
                        'get_sources',
                        'url == %s' % url,
                        dolog=False,
                        doPrint=True)
                    page_url = url
                    page = result = proxies.request(
                        url,
                        headers=self.headers,
                        proxy_options=proxy_options,
                        use_web_proxy=self.proxyrequired,
                        httpsskip=True)

                    quality = '480p'
                    type = 'BRRIP'

                    atr = ''
                    qtr = ''

                    try:
                        qtr = client.parseDOM(result,
                                              'span',
                                              attrs={'class': 'quanlity'})[0]
                        # q, t = cleantitle.getQuality(atr)
                        # if q != None:
                        # quality = q
                        # type = t
                    except:
                        try:
                            qtr = client.parseDOM(result,
                                                  'span',
                                                  attrs={'class':
                                                         'quality'})[0]
                            # q, t = cleantitle.getQuality(atr)
                            # if q != None:
                            # quality = q
                            # type = t
                        except:
                            pass

                    try:
                        quality = source_utils.check_sd_url(qtr)
                        type = source_utils.check_sd_url_rip(qtr)
                    except Exception as e:
                        quality = '480p'
                        type = 'BRRIP'

                    try:
                        atr = client.parseDOM(result,
                                              'span',
                                              attrs={'class': 'year'})[0]
                    except:
                        atr = ''

                    try:
                        atr_release = client.parseDOM(result,
                                                      'div',
                                                      attrs={'class':
                                                             'meta'})[1]
                    except:
                        atr_release = ''

                    if 'season' in data:
                        vidtype = 'Show'
                    else:
                        vidtype = 'Movie'
                        resultx = result if str(int(year)) in atr else None
                        if resultx == None:
                            resultx = result if str(
                                int(year)) in atr_release else None
                        if resultx == None:
                            raise Exception()

                    try:
                        poster = client.parseDOM(page,
                                                 'div',
                                                 attrs={'class':
                                                        'detail-l'})[0]
                        poster = client.parseDOM(poster, 'img', ret='src')[0]
                        if 'http' not in poster:
                            poster = 'http:' + poster
                    except:
                        poster = None
                    #print result

                    #r = client.parseDOM(result, 'article', attrs = {'class': 'player current'})[0]
                    #r = client.parseDOM(r, 'iframe', ret='src')[0]
                    #r = r.split('?')

                    try:
                        servers = re.findall(r'link_server_.*\"(.*)\";', page)
                        servers = list(set(servers))
                        for server in servers:
                            try:
                                if 'http' not in server:
                                    server = 'http:' + server

                                result = proxies.request(
                                    server,
                                    headers=self.headers,
                                    proxy_options=proxy_options,
                                    use_web_proxy=self.proxyrequired,
                                    httpsskip=True)

                                server = client.parseDOM(result,
                                                         'iframe',
                                                         ret='src')[0]
                                if len(server) > 0:
                                    if 'http' not in server:
                                        server = 'http:' + server

                                    l = resolvers.createMeta(server,
                                                             self.name,
                                                             self.logo,
                                                             quality, [],
                                                             key,
                                                             poster=poster,
                                                             riptype=type,
                                                             vidtype=vidtype,
                                                             testing=testing,
                                                             page_url=page_url)
                                    for ll in l:
                                        if ll != None and 'key' in ll.keys():
                                            links_m.append(ll)
                            except Exception as e:
                                pass
                            if testing and len(links_m) > 0:
                                break
                    except Exception as e:
                        pass

                    try:
                        servers = re.findall(r'link_server_.*\'(.*)\';', page)
                        servers = list(set(servers))
                        for server in servers:
                            if server != None:
                                if 'http' not in server:
                                    server = 'http:' + server

                                try:
                                    l = resolvers.createMeta(server,
                                                             self.name,
                                                             self.logo,
                                                             quality, [],
                                                             key,
                                                             poster=poster,
                                                             riptype=type,
                                                             vidtype=vidtype,
                                                             testing=testing,
                                                             page_url=page_url)
                                    for ll in l:
                                        if ll != None and 'key' in ll.keys():
                                            links_m.append(ll)
                                except:
                                    pass
                    except:
                        pass

                    break
                except:
                    pass

            for link in links_m:
                if link != None and 'key' in link.keys():
                    sources.append(link)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
            else:
                log(
                    'SUCCESS', 'get_sources', '%s sources : %s' %
                    (cleantitle.title_from_key(key), len(sources)))

            log('INFO', 'get_sources', 'Completed')

            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e)
            log('INFO', 'get_sources', 'Completed')
            return sources
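The f_key construction above is the fragile part of the episode matcher: listing URLs zero-pad the episode number to two digits below 100 and three digits from 100 up. Pulled out as a tiny helper for clarity (the name is illustrative):

def episode_key(episode):
    # '-episode-07-' for episode 7, '-episode-107-' for episode 107.
    episode = int(episode)
    if episode < 100:
        return '-episode-%02d-' % episode
    return '-episode-%03d-' % episode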
Code example #25
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                return sources

            processed = []
            for link in url:
                if re.match(
                        '((?!\.part[0-9]).)*$', link['url'],
                        flags=re.IGNORECASE
                ) and '://' in link['url'] and link['url'] not in processed:
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(
                            link['url'].strip().lower()).netloc)[0].split(
                                '.')[0]
                    scheme = urlparse.urlparse(link['url']).scheme
                    #if host in hostDict and scheme:
                    if scheme:
                        if '1080' in link['title'] or '1080' in link['url']:
                            quality = '1080p'
                        elif '720' in link['title'] or '720' in link['url']:
                            quality = 'HD'
                        else:
                            quality = 'SD'

                        file_ext = '.mp4'
                        if len(link['ext']) > 0 and len(
                                link['ext']) < 4 and len(link['src']) > 0:
                            txt = '%s (.%s)' % (link['src'], link['ext'])
                            file_ext = '.%s' % link['ext']
                        elif len(link['ext']) > 0 and len(
                                link['ext']) < 4 and len(link['src']) == 0:
                            txt = '(.%s)' % link['ext']
                            file_ext = '.%s' % link['ext']
                        elif (len(link['ext']) == 0 or
                              len(link['ext']) > 3) and len(link['src']) > 0:
                            txt = '%s' % link['src']
                        else:
                            txt = ''

                        if 'trailer' in link['title'].lower():
                            sources = resolvers.createMeta(link['url'],
                                                           self.name,
                                                           self.logo,
                                                           quality,
                                                           sources,
                                                           key,
                                                           lang=link['lang'],
                                                           txt=txt,
                                                           file_ext=file_ext,
                                                           vidtype='Trailer',
                                                           testing=testing)
                        else:
                            sources = resolvers.createMeta(link['url'],
                                                           self.name,
                                                           self.logo,
                                                           quality,
                                                           sources,
                                                           key,
                                                           lang=link['lang'],
                                                           txt=txt,
                                                           file_ext=file_ext,
                                                           testing=testing)

                        processed.append(link['url'])

            if self.fetchedtoday > 0:
                self.msg = 'Fetched today: %s' % str(self.fetchedtoday)
                log('INFO', 'get_sources', self.msg, dolog=not testing)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
                return sources

            log('SUCCESS',
                'get_sources',
                '%s sources : %s' %
                (cleantitle.title_from_key(key), len(sources)),
                dolog=not testing)
            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
            return sources
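The quality ladder above is worth isolating: any '1080' marker in the title or URL wins, then '720' (reported as 'HD'), with 'SD' as the fallback. A minimal sketch of the same rule:

def infer_quality(title, url):
    text = '%s %s' % (title, url)
    if '1080' in text:
        return '1080p'
    if '720' in text:
        return 'HD'  # the example maps 720p sources to the 'HD' label
    return 'SD'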
Code example #26
    def get_sources(self,
                    url,
                    hosthdDict=None,
                    hostDict=None,
                    locDict=None,
                    proxy_options=None,
                    key=None,
                    testing=False):
        try:
            sources = []
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_sources', 'Provider Disabled by User')
                log('INFO', 'get_sources', 'Completed')
                return sources
            if url == None:
                log('FAIL',
                    'get_sources',
                    'url == None. Could not find a matching title: %s' %
                    cleantitle.title_from_key(key),
                    dolog=not testing)
                log('INFO', 'get_sources', 'Completed')
                return sources

            UA = client.agent()

            # get TA JSON data from tadata api

            result = proxies.request(url,
                                     proxy_options=proxy_options,
                                     use_web_proxy=self.proxyrequired,
                                     IPv4=True)
            resultx = json.loads(str(result))
            extras = resultx['video']
            # get types of videos available
            types = {'trailer': 'Trailer', 'featurette': 'Featurette'}
            quality = '720p'

            links = []
            for extra in extras:
                vidtype_e = extra['title']
                vidtype = 'Misc.'
                for t in types:
                    if t in vidtype_e.lower():
                        vidtype = types[t]
                        break
                links = resolvers.createMeta(extra['url'],
                                             self.name,
                                             self.logo,
                                             quality,
                                             links,
                                             key,
                                             vidtype=vidtype,
                                             testing=testing,
                                             txt=extra['title'],
                                             poster=extra['thumb'])
                if testing == True and len(links) > 0:
                    break

            for i in links:
                sources.append(i)

            if len(sources) == 0:
                log(
                    'FAIL', 'get_sources',
                    'Could not find a matching title: %s' %
                    cleantitle.title_from_key(key))
            else:
                log(
                    'SUCCESS', 'get_sources', '%s sources : %s' %
                    (cleantitle.title_from_key(key), len(sources)))

            log('INFO', 'get_sources', 'Completed')

            return sources
        except Exception as e:
            log('ERROR', 'get_sources', '%s' % e)
            log('INFO', 'get_sources', 'Completed')
            return sources
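The extras loop above classifies each video by a case-insensitive substring match over a small label table, defaulting to 'Misc.'. The same rule as a standalone function (illustrative name):

EXTRA_TYPES = {'trailer': 'Trailer', 'featurette': 'Featurette'}

def classify_extra(title):
    lowered = title.lower()
    for needle, label in EXTRA_TYPES.items():
        if needle in lowered:
            return label
    return 'Misc.'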
Code example #27
	def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
		try:
			sources = []
			if control.setting('Provider-%s' % name) == False:
				log('INFO','get_sources','Provider Disabled by User')
				log('INFO', 'get_sources', 'Completed')
				return sources
			if url == None: 
				log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
				log('INFO', 'get_sources', 'Completed')
				return sources
			
			url_arr=[]
			
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			
			if 'episode' in data and 'season' in data:
				url0 = (data['title'].translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower() + "/s%s/e%s" % (data['season'],data['episode'])
				url_arr.append(url0)
			else:
				url1 = (data['title'].translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower()
				url2 = (data['title'].translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower() + "-%s" % (data['year'])
				url_arr.append(url1)
				url_arr.append(url2)
				try:
					title = data['title']
					title = title.split(':')
					title = title[0]
					url3 = (title.translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower()
					url_arr.append(url3)
				except:
					pass
				
			if 'episode' in data and 'season' in data:
				try:
					url1 = (data['title'].split(':')[0].translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower() + "/s%s/e%s" % (data['season'],data['episode'])
					url_arr.append(url1)
				except:
					pass
			else:
				try:
					url4 = (data['title'].split(':')[0].translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower()
					url5 = (data['title'].split(':')[0].translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower()+ "-%s" % (data['year'])
					url_arr.append(url4)
					url_arr.append(url5)
				except:
					pass
					
			url_arr_t = []
			for u in url_arr:
				u = u.replace('--', '-')
				url_arr_t.append(u)
				
			url_arr = list(set(url_arr_t))
			links_m = []
			for url in url_arr:
				try:
					#print url
					
					url = urlparse.urljoin(self.base_link, self.watch_link % url)
					
					#print url

					r = proxies.request(url, output='geturl', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
					
					#print r
				
					if r == None: raise Exception()

					r = result = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
					#print "resp ===== %s" % r
					
					quality = '720p'

					r = re.sub(r'[^\x00-\x7F]+',' ', r)

					if 'episode' not in data or 'season' not in data:
						y = re.findall('Date\s*:\s*.+?>.+?(\d{4})', r)
						y = y[0] if len(y) > 0 else None
						#print y

						if ('year' in data and y != None and data['year'] != y): 
							#print 'year not found'
							raise Exception()

					q = client.parseDOM(r, 'title')
					q = q[0] if len(q) > 0 else None
					quality = '1080p' if ' 1080' in q else '720p'
					
					sub_url = None
					
					try:
						sub_url = urlparse.urljoin(self.base_link, re.findall('\"(\/subs.*?.srt)\"', result)[0])
					except:
						pass
						
					try:
						poster = urlparse.urljoin(self.base_link, client.parseDOM(result, 'img', ret='src', attrs = {'id': 'nameimage'})[0])
					except:
						poster = None
					
					#print quality

					#r = client.parseDOM(r, 'div', attrs = {'id': '5throw'})[0]
					#r = client.parseDOM(r, 'a', ret='href', attrs = {'rel': 'nofollow'})
					
					r_orig = r
					
					try:
						r = client.parseDOM(r_orig, 'div', attrs = {'id': '1strow'})[0]
						#print r
						r = client.parseDOM(r, 'a', ret='href', attrs = {'id': 'dm1'})[0]
						#print r
						l = resolvers.createMeta(r, self.name, self.logo, quality, [], key, poster=poster, vidtype='Movie', sub_url=sub_url, testing=testing)
						for ll in l:
							if ll != None and 'key' in ll.keys():
								links_m.append(ll)
					except Exception as e:
						log('FAIL', 'get_sources-1A', e , dolog=False)
						try:
							r = client.parseDOM(r_orig, 'div', attrs = {'id': 'n1strow'})[0]
							#print r
							r = client.parseDOM(r, 'a', ret='href', attrs = {'id': 'mega'})[0]
							#print r
							l = resolvers.createMeta(r, self.name, self.logo, quality, [], key, poster=poster, vidtype='Movie', sub_url=sub_url, testing=testing)
							for ll in l:
								if ll != None and 'key' in ll.keys():
									links_m.append(ll)
						except Exception as e:
							log('FAIL', 'get_sources-1B', e , dolog=False)	
						
					try:
						r = self.returnFinalLink(url)
						if r != None:
							l = resolvers.createMeta(r, self.name, self.logo, quality, [], key, poster=poster, vidtype='Movie', sub_url=sub_url, testing=testing)
							for ll in l:
								if ll != None and 'key' in ll.keys():
									links_m.append(ll)
					except Exception as e:
						log('FAIL', 'get_sources-2', e , dolog=False)	
				
					try:
						r = client.parseDOM(result, 'iframe', ret='src')
						r2 = [i for i in r if 'g2g' in i or 'ytid' in i]
						#print r2
						for r in r2:
							try:
								if 'http' not in r and self.urlhost in r:
									r = 'http:' + r
								elif 'http' not in r:
									r = self.base_link + r
								#print r
								r = proxies.request(r, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
								r = re.sub(r'[^\x00-\x7F]+',' ', r)
								r = client.parseDOM(r, 'iframe', ret='src')[0]
								
								part2=False
								if '.php' in r:
									r = self.base_link + r
									rx = r.replace('.php','2.php')
									
									r = proxies.request(r, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
									r = re.sub(r'[^\x00-\x7F]+',' ', r)
									r = client.parseDOM(r, 'iframe', ret='src')[0]
									
									try:
										rx = proxies.request(rx, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
										rx = re.sub(r'[^\x00-\x7F]+',' ', rx)
										rx = client.parseDOM(rx, 'iframe', ret='src')[0]
										if 'http' not in rx:
											rx = 'http:' + rx
										part2=True
									except:
										pass
								if 'http' not in r:
									r = 'http:' + r
								
								#print r
								
								if 'youtube' in r:
									vidtype = 'Trailer'
									qualityt = '720p'
									r = r.replace('?showinfo=0','')
								else:
									vidtype = 'Movie'
									qualityt = quality
									
								if part2:
									#print '2-part video'
									l = resolvers.createMeta(r, self.name, self.logo, qualityt, [], key, poster=poster, vidtype=vidtype, txt='Part-1', sub_url=sub_url, testing=testing)
									for ll in l:
										if ll != None and 'key' in ll.keys():
											links_m.append(ll)
									l = resolvers.createMeta(rx, self.name, self.logo, qualityt, [], key, poster=poster, vidtype=vidtype, txt='Part-2', sub_url=sub_url, testing=testing)
									for ll in l:
										if ll != None and 'key' in ll.keys():
											links_m.append(ll)
								else:
									l = resolvers.createMeta(r, self.name, self.logo, qualityt, [], key, poster=poster, vidtype=vidtype, sub_url=sub_url, testing=testing)
									for ll in l:
										if ll != None and 'key' in ll.keys():
											links_m.append(ll)
								
							except:
								pass
								
					except Exception as e:
						log('FAIL', 'get_sources-3', e , dolog=False)
				except Exception as e:
					log('FAIL', 'get_sources-3.1', e , dolog=False)

			for l in links_m:
				if l != None and 'key' in l.keys():
					sources.append(l)		
			
			if len(sources) == 0:
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
			else:
				log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
				
			log('INFO', 'get_sources', 'Completed')
			
			return sources
		except Exception as e:
			log('ERROR', 'get_sources', '%s' % e)
			log('INFO', 'get_sources', 'Completed')
			return sources
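The URL candidates above all come from one slug rule: delete a punctuation set, swap spaces for dashes, lowercase, collapse double dashes, and optionally append the year. Python 2's str.translate(None, chars) does the deletion in the example; this portable sketch does the same (make_slug is an illustrative name):

def make_slug(title, year=None):
    cleaned = ''.join(c for c in title if c not in '\\/:*?"\'<>|!,.')
    slug = cleaned.replace(' ', '-').lower()
    while '--' in slug:
        slug = slug.replace('--', '-')  # collapse runs of dashes
    return '%s-%s' % (slug, year) if year else slug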
Code example #28
	def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
		try:
			sources = []
			if control.setting('Provider-%s' % name) == False:
				log('INFO','get_sources','Provider Disabled by User')
				log('INFO', 'get_sources', 'Completed')
				return sources
			if url == None: 
				log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
				log('INFO', 'get_sources', 'Completed')
				return sources
			
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			year = data['year']
			aliases = eval(data['aliases'])
			#cookie = '; approve_search=yes'
			query = self.search_link % (urllib.quote_plus(title))
			query = urlparse.urljoin(self.base_link, query)
			
			log(type='INFO', method='get_sources', err='Searching - %s' % query, dolog=False, logToControl=False, doPrint=True)
			result = client.request(query) #, cookie=cookie)
			
			links_m = []
			
			try:
				if 'episode' in data:
					r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
					r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
					r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
					r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
					url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == data['season']][0]

					url = '%swatch' % url
					result = client.request(url)

					url = re.findall('a href=\"(.+?)\" class=\"btn-eps first-ep \">Episode %02d' % int(data['episode']), result)[0]

				else:
					r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
					r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'), client.parseDOM(r, 'img', ret='data-original'))
					
					results = [(i[0], i[1], re.findall(r'images/(.*?)-', i[2])) for i in r]
					
					try:
						r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
						url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
					except Exception as e:
						print e
						url = None
						pass
						
					if (url == None):
						url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
					url = urlparse.urljoin(url, 'watch')

				#url = client.request(url, output='geturl')
				if url == None: raise Exception()
			except Exception as e:
				raise Exception('Step 1 Failed: %s > %s' % (url, e))

			url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
			log(type='INFO', method='get_sources', err='Match found - %s' % url, dolog=False, logToControl=False, doPrint=True)
			
			result = client.request(url)
			try:
				poster = client.parseDOM(result, 'img', attrs={'itemprop':'image'}, ret='src')[0]
			except:
				poster = None
				
			Juicy = False
			ss = []
			riptype = 'BRRIP'
			
			if testing == False:
				trailer_res = client.parseDOM(result, 'div', attrs={'class':'block-trailer'})[0]
				trailer_res = client.parseDOM(trailer_res, 'a', ret='href')[0]
				trailer_res = client.request(trailer_res)
				
				trailers = []
				try:
					matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(trailer_res)
					for match in matches:
						try:
							if 'youtube.com' in match:
								match = match.replace('embed/','watch?v=')
								trailers.append(match)
						except:
							pass
				except Exception as e:
					pass
					
				for trailer in trailers:
					try:
						l = resolvers.createMeta(trailer, self.name, self.logo, '720p', [], key, poster=poster, vidtype='Trailer', testing=testing)
						for ll in l:
							if ll != None and 'key' in ll.keys():
								links_m.append(ll)
					except:
						pass
			
			if 'streamdor' in result and Juicy == True:
				src = re.findall('src\s*=\s*"(.*streamdor.co\/video\/\d+)"', result)[0]
				if src.startswith('//'):
					src = 'http:'+src
				episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]
				p = client.request(src, referer=url)
				
				try:
					#log(type='INFO', method='get_sources', err='Juicy Code', dolog=False, logToControl=False, doPrint=True)
					p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
					p = re.sub(r'\"\s*\+\s*\"','', p)
					p = re.sub(r'[^A-Za-z0-9+\\/=]','', p)
					p = base64.b64decode(p)
					p = jsunpack.unpack(p)
					p = unicode(p, 'utf-8')

					post = client.encodePostData({'id': episodeId})
					
					p2 = client.request('https://embed.streamdor.co/token.php?v=5', post=post, referer=src, XHR=True, timeout=60)
					
					js = json.loads(p2)
					tok = js['token']
					quali = 'SD'
					try:
						quali = re.findall(r'label:"(.*?)"',p)[0]
					except:
						pass
					
					p = re.findall(r'var\s+episode=({[^}]+});',p)[0]
					js = json.loads(p)
					
					try:
						rtype = js['eName']
						if '0p' in rtype.lower() or 'sd' in rtype.lower() or 'hd' in rtype.lower():
							raise
						riptype = rtype
					except:
						pass

					if 'fileEmbed' in js and js['fileEmbed'] != '':
						ss.append([js['fileEmbed'], quali, riptype])
					if 'filePlaylist' in js and js['filePlaylist'] != '':
						js_data = client.request('https://embed.streamdor.co/play/sources?hash=%s&token=%s'%(js['filePlaylist'],tok), referer=src, XHR=True)
						
						js = json.loads(js_data)
						m_srcs = js['playlist'][0]['sources']
						if 'error' not in m_srcs:
							for m_src in m_srcs:
								ss.append([m_src['file'], m_src['label'], riptype])
					if 'fileHLS' in js and js['fileHLS'] != '':
						ss.append(['https://hls.streamdor.co/%s%s'%(tok, js['fileHLS']), quali, riptype])
						
				except Exception as e:
					raise Exception('Step 2 Failed: %s > %s' % (url,e))
			else:
				#log(type='INFO', method='get_sources', err='Embed Code', dolog=False, logToControl=False, doPrint=True)
				div_s = client.parseDOM(result, 'div', attrs={'id': 'list-eps'})[0]
				pages = client.parseDOM(div_s, 'a', ret='href')
				#print pages
				quals = re.findall(r'>(.*?)</a>',div_s)
				#print quals
				c=0
				for p in pages:
					try:
						p1 = client.request(p, referer=url)
						file_id = re.findall(r'load_player\.html\?e=(.*?)\"',p1)[0]
						file_loc = 'https://api.streamdor.co/episode/embed/%s' % file_id
						js_data = client.request(file_loc, referer=p)
						js = json.loads(js_data)
						m_srcs = js['embed']
						try:
							rtype = quals[c]
							if '0p' in rtype.lower() or 'sd' in rtype.lower() or 'hd' in rtype.lower():
								raise
							riptype = 'CAM'
						except:
							pass
						ss.append([m_srcs, file_quality(quals[c]), riptype])
						c=c+1
					except:
						pass

			for link in ss:
				#print link
				try:
					if 'google' in url:
						xs = client.googletag(url)
						for x in xs:
							try:
								l = resolvers.createMeta(x['url'], self.name, self.logo, x['quality'], [], key, riptype, poster=poster, testing=testing)
								for ll in l:
									if ll != None and 'key' in ll.keys():
										links_m.append(ll)
								if testing == True and len(links_m) > 0:
									break
							except:
								pass
					else:
						try:
							l = resolvers.createMeta(link[0], self.name, self.logo, link[1], [], key, link[2], poster=poster, testing=testing)
							for ll in l:
								if ll != None and 'key' in ll.keys():
									links_m.append(ll)
							if testing == True and len(links_m) > 0:
								break
						except:
							pass
				except:
					pass
			
			for l in links_m:
				if l != None and 'key' in l.keys():
					sources.append(l)

			if len(sources) == 0:
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
			else:
				log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
				
			log('INFO', 'get_sources', 'Completed')
			
			return sources
		except Exception as e:
			log('ERROR', 'get_sources', '%s' % e)
			log('INFO', 'get_sources', 'Completed')
			return sources
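The streamdor branch above hinges on unpacking the JuicyCodes blob: grab the argument of JuicyCodes.Run(...), undo the '" + "' string concatenation, strip everything outside the base64 alphabet, and decode. A condensed sketch of just that step (the decoded output is still packed JavaScript, which the example feeds to jsunpack.unpack):

import base64
import re

def decode_juicycodes(page_source):
    payload = re.findall(r'JuicyCodes.Run\(([^\)]+)', page_source,
                         re.IGNORECASE)[0]
    payload = re.sub(r'\"\s*\+\s*\"', '', payload)       # undo "..."+"..."
    payload = re.sub(r'[^A-Za-z0-9+\\/=]', '', payload)  # keep base64 chars
    return base64.b64decode(payload)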
Code example #29
	def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
		#try:
		try:
			sources = []
			if url == None: 
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
				return sources
			
			base_link = self.base_link
			
			try:
				if url[0].startswith('http'):
					base_link = url[0]
				mid = re.findall('-(\d+)', url[0])[-1]
			except:
				if url.startswith('http'):
					base_link = url
				mid = re.findall('-(\d+)', url)[-1]

			try:
				if len(url[1]) > 0:
					episode = url[1]
				else:
					episode = None
			except:
				episode = None

			#print mid

			links_m = []
			trailers = []
			headers = {'Referer': self.base_link}
			
			if testing == False:
				try:		
					u = urlparse.urljoin(self.base_link, url[0])
					#print u
					r = client.request(u, headers=headers, IPv4=True)
					#regex = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
					#matches = re.finditer(regex, r, re.MULTILINE)
					matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(r)
					for match in matches:
						try:
							#print match
							if 'youtube.com' in match:
								match = match.replace('embed/','watch?v=')
								trailers.append(match)
						except:
							pass
				except Exception as e:
					pass
					
				for trailer in trailers:
					links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer', testing=testing)
			
			try:
				u = urlparse.urljoin(self.base_link, self.server_link % mid)
				#print u
				r = client.request(u, headers=headers, XHR=True, IPv4=True)
				r = json.loads(r)['html']
				r = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
				ids = client.parseDOM(r, 'li', ret='data-id')
				servers = client.parseDOM(r, 'li', ret='data-server')
				labels = client.parseDOM(r, 'a', ret='title')
				r = zip(ids, servers, labels)
				
				for eid in r:
					#print r
					try:
						sub_url = None
						try:
							ep = re.findall('episode.*?(\d+):.*?',eid[2].lower())[0]
						except:
							ep = 0
						
						if (episode is None) or (int(ep) == int(episode)):
							
							url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
							script = client.request(url, IPv4=True)
							#print script
							
							if '$_$' in script:
								params = self.uncensored1(script)
							elif script.startswith('[]') and script.endswith('()'):
								params = self.uncensored2(script)
							elif '_x=' in script and '_y=' in script:
								params = self.uncensored3(script)
							else:
								raise Exception()
							u = urlparse.urljoin(self.base_link, self.sourcelink % (eid[0], params['x'], params['y']))
							
							#print u
							
							r = client.request(u, IPv4=True)
							url = json.loads(r)['playlist'][0]['sources']
							#print url
							
							try:
								url = [i['file'] for i in url]
							except:
								url = [url['file']]
								
							#print url
							
							#url = [client.googletag(i) for i in url]
							#print url
							
							#url = [i[0] for i in url if i]
							#print url
							
							try:
								sub_url = json.loads(r)['playlist'][0]['tracks'][0]['file']
							except:
								pass
							
							for s in url:
								links_m = resolvers.createMeta(s, self.name, self.logo, '720p', links_m, key, vidtype='Movie', sub_url=sub_url, testing=testing)
					except:
						pass
			except:
				pass
				
			sources += links_m
			
			if len(sources) == 0:
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
				return sources
			
			log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
			return sources
		except Exception as e:
			log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
			return sources
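The token-script handling above recurs across several of these providers: the server returns one of three obfuscated scripts, fingerprinted cheaply before decoding. A sketch of the dispatch alone (the uncensored* decoders are provider-specific and not reproduced here):

def pick_token_decoder(script):
    if '$_$' in script:
        return 'uncensored1'
    if script.startswith('[]') and script.endswith('()'):
        return 'uncensored2'
    if '_x=' in script and '_y=' in script:
        return 'uncensored3'
    raise ValueError('unrecognized token script')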