Example #1
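A resolver for Openload embeds. It tries a chain of decode strategies in order (PhantomJS, a login/key API, two ID-decoding fallbacks, and finally device pairing) and returns a (video_url, videoData, error) tuple.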
def openloadS(url, videoData=None, usePairing=True, session=None):
	try:
		ret_error = ''
		video_id = None  # initialised up front so the outer error handler can always reference it
		if videoData is None:
			videoData = client.request(url, headers=openloadhdr)
			
		try:
			ol_id = client.search_regex('<span[^>]+id="[^"]+"[^>]*>([0-9A-Za-z]+)</span>', videoData, 'openload ID')
		except Exception:
			ol_id = None
		try:
			video_id = match_id(url)
		except Exception:
			video_id = None
		log(type='INFO',method='openloadS', err=u'OpenLoad ID: %s' % video_id)
		video_url = None
		e = ''
		try:
			phantomjs_setting = control.setting('use_phantomjs')
			# phantomjs_choices[1] appears to be "enabled" (honouring the per-session toggle)
			# and phantomjs_choices[2] "always on"; that reading is inferred from this code.
			if (USE_PHANTOMJS and (session is None or control.setting('%s-%s' % (session, 'Use-PhantomJS')) == True) and phantomjs_setting == control.phantomjs_choices[1]) or phantomjs_setting == control.phantomjs_choices[2]:
				log(type='INFO',method='openloadS', err=u'trying phantomjs method: %s' % (video_id))
				try:
					v_url, ok = phantomjs.decode(url, user_agent=client.USER_AGENT)
					if not ok:
						ret_error = v_url
						raise DecodeError(ret_error)
					else:
						video_url = v_url
						ret_error = ''
						log(type='SUCCESS',method='openloadS', err=u'*PhantomJS* method is working: %s' % video_id)
				except DecodeError:
					raise
				except Exception:
					raise DecodeError('phantomjs not working')
			else:
				raise DecodeError('phantomjs is disabled')
		except DecodeError as e:
			try:
				if USE_LOGIN_KEY and video_url is None:
					log(type='INFO',method='openloadS', err=u'%s; trying L/K API method: %s' % (e,video_id))
					v_url, cont, cu, dlk, ret_error = link_from_api(video_id)
					if v_url is None:
						raise DecodeError('%s' % ret_error)
					else:
						ret_error = ''
						video_url = v_url
						log(type='SUCCESS',method='openloadS', err=u'*L/K API* method is working: %s' % video_id)
				else:
					raise DecodeError('L/K method disabled via hard coded option')
			except DecodeError as e:
				if USE_DECODING1 and video_url is None:
					log(type='INFO',method='openloadS', err=u'%s; falling back to decode_id method: %s' % (e,video_id))
					try:
						stream_url = 'https://openload.co/stream/%s?mime=true'
						decoded = decode_id(ol_id)
						video_url = stream_url % decoded
					except DecodeError as e:
						pass
				if USE_DECODING2 and video_url is None:
					log(type='INFO',method='openloadS', err=u'%s; falling back to method with evaluating: %s' % (e,video_id))
					try:
						decoded = eval_id_decoding(videoData, ol_id)
						# Format the decoded ID into the stream URL template
						# (video_url itself is still None at this point).
						video_url = 'https://openload.co/stream/%s?mime=true' % decoded
					except DecodeError as e:
						pass
				if USE_PAIRING:
					try:
						if usePairing and video_url is None:
							log(type='INFO',method='openloadS', err=u'%s; falling back to method with pairing: %s' % (e,video_id))
							title, video_url = pairing_method(video_id)
							if video_url is None:
								raise DecodeError('Pairing not working')
							ret_error = ''
							log(type='SUCCESS',method='openloadS', err=u'*Pairing* method is working: %s' % video_id)
						elif video_url is None:
							ret_error = 'pairing is the only option available'
							log(type='INFO',method='openloadS', err=u'%s; %s : %s' % (e,ret_error,video_id))
						else:
							ret_error = ''
					except DecodeError as e:
						video_url = None
						ret_error = str(e)
						print(ret_error)

		return (video_url, videoData, ret_error)
	except Exception as e:
		ret_error = '%s ID:%s' % (e, video_id)
		log(type='ERROR',method='openloadS', err=u'%s: %s' % (e, video_id))
		
		return (None, videoData, ret_error)
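The decode chain above boils down to "try each strategy in order and keep the first URL that works". A minimal standalone sketch of that pattern (the helper and the strategy callables are hypothetical, not part of this module):

def resolve_with_fallbacks(video_id, strategies):
	# strategies: list of (name, callable) pairs, tried in order.
	last_error = ''
	for name, strategy in strategies:
		try:
			video_url = strategy(video_id)
			if video_url:
				return video_url, ''
			last_error = '%s returned no URL' % name
		except Exception as e:
			last_error = '%s: %s' % (name, e)
	return None, last_error

The get_sources method below comes from a provider class (hence the self parameter and the extra indent): it searches the site for the requested title, filters the results by title, season, and year, then resolves each listed server into playable links.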
	def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
		try:
			sources = []
			if control.setting('Provider-%s' % name) == False:
				log('INFO','get_sources','Provider Disabled by User')
				return sources
			if url is None:
				log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
				return sources
			
			myts = str(((int(time.time())/3600)*3600))  # current epoch time rounded down to the hour
			log('INFO','get_sources-1', 'url: %s' % url, dolog=False)
			token_error = False
			urls = []
			sub_url = None
			page_url = None
			
			if not str(url).startswith('http'):
				try:
					data = urlparse.parse_qs(url)
					data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

					title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

					try:
						year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
					except:
						try:
							year = data['year']
						except:
							year = None
					try: episode = data['episode']
					except: pass

					query = {'keyword': title}
					search_url = urlparse.urljoin(self.base_link, '/search')
					search_url = search_url + '?' + urllib.urlencode(query)
					result = proxies.request(search_url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
					
					log('INFO','get_sources-2', '%s' % search_url, dolog=False)
					
					rs = client.parseDOM(result, 'div', attrs = {'class': '[^"]*movie-list[^"]*'})[0]
					r = client.parseDOM(rs, 'div', attrs = {'class': 'item'})
					r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs = {'class': 'name'})) for i in r]
					r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and  len(i[1]) > 0]
					r = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in r]
					
					if 'season' in data:
						r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r]
						
						possible_hits = []
						for i in r:
							if cleantitle.get(title).lower() == cleantitle.get(i[1]).lower():
								possible_hits.append((i[0], [[i[1], u'1']]))
							
						url = [(i[0], re.findall('(.+?) (\d+)$', i[1])) for i in r]

						for i in possible_hits:
							url.append(i)
							
						url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0]
						
						url = [i for i in url if cleantitle.get(title) in cleantitle.get(i[1])]

						url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
						
						if len(url) == 0:
							url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
						if len(url) == 0:
							# 'season' itself is not defined at this point, so read it from data.
							url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1] + str(data['season']))]
					else:
						url = [i for i in r if cleantitle.get(title) in cleantitle.get(i[1])]
					
					if len(url) == 0:
						log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
						return sources
					
					for urli in url:
						url = urli[0]
						url = urlparse.urljoin(self.base_link, url)
						urls.append(url)
					
				except Exception as e:
					raise Exception(e)

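			# Probe each candidate URL: fetch the page, require a series page when a
			# season was requested, and keep the first page whose <title> year matches.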
			vidtype = 'Movie'
			result = None  # stays None if no candidate page matches
			for url in urls:
				try:
					try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
					except: pass
					
					log('INFO','get_sources-3', url, dolog=False)

					referer = url
					page_url = url
					
					result = proxies.request(url, headers=self.headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
					
					if 'data-type="series"' not in result:
						raise Exception('Not a TV-Series')
					
					vidtype = 'Show'
					# Use the last <title> value that contains a 4-digit year for the year check.
					atr = [i for i in client.parseDOM(result, 'title') if len(re.findall('(\d{4})', i)) > 0][-1]

					if 'season' in data:
						try: season = data['season']
						except: pass
						
						years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '%s' % str(int(year) + int(season)), '%s' % str(int(year) - int(season))]
						mychk = False
						for y in years:
							if y in atr: 
								mychk = True
						result = result if mychk == True else None
						if mychk == True:
							break
					else:
						result = result if year in atr else None
						
					if result is not None:
						break
				except Exception as e:
					log('FAIL','get_sources-3', '%s : %s' % (url,e), dolog=False)
					
			if result is None:
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
				return sources

			try:
				myts = re.findall(r'data-ts="(.*?)"', result)[0]
			except Exception:
				log('INFO','get_sources-3', 'could not parse ts! will use generated one: %s' % myts, dolog=False)
				
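			# Collect any YouTube links found on the page and register them as trailers.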
			trailers = []
			links_m = []
			
			if not testing:
				try:
					matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(result)
					for match in matches:
						try:
							if 'youtube.com' in match:
								match = match.replace('embed/','watch?v=')
								trailers.append(match)
						except:
							pass
				except Exception as e:
					pass
					
				for trailer in trailers:
					links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer', testing=testing)
			
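			# Translate the site's quality badge into a nominal resolution and rip type:
			# cam/TS sources become 480p CAM, HD becomes 720p BRRIP, anything else 480p BRRIP.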
			riptype = None
			try: quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
			except: quality = 'hd'
			if quality == 'cam' or quality == 'ts': 
				quality = '480p'
				riptype = 'CAM'
			elif quality == 'hd' or 'hd ' in quality: 
				quality = '720p'
				riptype = 'BRRIP'
			else: 
				quality = '480p'
				riptype = 'BRRIP'

			result_servers = self.get_servers(url, proxy_options=proxy_options)
			result_servers = client.parseDOM(result_servers, 'ul', attrs = {'data-range-id': '0'})
			result_servers = zip(client.parseDOM(result_servers, 'a', ret='data-id'), client.parseDOM(result_servers, 'a'))
			
			result_servers = [(i[0], re.findall('(\d+)', i[1])) for i in result_servers]
			
			servers = [(i[0], ''.join(i[1][:1])) for i in result_servers]
			
			try: servers = [i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode)]
			except: pass
			
			for s in servers:
				try:
					video_url = None
					if '1080' in s[1]: 
						quality = '1080p'
						#riptype = 'BRRIP'
					elif '720' in s[1] or 'hd' in s[1].lower(): 
						quality = '720p'
						#riptype = 'BRRIP'
					elif '480' in s[1]: 
						quality = '480p'
						#riptype = 'BRRIP'
					elif 'cam' in s[1].lower() or 'ts' in s[1].lower(): 
						quality = '480p'
						#riptype = 'CAM'
					else:
						quality = '480p'
						#riptype = 'CAM'
				
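					# Ask the site's hash endpoint for this server's stream metadata; the
					# query is signed by __get_token and retried with the fallback token
					# algorithm if the server flags an error.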
					if video_url is None:
						headers = {'X-Requested-With': 'XMLHttpRequest'}
						hash_url = urlparse.urljoin(self.base_link, self.hash_link)
						
						query = {'ts': myts, 'id': s[0], 'update': '0', 'server':'36'}
						
						query.update(self.__get_token(query))
						hash_url = hash_url + '?' + urllib.urlencode(query)
						headers['Referer'] = urlparse.urljoin(url, s[0])
						headers['Cookie'] = self.headers['Cookie']
						log('INFO','get_sources-4.b', '%s' % hash_url, dolog=False)
						result = proxies.request(hash_url, headers=headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
						result = json.loads(result)
						
						if 'error' in result and result['error'] == True:
							token_error = True
							query.update(self.__get_token(query, token_error=token_error))
							# Rebuild from the base hash link so the retry does not append
							# a second query string onto the already-built hash_url.
							hash_url = urlparse.urljoin(self.base_link, self.hash_link) + '?' + urllib.urlencode(query)
							result = proxies.request(hash_url, headers=headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
							result = json.loads(result)
							
							query = {'id': s[0], 'update': '0'}
							query.update(self.__get_token(query, token_error=token_error))
						else:
							token_error = False
							queryx = {'id': s[0], 'update': '0'}
							query.update(self.__get_token(queryx))
						
						url = url + '?' + urllib.urlencode(query)
					
						log('INFO','get_sources-5', result, dolog=False)
						
						if result['target'] != "":
							pass
						else:
							grabber = result['grabber']
							grab_data = grabber

							grabber_url = urlparse.urljoin(self.base_link, self.grabber_api)
							
							if '?' in grabber:
								grab_data = grab_data.split('?')
								grabber_url = grab_data[0]
								grab_data = grab_data[1]
								
							grab_server = str(urlparse.parse_qs(grab_data)['server'][0])
							
							b, resp = self.decode_t(result['params']['token'], -18)
							if b == False:
								raise Exception(resp)
							token = resp
							b, resp = self.decode_t(result['params']['options'], -18)
							if b == False:
								raise Exception(resp)
							options = resp
							
							grab_query = {'ts':myts, grabber_url:'','id':result['params']['id'],'server':grab_server,'mobile':'0','token':token,'options':options}
							tk = self.__get_token(grab_query, token_error)

							if tk is None:
								raise Exception('video token algo')
							grab_info = {'token':token,'options':options}
							del query['server']
							query.update(grab_info)
							query.update(tk)
							
							sub_url = result['subtitle']
							if not sub_url:
								sub_url = None
							
							if '?' in grabber:
								grabber += '&' + urllib.urlencode(query)
							else:
								grabber += '?' + urllib.urlencode(query)
						
							if grabber is not None and not grabber.startswith('http'):
								grabber = 'http:' + grabber
								
							log('INFO','get_sources-6', grabber, dolog=False)

							result = proxies.request(grabber, headers=headers, referer=url, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)

							result = json.loads(result)
						
						if 'data' in result:
							result = [i['file'] for i in result['data'] if 'file' in i]
							
							for i in result:
								video_url = i
								links_m = resolvers.createMeta(i, self.name, self.logo, quality, links_m, key, riptype, vidtype=vidtype, sub_url=sub_url, testing=testing)
						else:
							target = result['target']
							b, resp = self.decode_t(target, -18)
							if b == False:
								raise Exception(resp)
							target = resp
							sub_url = result['subtitle']
							if not sub_url:
								sub_url = None
							
							if target is not None and not target.startswith('http'):
								target = 'http:' + target
								
							video_url = target
							links_m = resolvers.createMeta(target, self.name, self.logo, quality, links_m, key, riptype, vidtype=vidtype, sub_url=sub_url, testing=testing)

				except Exception as e:
					log('FAIL', 'get_sources-7','%s' % e, dolog=False)
					
				try:
					if video_url is None and USE_PHANTOMJS and control.setting('use_phantomjs') != control.phantomjs_choices[0]:
						vx_url = '%s/%s' % (page_url,s[0])
						log(type='INFO',method='get_sources-4.a.1', err=u'trying phantomjs method: %s' % vx_url)
						try:
							v_url, ok = phantomjs.decode(vx_url, js='fmovies.js')
							if not ok:
								ret_error = v_url
								raise Exception(ret_error)
							else:
								video_url = v_url
								ret_error = ''
								log(type='SUCCESS',method='get_sources-4.a.2', err=u'*PhantomJS* method is working: %s' % vx_url)
								links_m = resolvers.createMeta(video_url, self.name, self.logo, quality, links_m, key, riptype, vidtype=vidtype, sub_url=sub_url, testing=testing)
						except:
							raise Exception('phantomjs not working')
					else:
						raise Exception('phantomjs is disabled')
				except Exception as e:
					log(type='FAIL',method='get_sources-4.a.3', err=u'%s' % e)

			sources += links_m
			
			if len(sources) == 0:
				log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
				return sources
			
			log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
			return sources
		except Exception as e:
			log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
			return sources
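The token handshake inside get_sources follows a sign-then-retry shape. A minimal sketch of just that flow, assuming only that the signer returns extra query parameters and that the endpoint answers {'error': true} when the primary token algorithm is rejected (request_with_token_retry, sign_query and fetch_json are hypothetical stand-ins, not part of the class):

import urllib

def request_with_token_retry(base_url, query, sign_query, fetch_json):
	# First attempt, signed with the primary token algorithm.
	query.update(sign_query(query))
	result = fetch_json(base_url + '?' + urllib.urlencode(query))
	if result.get('error') == True:
		# The server rejected the token: re-sign with the fallback
		# algorithm and retry exactly once.
		query.update(sign_query(query, token_error=True))
		result = fetch_json(base_url + '?' + urllib.urlencode(query))
	return result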