Example #1
	def search_tvshow(self, showname, season, episode, year=None, imdb_id=None, tvdb_id=None, return_sources=False):
		DB.connect()
		if self.cache_results:
			self.hashid = hashlib.md5(showname+str(season)+str(episode)).hexdigest()
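			# stale_cache is presumably a view over search_cache that selects expired rows; deleting them up front prunes the cache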
			DB.execute("DELETE FROM search_cache WHERE search_cache.cache_id in (select cache_id FROM stale_cache)")
			DB.commit()
		#self.result_stats = DB.query("SELECT service, host, attempts, success, ROUND (success / attempts * 100 ) AS percent FROM scraper_stats", force_double_array=True)
		self._get_active_resolvers()
		args = {"showname": showname, "season": season, "episode": episode, "year": year, "domains": self.domains, "imdb_id": imdb_id, "tmdb_id": tvdb_id}
		workers = ThreadPool(self.threadpool_size)
		if self.show_scraper_progress:
			self.PB = ProgressBar()
			self.PB.new('Searching for TV Sources', self.enabled_scrapers)
			self.PB.results = 0
		for index in range(0, self.enabled_scrapers):
			service = self.get_scraper_by_index(index).service
			if self.cache_results and self.get_scraper_by_index(index).is_cachable:
				SQL = '''SELECT hashid, service, host, title, url, quality, debrid, alldebrid, realdebrid, rpnet, premiumize FROM fresh_cache WHERE hashid=? AND service=?'''
				cached = DB.query_assoc(SQL, [self.hashid, service], force_double_array=True)
			else:
				cached = False
			if cached:
				if self.show_scraper_progress and self.PB:
					self.PB.results += len(cached)
					self.PB.next('Found [COLOR yellow]%s[/COLOR] cached sources, %s total' % (len(cached), self.PB.results))
				self.search_results += cached
			else:
				if 'search_tvshow' in dir(self.get_scraper_by_index(index)):
					add_task = True
					if self.get_scraper_by_index(index).require_auth and (ADDON.get_setting(service + '_username') == '' or ADDON.get_setting(service + '_password') == ''): add_task = False
					if add_task: workers.queueTask(self.get_scraper_by_index(index).search_tvshow, args=args, taskCallback=self.process_results)
		workers.joinAll()
		resolved_url = None
		DB.disconnect()
		if return_sources:
			return self.process_sources(self)
		else:
			raw_url = self.select_stream()
			if raw_url:
				resolved_url = self.resolve_url(raw_url)
			return resolved_url
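A minimal usage sketch for the method above (a sketch only: ScraperManager is a stand-in name for whatever class these methods belong to, which the example does not show):

	# Resolve a playable URL for season 1, episode 1 of a show, then hand it to Kodi.
	scrapers = ScraperManager()  # assumed constructor; cache_results, resolvers, etc. configured elsewhere
	url = scrapers.search_tvshow('Some Show', 1, 1, year=2015)
	if url:
		xbmc.Player().play(url)  # requires import xbmc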
Example #2
	def search_movie(self, title, year, imdb_id=None, tmdb_id=None, return_sources=False):
		DB.connect()
		if self.cache_results:
			self.hashid = hashlib.md5(title+str(year)).hexdigest()
			DB.execute("DELETE FROM search_cache WHERE search_cache.cache_id in (select cache_id FROM stale_cache)")
			DB.commit()
		self.result_stats = DB.query("SELECT service, host, attempts, success, ROUND(success * 100.0 / attempts) AS score FROM scraper_stats", force_double_array=True)  # 100.0 forces real division; integer columns would otherwise floor the score to 0 or 100
		self._get_active_resolvers()
		args = {"title": title, "year": year, "domains": self.domains, "imdb_id": imdb_id, "tmdb_id": tmdb_id}
		workers = ThreadPool(self.threadpool_size)
		for index in range(0, self.enabled_scrapers):
			service = self.get_scraper_by_index(index).service
			if self.cache_results:
				SQL = '''SELECT hashid, service, host, title, url, quality FROM fresh_cache WHERE hashid=? AND service=?'''
				cached = DB.query_assoc(SQL, [self.hashid, service], force_double_array=True)
			else:
				cached = False
			if cached:
				self.search_results += cached
			else:
				if 'search_movie' in dir(self.get_scraper_by_index(index)):
					add_task = True
					if self.get_scraper_by_index(index).require_auth and (ADDON.get_setting(service + '_username') == '' or ADDON.get_setting(service + '_password') == ''): add_task = False
					if add_task: workers.queueTask(self.get_scraper_by_index(index).search_movie, args=args, taskCallback=self.process_results)
		workers.joinAll()
		resolved_url = None
		DB.disconnect()
		if return_sources:
			return self.process_sources(self)
		else:
			raw_url = self.select_stream()
			if raw_url:
				resolved_url = self.resolve_url(raw_url)
			return resolved_url
Example #3
    def search_movie(self, title, year):
        self.hashid = hashlib.md5(title + str(year)).hexdigest()
        DB = MyDatabaseAPI(DB_FILE)
        DB.execute(
            "DELETE FROM search_cache WHERE hash=? AND strftime('%s','now') -  strftime('%s',ts) > (3600 * ?)",
            [self.hashid, DECAY],
        )
        DB.commit()
        self._get_active_resolvers()
        args = {"title": title, "year": year, "domains": self.domains}
        workers = ThreadPool(5)
        for index in range(0, self.enabled_scrapers):
            service = self.get_scraper_by_index(index).service
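            # hashid and service are interpolated directly into the SQL below; they are internal values here,
            # but binding them as parameters (like DECAY, hash, and service) would be the safer pattern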
            SQL = """
				SELECT
				"%s" AS hashid,
				"%s" AS service,
				host,
				display as title,
				url,
				quality,""" % (
                self.hashid,
                service,
            )
            SQL += """strftime("%s",'now') -  strftime("%s",ts) < (3600 * ?) AS fresh
			FROM search_cache 
			WHERE
			hash=? AND service=?
			"""
            cached = DB.query_assoc(SQL, [DECAY, self.hashid, service])
            if cached:
                self.search_results += cached
            else:
                workers.queueTask(self.get_scraper_by_index(index).search_movie, args, self.process_results)
        workers.joinAll()
        resolved_url = None
        raw_url = self.select_stream()
        if raw_url:
            resolved_url = self.resolve_url(raw_url)
        return resolved_url
Example #4
	def __init__(self, id, url, raw_url, filename, file_id, video_type='tvshow', save_dir=False):
		self.win = xbmcgui.Window(10000)
		self.threads = NUMBER_THREADS
		self.block_size = BLOCK_SIZE
		self.total_bytes = False  # False until the remote size is known
		self.total_blocks = 0
		self.cached_bytes = 0
		self.cached_blocks = 0
		self.id = id
		self.url = url
		self.filename = filename
		self.raw_url = raw_url
		self.file_id = file_id
		self.save_dir = save_dir
		self.video_type = video_type
		self.Pool = ThreadPool(NUMBER_THREADS)
		self.completed = []
		self.__aborting = False
		self.set_headers(url)
		set_thread_count(0)
Example #5
 def search_movies(self,
                   title,
                   year,
                   imdb_id=None,
                   tmdb_id=None,
                   return_sources=False):
     self._start_time = time.time()
     DB.connect()
     self.hashid = hashlib.md5(title + str(year)).hexdigest()
     last_hash_id = Plugin().get_property('last_hash_id')
     if self.hashid == last_hash_id:
         self.skip_autoplay = True
     else:
         self.skip_autoplay = False
         Plugin().set_property('last_hash_id', self.hashid)
     if self.cache_results:
         self.processor = Thread(target=self.process_queue)
         self.processor.start()
     self._get_active_resolvers()
     args = {
         "title": title,
         "year": year,
         "domains": self.domains,
         "imdb_id": imdb_id,
         "tmdb_id": tmdb_id
     }
      self.threadpool_size = self.enabled_scrapers if self.threadpool_size == 0 else self.threadpool_size
      workers = ThreadPool(self.threadpool_size)
     if self.show_scraper_progress:
         self.PB = ProgressBar()
         self.PB.new('Searching for Movie Sources', self.enabled_scrapers)
         self.PB.results = 0
     for index in range(0, self.enabled_scrapers):
         if self.show_scraper_progress and self.PB:
             if self.PB.is_canceled(): break
         service = self.get_scraper_by_index(index).service
         if self.cache_results:
             SQL = "SELECT result FROM fresh_cache WHERE hash=? AND service=?"
             results = DB.query(SQL, [self.hashid, service],
                                force_double_array=True)
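              # each cached row stores a whole result dict pickled into a single column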
             cached = [pickle.loads(r[0]) for r in results]
         else:
             cached = False
         if cached:
             number = len(cached)
             ADDON.log("Search returned %s cached links from %s" %
                       (number, service))
             if self.show_scraper_progress and self.PB:
                 self.PB.results += number
                 self.PB.next(
                     'Found [COLOR yellow]%s[/COLOR] cached sources (%s total)'
                     % (number, self.PB.results))
             self.search_results += cached
         else:
             if 'search_movie' in dir(self.get_scraper_by_index(index)):
                 if self.get_scraper_by_index(index).require_auth and (
                         ADDON.get_setting(service + '_username') == ''
                         or ADDON.get_setting(service + '_password') == ''):
                     continue
                 workers.queueTask(
                     self.get_scraper_by_index(index).search_movie,
                     args=args,
                     taskCallback=self.process_results)
              else:
                  ADDON.log("Scraper %s does not implement search_movie" % service)
     workers.joinAll()
     resolved_url = None
     if self.cache_results:
         self.queue_SQL('EOL')
     DB.disconnect()
     if return_sources:
         return self.process_sources(self)
     else:
         raw_url, autoplay = self.select_stream()
         if raw_url:
             resolved_url = self.resolve_url(raw_url, autoplay)
         return resolved_url
class Transmogrifier():
	def __init__(self, id, url, raw_url, filename, file_id, video_type='tvshow', save_dir=False):
		self.win = xbmcgui.Window(10000)
		self.threads = NUMBER_THREADS
		self.block_size = BLOCK_SIZE
		self.total_bytes = False  # False until get_target_info() learns the remote size
		self.total_blocks = 0
		self.cached_bytes = 0
		self.cached_blocks = 0
		self.id = id
		self.url = url
		self.filename = filename
		self.raw_url = raw_url
		self.file_id = file_id
		self.save_dir = save_dir
		self.video_type = video_type
		self.Pool = ThreadPool(NUMBER_THREADS)
		self.completed = []
		self.__aborting = False
		self.set_headers(url)
		set_thread_count(0)
	
	def set_headers(self, url):
		self.__headers = {
			'Connection': 'keep-alive',
			'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.93 Safari/537.36',
			'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
			'Accept': 'image/webp,image/*,*/*;q=0.8',
			'Accept-Language': 'en-us,en;q=0.5',
			'Accept-Encoding': 'gzip, deflate, sdch',
		}
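		# Kodi-style URLs may append request headers after a pipe, e.g. http://host/file.mkv|User-Agent=...&Referer=...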
		temp = url.split("|")
		if len(temp) > 1:
			header_string = temp[1]
			test = re.search('User-Agent=(.+?)(&|$)+', header_string)
			if test:
				self.__headers['User-Agent'] = test.group(1)
			test = re.search('Referer=(.+?)(&|$)+', header_string)
			if test:
				self.__headers['Referer'] = test.group(1)
			test = re.search('Cookie=(.+?)(&|$)+', header_string)
			if test:
				self.__headers['Cookie'] = test.group(1)
				
	def check_abort(self):
		return get_property('abort_id') == self.file_id or self.__aborting
	
	def abort_all(self):
		if self.__aborting is False:
			self.Input.__abort = True  # caution: name mangling sets _Transmogrifier__abort on Input, not InputHandler's own private __abort
			clear_property('abort_id')
			clear_property("caching.file_id")
			clear_property(self.file_id +'.status')
			clear_property('streaming.abort')
			clear_property('streaming.tail_requested')
			ADDON.log("Aborting Transmogrification...", LOG_LEVEL.VERBOSE)
			ADDON.log("Cleaning Cache...", LOG_LEVEL.VERBOSE)
			ADDON.log("Waiting to Transmogrify...", LOG_LEVEL.VERBOSE)
			self.__aborting = True
		
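	# Worker task: fetch one block; in stream mode write it out immediately, otherwise queue it for ordered writing.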
	def transmogrify(self, block_number):
		if get_property("streaming.seek_block"):
			if block_number < int(get_property("streaming.seek_block")): 
				return [True, block_number]
		if self.check_abort(): 
			ADDON.log("Abort All", LOG_LEVEL.STANDARD)
			self.abort_all()
			return False
		block, cached = self.Input.get_block(block_number)
		if not block: 
			return [False, block_number]

		if self.video_type == 'stream':
			self.Input.save_block(block_number, block)
			self.cached_bytes += len(block)
			percent, delta, kbs = self.calculate_progress()
			set_property(self.file_id +'.status', json.dumps({'id': self.id, 'total_bytes': self.total_bytes, 'cached_bytes': self.cached_bytes, 'speed': kbs}))
			ADDON.log("Streaming Progress: %s/%s %s KBs" % (self.cached_bytes, self.total_bytes, kbs), LOG_LEVEL.STANDARD)
		else:
			offset_byte = block_number * self.block_size
			self.Output.queue_block(block, offset_byte, block_number)
			self.cached_bytes += len(block)
			percent, delta, kbs = self.calculate_progress()
			self.cached_blocks += 1
			set_property(self.file_id +'.status', json.dumps({'id': self.id, 'total_bytes': self.total_bytes, 'cached_bytes': self.cached_bytes, 'cached_blocks': self.cached_blocks, 'total_blocks': self.total_blocks, 'percent': percent, 'speed': kbs, 'active_threads': get_property("active_threads")}))
			ADDON.log("Caching Progress: %s%s %s/%s %s KBs" % (percent, '%', self.cached_bytes, self.total_bytes, kbs), LOG_LEVEL.STANDARD)
		return [True, block_number]
			
			
	def transmogrified(self, result):
		if self.check_abort(): 
			self.abort_all()
			return False
		status = result[0]
		block_number = result[1]
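		# a failed block is requeued unless a seek has made it obsolete or streaming is aborting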
		if status is False and not get_property('streaming.abort'):
			if get_property("streaming.seek_block") and block_number < int(get_property("streaming.seek_block")): return
			ADDON.log("Requeue %s" % block_number, LOG_LEVEL.STANDARD)
			self.Pool.queueTask(self.transmogrify, block_number, block_number, self.transmogrified)

	def calculate_progress(self):
		try:
			now = time.time()
			delta = int(now - self.started)
			kbs = int(self.cached_bytes / (delta * 1024))
			percent = int(100 * self.cached_bytes / self.total_bytes)
			return percent, delta, kbs
		except:
			return False, False, False
		
	def start(self):
		valid = self.get_target_info()
		if valid:
			self.state_file = vfs.join(WORK_DIRECTORY, self.file_id + '.state')
			completed_blocks = []
			if vfs.exists(self.state_file):
				temp = ADDON.load_data(self.state_file)
				if int(temp['total_blocks']) == self.total_blocks:
					completed_blocks = temp['completed_blocks']
			self.Output = OutputHandler(self.video_type, self.filename, self.file_id, self.total_blocks, completed_blocks=completed_blocks, extension=self.extension, save_dir=self.save_dir)
			self.Input = InputHandler(self.url, self.raw_url, self.file_id, self.total_blocks, self.total_bytes, self.__headers, completed_blocks=completed_blocks)
			self.processor = Thread(target=self.Output.process_queue)
			self.processor.start()
			self.started = time.time()
			self.cached_bytes = 0
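			# total_blocks is effectively a floor (see get_target_info), so iterate through index total_blocks inclusive to cover the final partial block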
			for block_number in range(0, self.total_blocks+1):
				self.Pool.queueTask(self.transmogrify, block_number, block_number, self.transmogrified)

			
			self.processor.join()
			self.Pool.joinAll()
			percent, delta, kbs = self.calculate_progress()
			clear_property(self.file_id +'.status')
			message = 'Completed %s in %s second(s) at %s KB/s.' % (self.filename, delta, kbs)
			ADDON.log(message, LOG_LEVEL.VERBOSE)
			ADDON.raise_notify(ADDON_NAME, message)
		else:
			ADDON.log('Invalid url, sorry!', LOG_LEVEL.VERBOSE)
			ADDON.raise_notify(ADDON_NAME, "Unable to download source, try another")
		
	def stream(self, start_byte=0):
		first_block = self.get_block_number_from_byte(start_byte)
		self.Input = InputHandler(self.url, self.raw_url, self.file_id, self.total_blocks, self.total_bytes, self.__headers)
		self.Input.__streaming = True
		self.started = time.time()
		self.cached_bytes = 0
		for block_number in range(first_block, self.total_blocks+1):
			self.Pool.queueTask(self.transmogrify, block_number, block_number, self.transmogrified)
		return True

	def seek(self, start_byte):
		ADDON.log("Seek to byte %s " % start_byte, LOG_LEVEL.VERBOSE)
		first_block = self.get_block_number_from_byte(start_byte)
		ADDON.log("Seek to block %s " % first_block, LOG_LEVEL.VERBOSE)
		set_property("streaming.seek_block", str(first_block))

		set_property('streaming.abort', 'true')
		self.Pool.emptyQueue()
		time.sleep(.25)

		self.Input = InputHandler(self.url, self.raw_url, self.file_id, self.total_blocks, self.total_bytes, self.__headers)
		self.Input.__streaming = True
		self.started = time.time()
		for block_number in range(first_block, self.total_blocks+1):
			self.Pool.queueTask(self.transmogrify, block_number, block_number, self.transmogrified)
	
	def get_last_byte(self, last_byte):
		'''
		r = 'bytes=%s-' % last_byte
		set_property('streaming.abort', 'true')
		while True:
			if self.check_abort(): return False
			try:
				headers = self.__headers
				headers["Range"] = r
				req = urllib2.Request(self.url, headers=headers)
				f = urllib2.urlopen(req, timeout=2)
				last_byte = f.read()
				f.close()
				if last_byte:
					clear_property('streaming.abort')
					return last_byte
			except:
				pass
			time.sleep(.1)
		'''
		pass
		
	def read_block(self, start_byte=0):
		end_byte = (start_byte + self.block_size) - 1
		if end_byte > self.total_bytes: end_byte = self.total_bytes
		block_number = self.get_block_number_from_byte(start_byte)
		block = self.Input.read_block(block_number)
		if block:
			return block, end_byte, block_number
		else:
			return False, start_byte, block_number

	def get_block_number_from_byte(self, start_byte):
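		# floor(start_byte / block_size); e.g. with a 65536-byte block size, byte 131072 maps to block 2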
		block_number = int(math.floor(float(start_byte) / self.block_size))
		return block_number

	def request_tail_bytes(self):
		tail_bytes = ''
		self.tail_file = vfs.join(WORK_DIRECTORY, self.file_id + '.tail')
		if not get_property('streaming.tail_requested'):
			ADDON.log("Requesting Remote Tail Bytes")
			out_f = open(self.tail_file, 'wb')
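			# fetch the final 64 KiB of the file; container indexes (e.g. an MP4 moov atom) often sit at the end
			# note: headers below aliases self.__headers, so the added Range key persists on the shared dict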
			tail_byte = self.total_bytes - 65536
			r = 'bytes=%s-' % tail_byte
			headers = self.__headers
			headers["Range"] = r
			req = urllib2.Request(self.url, headers=headers)
			in_f = urllib2.urlopen(req, timeout=2)
			tail_bytes = in_f.read()
			out_f.write( tail_bytes )
			in_f.close()
			out_f.close()
			set_property('streaming.tail_requested', 'true')
		else:
			ADDON.log("Reading Cached Tail Bytes")
			in_f = open(self.tail_file, 'rb')
			tail_bytes = in_f.read()
			in_f.close()
		return tail_bytes
	
	def get_tail_bytes(self):
		return self.request_tail_bytes()
		
	def get_target_info(self):
		table = {
			"application/x-troff-msvideo":		"avi",
			"video/avi":						"avi",
			"video/msvideo":					"avi",
			"video/x-msvideo":					"avi",
			"video/quicktime":					"mov",
			"video/mp4":						"mp4",
			"video/x-matroska":					"mkv",
			"video/flv":						"flv",
			"video/x-flv":						"flv"
		}
		try:
			req = urllib2.Request(self.url, headers=self.__headers)
			self.net = urllib2.urlopen(req, timeout=3)
			self.headers = self.net.headers.items()
			try:
				self.total_bytes = int(self.net.headers["Content-Length"])
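				# Python 2 note: total_bytes / block_size is integer (floor) division, so ceil() is a no-op here;
				# the download loops compensate by iterating through index total_blocks inclusive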
				self.total_blocks = int(math.ceil(self.total_bytes / self.block_size))
			except: return False
			ADDON.log("Total blocks: %s" % self.total_blocks, LOG_LEVEL.VERBOSE)
			self.extension = False
			try:
				self.extension = re.search('filename="(.+?)\.(mkv|avi|mov|mp4|flv)"$', self.net.headers["Content-Disposition"], re.IGNORECASE).group(2).lower()
			except:pass
			if not self.extension:
				try:
					ADDON.log("Found content-type: %s" % self.net.headers["Content-Type"], LOG_LEVEL.VERBOSE)
					self.extension = table[self.net.headers["Content-Type"]]
				except:
					ADDON.log("No content-type found, assuming avi", LOG_LEVEL.VERBOSE)
					self.extension = 'avi'
			set_property(self.file_id +'.status', json.dumps({'id': self.id, 'total_bytes': self.total_bytes, 'cached_bytes': self.cached_bytes, 'cached_blocks': 0, 'total_blocks': self.total_blocks, 'percent': 0, 'speed': 0}))
		
			if self.video_type == 'stream':
				self.request_tail_bytes()
		
		except urllib2.URLError, e:
			ADDON.log("HTTP Error: %s" % e, LOG_LEVEL.VERBOSE)
			ADDON.raise_notify("%s ERROR" % ADDON_NAME, "Unable to open URL", "HTTP Error: %s" % e)
			return False
		if self.total_bytes is False:
			return False
		return True