def __open_part_file__(self, id):
    # file = open(os.path.join(self.tmp_path, self._filename + ".part%s" % id), "a+")
    # file = open(os.path.join(self.tmp_path, self._filename + ".part%s" % id), "r+b")
    self.file = filetools.file_open(filetools.join(self.tmp_path, self._filename + ".part%s" % id), "a+", vfs=VFS)
    self.file.close()
    self.file = filetools.file_open(filetools.join(self.tmp_path, self._filename + ".part%s" % id), "r+b", vfs=VFS)
    self.file.seek(self._download_info["parts"][id]["current"] - self._download_info["parts"][id]["start"], 0)
    return self.file
def download(item=None):
    ret = True
    if filetools.exists(elementum_path):
        if platformtools.dialog_yesno(config.get_localized_string(70784), config.get_localized_string(70783)):
            addon_file = filetools.file_open(filetools.join(elementum_path, 'addon.xml')).read()
            required = support.match(addon_file, patron=r'addon="([^"]+)').matches
            for r in required:
                xbmc.executebuiltin('InstallAddon(' + r + ')', wait=True)
            setting()
            platformtools.dialog_ok('Elementum', config.get_localized_string(70783))
        else:
            ret = False
    else:
        if platformtools.dialog_yesno(config.get_localized_string(70784), config.get_localized_string(70782)):
            pform = get_platform()
            url = support.match(elementum_url,
                                patron=r'<a href="([a-zA-Z0-9/\.-]+{}.zip)'.format(pform)).match
            support.info('OS:', pform)
            support.info('Extract IN:', elementum_path)
            support.info('URL:', url)
            if url:
                dl = downloadtools.downloadfile(host + url, filename)
                if dl == -3:
                    filetools.remove(filename)
                    dl = downloadtools.downloadfile(host + url, filename)
                if dl is None:
                    extract()
                    xbmc.sleep(1000)
                    addon_file = filetools.file_open(filetools.join(elementum_path, 'addon.xml')).read()
                    required = support.match(addon_file, patron=r'addon="([^"]+)').matches
                    for r in required:
                        xbmc.executebuiltin('InstallAddon(' + r + ')', wait=True)
                    setting()
                else:
                    ret = False
            else:
                ret = False
        else:
            ret = False
    return ret
def __init__(self, *args, **kwargs):
    logger.info()
    self.action_exitkeys_id = [xbmcgui.ACTION_STOP, xbmcgui.ACTION_BACKSPACE,
                               xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK]
    self.progress_control = None

    # set info
    f = filetools.file_open(INFO, 'r')
    full_info = f.read().split('\n')
    full_info = full_info[1:]
    f.close()

    full_info = "".join(full_info)
    info = jsontools.load(full_info)
    info = info["infoLabels"]

    self.setProperty("title", info["tvshowtitle"])
    self.setProperty("ep_title", "%dx%02d - %s" % (info["season"], info["episode"], info["title"]))

    if "episodio_imagen" in info:
        img = info["episodio_imagen"]
    else:
        img = filetools.join(config.get_runtime_path(), "resources", "noimage.png")
    self.setProperty("next_img", img)
def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False):
    logger.info("file=%s" % file)
    logger.info("dir=%s" % dir)

    if not dir.endswith(':') and not filetools.exists(dir):
        filetools.mkdir(dir)

    zf = zipfile.ZipFile(filetools.file_open(file, vfs=False))
    if not folder_to_extract:
        self._createstructure(file, dir)
    num_files = len(zf.namelist())

    for nameo in zf.namelist():
        name = nameo.replace(':', '_').replace('<', '_').replace('>', '_').replace('|', '_').replace('"', '_').replace('?', '_').replace('*', '_')
        logger.info("name=%s" % nameo)
        if not name.endswith('/'):
            logger.info("it's not a directory")
            try:
                (path, filename) = filetools.split(filetools.join(dir, name))
                logger.info("path=%s" % path)
                logger.info("name=%s" % name)
                if folder_to_extract:
                    if path != filetools.join(dir, folder_to_extract):
                        break
                else:
                    filetools.mkdir(path)
            except:
                pass

            if folder_to_extract:
                outfilename = filetools.join(dir, filename)
            else:
                outfilename = filetools.join(dir, name)
            logger.info("outfilename=%s" % outfilename)

            try:
                if filetools.exists(outfilename) and overwrite_question:
                    from platformcode import platformtools
                    dyesno = platformtools.dialog_yesno("File already exists",
                                                        "File %s to unzip already exists, do you want to overwrite it?" % filetools.basename(outfilename))
                    if not dyesno:
                        break
                    if backup:
                        import time
                        hora_folder = "Backup [%s]" % time.strftime("%d-%m_%H-%M", time.localtime())
                        backup = filetools.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract)
                        if not filetools.exists(backup):
                            filetools.mkdir(backup)
                        filetools.copy(outfilename, filetools.join(backup, filetools.basename(outfilename)))

                if not filetools.write(outfilename, zf.read(nameo), silent=True, vfs=VFS):  # TRUNCATES at the END in Kodi 19 with VFS
                    logger.error("File error " + nameo)
            except:
                import traceback
                logger.error(traceback.format_exc())
                logger.error("File error " + nameo)

    try:
        zf.close()
    except:
        logger.info("Error closing .zip " + file)
def unzip(dir, file):
    if file.lower().startswith('smb://'):
        temp = filetools.join(temp_path, os.path.split(file)[-1])
        filetools.copy(file, temp)
        file = temp
    with ZipFile(filetools.file_open(file, 'rb', vfs=False), 'r') as zf:
        zf.extractall(dir)
def _listdirs(self, file):
    zf = zipfile.ZipFile(filetools.file_open(file, vfs=False))
    dirs = []
    for name in zf.namelist():
        if name.endswith('/'):
            dirs.append(name)
    dirs.sort()
    return dirs
def __init__(self, url, path, filename=None, headers=[], resume=True, max_connections=10, part_size=2097152):
    # Parameters
    self._resume = resume
    self._path = path
    self._filename = filename
    self._max_connections = max_connections
    self._part_size = part_size
    self.states = type('states', (), {"stopped": 0, "connecting": 1, "downloading": 2, "completed": 3, "error": 4})

    self._block_size = 1024 * 100
    self._state = self.states.stopped
    self._write_lock = Lock()
    self._download_lock = Lock()
    self._headers = {"User-Agent": "Kodi/15.2 (Windows NT 10.0; WOW64) App_Bitness/32 Version/15.2-Git:20151019-02e7013"}
    self._speed = 0
    self._threads = [Thread(target=self.__start_part__) for x in range(self._max_connections)]
    self._speed_thread = Thread(target=self.__speed_metter__)

    # Update the headers
    self._headers.update(dict(headers))

    # Separate the headers from the url
    self.__url_to_headers__(url)

    # Get the server info
    self.__get_download_headers__()

    self._file_size = int(self.response_headers.get("content-length", "0"))

    if not self.response_headers.get("accept-ranges") == "bytes" or self._file_size == 0:
        self._max_connections = 1
        self._part_size = 0
        self._resume = False

    # Get the file name
    self.__get_download_filename__()

    # Open in "a+" mode so the file is created if it does not exist, then in "r+b" mode to be able to seek()
    self.file = filetools.file_open(filetools.join(self._path, self._filename), "a+")
    self.file = filetools.file_open(filetools.join(self._path, self._filename), "r+b")

    self.__get_download_info__()
def readbookmark(filepath):
    logger.debug()
    try:
        import urllib.parse as urllib
    except ImportError:
        import urllib

    bookmarkfile = filetools.file_open(filepath)
    lines = bookmarkfile.readlines()

    try:
        titulo = urllib.unquote_plus(lines[0].strip())
    except:
        titulo = lines[0].strip()
    try:
        url = urllib.unquote_plus(lines[1].strip())
    except:
        url = lines[1].strip()
    try:
        thumbnail = urllib.unquote_plus(lines[2].strip())
    except:
        thumbnail = lines[2].strip()
    try:
        server = urllib.unquote_plus(lines[3].strip())
    except:
        server = lines[3].strip()
    try:
        plot = urllib.unquote_plus(lines[4].strip())
    except:
        plot = lines[4].strip()

    # ContentTitle and channel fields added
    if len(lines) >= 6:
        try:
            contentTitle = urllib.unquote_plus(lines[5].strip())
        except:
            contentTitle = lines[5].strip()
    else:
        contentTitle = titulo

    if len(lines) >= 7:
        try:
            canal = urllib.unquote_plus(lines[6].strip())
        except:
            canal = lines[6].strip()
    else:
        canal = ""

    bookmarkfile.close()

    return canal, titulo, thumbnail, plot, server, url, contentTitle
def zip(self, dir, file):
    import os
    # binary mode so zipfile can write bytes to the underlying handle
    zf = zipfile.ZipFile(filetools.file_open(file, "wb", vfs=False), "w", zipfile.ZIP_DEFLATED)
    abs_src = os.path.abspath(dir)
    for dirname, subdirs, files in os.walk(dir):
        for filename in files:
            absname = os.path.abspath(os.path.join(dirname, filename))
            arcname = absname[len(abs_src) + 1:]
            zf.write(absname, arcname)
    zf.close()
def readbookmark(filepath):
    logger.info()
    import urllib

    bookmarkfile = filetools.file_open(filepath)
    lines = bookmarkfile.readlines()

    try:
        titulo = urllib.unquote_plus(lines[0].strip())
    except:
        titulo = lines[0].strip()
    try:
        url = urllib.unquote_plus(lines[1].strip())
    except:
        url = lines[1].strip()
    try:
        thumbnail = urllib.unquote_plus(lines[2].strip())
    except:
        thumbnail = lines[2].strip()
    try:
        server = urllib.unquote_plus(lines[3].strip())
    except:
        server = lines[3].strip()
    try:
        plot = urllib.unquote_plus(lines[4].strip())
    except:
        plot = lines[4].strip()

    # contentTitle and canal fields added
    if len(lines) >= 6:
        try:
            contentTitle = urllib.unquote_plus(lines[5].strip())
        except:
            contentTitle = lines[5].strip()
    else:
        contentTitle = titulo

    if len(lines) >= 7:
        try:
            canal = urllib.unquote_plus(lines[6].strip())
        except:
            canal = lines[6].strip()
    else:
        canal = ""

    bookmarkfile.close()

    return canal, titulo, thumbnail, plot, server, url, contentTitle
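# Hedged sketch of the bookmark layout that readbookmark() above expects: one
# url-quoted field per line, in the order the function reads them. The writer
# below is illustrative only (the addon's own bookmark writer is not shown in
# this section), and the file path passed to it is hypothetical.
from urllib.parse import quote_plus

def write_bookmark_sketch(filepath, titulo, url, thumbnail, server, plot, contentTitle, canal):
    # field order mirrors lines[0]..lines[6] consumed by readbookmark()
    lines = [titulo, url, thumbnail, server, plot, contentTitle, canal]
    with open(filepath, "w") as f:
        f.write("\n".join(quote_plus(line) for line in lines))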
def download(item=None):
    if filetools.exists(elementum_path):
        if platformtools.dialog_yesno(config.get_localized_string(70784), config.get_localized_string(70783)):
            addon_file = filetools.file_open(filetools.join(elementum_path, 'addon.xml')).read()
            required = support.match(addon_file, patron=r'addon="([^"]+)').matches
            for r in required:
                xbmc.executebuiltin('InstallAddon(' + r + ')', wait=True)
            setting()
            platformtools.dialog_ok('Elementum', config.get_localized_string(70783))
    else:
        if platformtools.dialog_yesno(config.get_localized_string(70784), config.get_localized_string(70782)):
            pform = get_platform()
            url = support.match(elementum_url,
                                patronBlock=r'<div class="release-entry">(.*?)<!-- /.release-body -->',
                                patron=r'<a href="([a-zA-Z0-9/\.-]+%s.zip)' % pform).match
            support.info('OS:', pform)
            support.info('Extract IN:', elementum_path)
            support.info('URL:', url)
            if url:
                downloadtools.downloadfile(host + url, filename)
                extract()
                xbmc.sleep(1000)
                addon_file = filetools.file_open(filetools.join(elementum_path, 'addon.xml')).read()
                required = support.match(addon_file, patron=r'addon="([^"]+)').matches
                for r in required:
                    xbmc.executebuiltin('InstallAddon(' + r + ')', wait=True)
                setting()
def fixZipGetHash(zipFile):
    hash = ''
    with filetools.file_open(zipFile, 'r+b', vfs=False) as f:
        data = f.read()
        pos = data.find(b'\x50\x4b\x05\x06')  # End of central directory signature
        if pos > 0:
            f.seek(pos + 20)  # +20: see section V.I in the 'ZIP format' link above.
            hash = f.read()[2:]
            f.seek(pos + 20)
            f.truncate()
            # Zip file comment length: 0 byte length; tell zip applications to stop reading.
            f.write(b'\x00\x00')
    return hash.decode('utf-8')
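# A minimal usage sketch for fixZipGetHash(), mirroring how updateFromZip()
# later in this section calls it: the hash stored as the archive comment of a
# downloaded GitHub zip is read (and stripped) before extraction. The local
# path and destination directory here are hypothetical.
import zipfile

def get_hash_then_extract(zip_path, dest_dir):
    commit_hash = fixZipGetHash(zip_path)  # reads and removes the comment appended to the zip
    with zipfile.ZipFile(zip_path) as zf:
        zf.extractall(dest_dir)
    return commit_hash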
def getEpg():
    now = datetime.now()
    fileName = support.config.get_temp_file('guidatv-') + now.strftime('%Y %m %d')
    archiveName = fileName + '.gz'
    xmlName = fileName + '.xml'
    if not filetools.exists(xmlName):
        support.info('downloading epg')
        # delete the old ones
        for f in glob.glob(support.config.get_temp_file('guidatv-') + '*'):
            filetools.remove(f, silent=True)
        # inmemory = io.BytesIO(httptools.downloadpage(host).data)
        downloadtools.downloadfile(host, archiveName)
        support.info('opening gzip and writing xml')
        with gzip.GzipFile(fileobj=filetools.file_open(archiveName, mode='rb', vfs=False)) as f:
            guide = f.read().decode('utf-8')
            guide = guide.replace('\n', ' ').replace('><', '>\n<')
            with open(xmlName, 'w') as f:
                f.write(guide)
    # else:
    guide = filetools.file_open(xmlName, vfs=False)
    return guide
def zip(dir, file):
    smb = False
    if file.lower().startswith('smb://'):
        temp = file
        file = filetools.join(temp_path, os.path.split(file)[-1])
        smb = True

    with ZipFile(filetools.file_open(file, 'wb', vfs=False), "w") as zf:
        abs_src = os.path.abspath(dir)
        for dirname, subdirs, files in os.walk(dir):
            for filename in files:
                absname = os.path.abspath(os.path.join(dirname, filename))
                arcname = absname[len(abs_src) + 1:]
                zf.write(absname, arcname)
        zf.close()

    if smb:
        filetools.move(file, temp)
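# A quick round-trip sketch combining the zip() and unzip() helpers shown
# above; both paths are hypothetical, and the smb:// branches are only
# exercised when the archive path actually starts with smb://.
src_dir = "/storage/downloads/myshow"    # hypothetical folder to archive
archive = "/storage/backups/myshow.zip"  # hypothetical archive path

zip(src_dir, archive)                    # pack the folder into a zip
unzip("/storage/restore", archive)       # unpack it somewhere else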
def extract():
    import zipfile
    from platformcode.updater import fixZipGetHash
    support.info('Extracting Elementum to:', elementum_path)
    try:
        # hash = fixZipGetHash(filename)
        # support.info(hash)
        with zipfile.ZipFile(filetools.file_open(filename, 'rb', vfs=False)) as zip_ref:
            zip_ref.extractall(xbmc.translatePath(addon_path))
    except Exception as e:
        support.info('Could not extract the zip file')
        support.logger.error(e)
        import traceback
        support.logger.error(traceback.format_exc())
def updateFromZip(message=config.get_localized_string(80050)):
    dp = platformtools.dialog_progress_bg(config.get_localized_string(20000), message)
    dp.update(0)

    remotefilename = 'https://github.com/' + user + "/" + repo + "/archive/" + branch + ".zip"
    localfilename = filetools.join(xbmc.translatePath("special://home/addons/"), "plugin.video.kod.update.zip")
    destpathname = xbmc.translatePath("special://home/addons/")
    extractedDir = filetools.join(destpathname, "addon-" + branch)

    logger.info("remotefilename=%s" % remotefilename)
    logger.info("localfilename=%s" % localfilename)
    logger.info('extract dir: ' + extractedDir)

    # preliminary cleanup
    remove(localfilename)
    removeTree(extractedDir)

    try:
        urllib.urlretrieve(remotefilename, localfilename,
                           lambda nb, bs, fs, url=remotefilename: _pbhook(nb, bs, fs, url, dp))
    except Exception as e:
        platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(80031))
        logger.info('Could not download the zip file')
        logger.info(e)
        dp.close()
        return False

    # Unzip it
    logger.info("decompressing...")
    logger.info("destpathname=%s" % destpathname)

    if os.path.isfile(localfilename):
        logger.info('the file exists')
        dp.update(80, config.get_localized_string(20000) + '\n' + config.get_localized_string(80032))
        import zipfile
        try:
            hash = fixZipGetHash(localfilename)
            logger.info(hash)
            with zipfile.ZipFile(filetools.file_open(localfilename, 'rb', vfs=False)) as zip:
                size = sum([zinfo.file_size for zinfo in zip.filelist])
                cur_size = 0
                for member in zip.infolist():
                    zip.extract(member, destpathname)
                    cur_size += member.file_size
                    dp.update(int(80 + cur_size * 15 / size))
        except Exception as e:
            logger.info('Could not extract the zip file')
            logger.error(e)
            import traceback
            logger.error(traceback.format_exc())
            dp.close()
            remove(localfilename)
            return False

    dp.update(95)

    # clean everything up
    global addonDir
    if extractedDir != addonDir:
        removeTree(addonDir)
    xbmc.sleep(1000)

    rename(extractedDir, 'plugin.video.kod')
    addonDir = filetools.join(destpathname, 'plugin.video.kod')

    logger.info("Deleting the zip file...")
    remove(localfilename)

    dp.update(100)
    xbmc.sleep(1000)
    dp.close()

    if message != config.get_localized_string(80050):
        xbmc.executebuiltin("UpdateLocalAddons")

    refreshLang()

    return hash
def downloadfileGzipped(url, pathfichero):
    logger.info("url=" + url)
    nombrefichero = pathfichero
    logger.info("filename=" + nombrefichero)

    import xbmc
    nombrefichero = xbmc.makeLegalFilename(nombrefichero)
    logger.info("filename=" + nombrefichero)
    patron = "(http://[^/]+)/.+"
    matches = re.compile(patron, re.DOTALL).findall(url)

    if len(matches):
        logger.info("Main URL: " + matches[0])
        url1 = matches[0]
    else:
        url1 = url

    txheaders = {
        'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; SLCC1; .NET CLR 2.0.50727; '
                      'Media Center PC 5.0; .NET CLR 3.0.04506)',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'es-es,es;q=0.8,en-us;q=0.5,en;q=0.3',
        'Accept-Encoding': 'gzip,deflate',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
        'Keep-Alive': '115',
        'Connection': 'keep-alive',
        'Referer': url1,
    }
    txdata = None  # no request body, so the request stays a GET

    # Create the progress dialog
    from platformcode import platformtools
    progreso = platformtools.dialog_progress("addon", config.get_localized_string(60200) + '\n' + url.split("|")[0] + '\n' + nombrefichero)

    # Socket timeout at 10 seconds
    socket.setdefaulttimeout(10)

    h = urllib.request.HTTPHandler(debuglevel=0)
    request = urllib.request.Request(url, txdata, txheaders)
    # if existSize > 0:
    #     request.add_header('Range', 'bytes=%d-' % (existSize, ))

    opener = urllib.request.build_opener(h)
    urllib.request.install_opener(opener)

    try:
        connexion = opener.open(request)
    except urllib.error.HTTPError as e:
        logger.error("error %d (%s) when opening the url %s" % (e.code, e.msg, url))
        progreso.close()
        # Error 416 means the requested range is greater than the file => it is already complete
        if e.code == 416:
            return 0
        else:
            return -2

    nombre_fichero_base = filetools.basename(nombrefichero)
    if len(nombre_fichero_base) == 0:
        logger.info("Searching for name in the response headers")
        nombre_base = connexion.headers["Content-Disposition"]
        logger.info(nombre_base)
        patron = 'filename="([^"]+)"'
        matches = re.compile(patron, re.DOTALL).findall(nombre_base)

        if len(matches) > 0:
            titulo = matches[0]
            titulo = GetTitleFromFile(titulo)
            nombrefichero = filetools.join(pathfichero, titulo)
        else:
            logger.info("File name not found, using temporary name: no_name.txt")
            titulo = "no_name.txt"
            nombrefichero = filetools.join(pathfichero, titulo)

    totalfichero = int(connexion.headers["Content-Length"])

    # then
    f = filetools.file_open(nombrefichero, 'w', vfs=VFS)
    logger.info("new file open")

    grabado = 0
    logger.info("Content-Length=%s" % totalfichero)

    blocksize = 100 * 1024
    bloqueleido = connexion.read(blocksize)

    try:
        import io
        compressedstream = io.BytesIO(bloqueleido)  # gzip needs a bytes buffer
        import gzip
        gzipper = gzip.GzipFile(fileobj=compressedstream)
        bloquedata = gzipper.read()
        gzipper.close()
        logger.info("Starting download of the file, block read=%s" % len(bloqueleido))
    except:
        logger.error("ERROR: The file to be downloaded is not compressed with Gzip")
        f.close()
        progreso.close()
        return -2

    maxreintentos = 10

    while len(bloqueleido) > 0:
        try:
            # Write the block read
            f.write(bloquedata)
            grabado += len(bloqueleido)
            percent = int(float(grabado) * 100 / float(totalfichero))
            totalmb = float(float(totalfichero) / (1024 * 1024))
            descargadosmb = float(float(grabado) / (1024 * 1024))

            # Read the next block, retrying so a single timeout does not stop everything
            reintentos = 0
            while reintentos <= maxreintentos:
                try:
                    before = time.time()
                    bloqueleido = connexion.read(blocksize)
                    import gzip
                    import io
                    compressedstream = io.BytesIO(bloqueleido)  # gzip needs a bytes buffer
                    gzipper = gzip.GzipFile(fileobj=compressedstream)
                    bloquedata = gzipper.read()
                    gzipper.close()
                    after = time.time()
                    if (after - before) > 0:
                        velocidad = old_div(len(bloqueleido), (after - before))
                        falta = totalfichero - grabado
                        if velocidad > 0:
                            tiempofalta = old_div(falta, velocidad)
                        else:
                            tiempofalta = 0
                        logger.info(sec_to_hms(tiempofalta))
                        progreso.update(percent,
                                        "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s left" % (descargadosmb, totalmb, percent, old_div(velocidad, 1024), sec_to_hms(tiempofalta)))
                    break
                except:
                    reintentos += 1
                    logger.info("ERROR in block download, retry %d" % reintentos)
                    for line in sys.exc_info():
                        logger.error("%s" % line)

            # The user cancels the download
            if progreso.iscanceled():
                logger.info("Download of file canceled")
                f.close()
                progreso.close()
                return -1

            # There was an error in the download
            if reintentos > maxreintentos:
                logger.info("ERROR in the file download")
                f.close()
                progreso.close()
                return -2
        except:
            logger.info("ERROR in the file download")
            for line in sys.exc_info():
                logger.error("%s" % line)
            f.close()
            progreso.close()
            return -2

    f.close()
    # print data
    progreso.close()
    logger.info("End download of the file")
    return nombrefichero
def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False):
    logger.info("file=%s" % file)
    logger.info("dir=%s" % dir)

    if not dir.endswith(':') and not filetools.exists(dir):
        filetools.mkdir(dir)

    zf = zipfile.ZipFile(file)
    if not folder_to_extract:
        self._createstructure(file, dir)
    num_files = len(zf.namelist())

    for nameo in zf.namelist():
        name = nameo.replace(':', '_').replace('<', '_').replace('>', '_').replace('|', '_').replace('"', '_').replace('?', '_').replace('*', '_')
        logger.info("name=%s" % nameo)
        if not name.endswith('/'):
            logger.info("it is not a directory")
            try:
                (path, filename) = filetools.split(filetools.join(dir, name))
                logger.info("path=%s" % path)
                logger.info("name=%s" % name)
                if folder_to_extract:
                    if path != filetools.join(dir, folder_to_extract):
                        break
                else:
                    filetools.mkdir(path)
            except:
                pass

            if folder_to_extract:
                outfilename = filetools.join(dir, filename)
            else:
                outfilename = filetools.join(dir, name)
            logger.info("outfilename=%s" % outfilename)

            try:
                if filetools.exists(outfilename) and overwrite_question:
                    from platformcode import platformtools
                    dyesno = platformtools.dialog_yesno("The file already exists",
                                                        "The file %s to unzip already exists, do you want to overwrite it?" % filetools.basename(outfilename))
                    if not dyesno:
                        break
                    if backup:
                        import time
                        hora_folder = "Backup [%s]" % time.strftime("%d-%m_%H-%M", time.localtime())
                        backup = filetools.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract)
                        if not filetools.exists(backup):
                            filetools.mkdir(backup)
                        filetools.copy(outfilename, filetools.join(backup, filetools.basename(outfilename)))

                outfile = filetools.file_open(outfilename, 'wb')
                outfile.write(zf.read(nameo))
            except:
                import traceback
                logger.error(traceback.format_exc())
                logger.error("Error in file " + nameo)

    try:
        zf.close()
    except:
        logger.info("Error closing .zip " + file)
def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False, resumir=True, header=''):
    logger.info("url=" + url)
    logger.info("filename=" + nombrefichero)

    if headers is None:
        headers = []
    if not header:
        header = "plugin"

    progreso = None

    if config.is_xbmc() and nombrefichero.startswith("special://"):
        import xbmc
        nombrefichero = xbmc.translatePath(nombrefichero)

    try:
        # If it is not XBMC, always "Silent"
        from platformcode import platformtools

        # before
        # f=open(nombrefichero,"wb")
        try:
            import xbmc
            nombrefichero = xbmc.makeLegalFilename(nombrefichero)
        except:
            pass
        logger.info("filename=" + nombrefichero)

        # The file exists and we want to continue
        if filetools.exists(nombrefichero) and continuar:
            f = filetools.file_open(nombrefichero, 'r+b', vfs=VFS)
            if resumir:
                exist_size = filetools.getsize(nombrefichero)
                logger.info("the file exists, size=%d" % exist_size)
                grabado = exist_size
                f.seek(exist_size)
            else:
                exist_size = 0
                grabado = 0
        # the file already exists and we don't want to continue, abort
        elif filetools.exists(nombrefichero) and not continuar:
            logger.info("the file exists, it does not download again")
            return -3
        # the file does not exist
        else:
            exist_size = 0
            logger.info("the file does not exist")
            f = filetools.file_open(nombrefichero, 'wb', vfs=VFS)
            grabado = 0

        # Create the progress dialog
        if not silent:
            progreso = platformtools.dialog_progress(header, "Downloading..." + '\n' + url + '\n' + nombrefichero)

        # If the platform does not return a valid dialog box, assume silent mode
        if progreso is None:
            silent = True

        if "|" in url:
            additional_headers = url.split("|")[1]
            if "&" in additional_headers:
                additional_headers = additional_headers.split("&")
            else:
                additional_headers = [additional_headers]

            for additional_header in additional_headers:
                logger.info("additional_header: " + additional_header)
                name = re.findall("(.*?)=.*?", additional_header)[0]
                value = urllib.parse.unquote_plus(re.findall(".*?=(.*?)$", additional_header)[0])
                headers.append([name, value])

            url = url.split("|")[0]
            logger.info("url=" + url)

        # Socket timeout at 60 seconds
        socket.setdefaulttimeout(60)

        h = urllib.request.HTTPHandler(debuglevel=0)
        request = urllib.request.Request(url)
        for header in headers:
            logger.info("Header=" + header[0] + ": " + header[1])
            request.add_header(header[0], header[1])

        if exist_size > 0:
            request.add_header('Range', 'bytes=%d-' % (exist_size,))

        opener = urllib.request.build_opener(h)
        urllib.request.install_opener(opener)
        try:
            connexion = opener.open(request)
        except urllib.error.HTTPError as e:
            logger.error("error %d (%s) opening url %s" % (e.code, e.msg, url))
            f.close()
            if not silent:
                progreso.close()
            # Error 416 means the requested range is greater than the file => it is already complete
            if e.code == 416:
                return 0
            else:
                return -2

        try:
            totalfichero = int(connexion.headers["Content-Length"])
        except ValueError:
            totalfichero = 1

        if exist_size > 0:
            totalfichero = totalfichero + exist_size

        logger.info("Content-Length=%s" % totalfichero)

        blocksize = 100 * 1024

        bloqueleido = connexion.read(blocksize)
        logger.info("Starting download of the file, block read=%s" % len(bloqueleido))

        maxreintentos = 10

        while len(bloqueleido) > 0:
            try:
                # Write the block read
                f.write(bloqueleido)
                grabado += len(bloqueleido)
                percent = int(float(grabado) * 100 / float(totalfichero))
                totalmb = float(float(totalfichero) / (1024 * 1024))
                descargadosmb = float(float(grabado) / (1024 * 1024))

                # Read the next block, retrying so a single timeout does not stop everything
                reintentos = 0
                while reintentos <= maxreintentos:
                    try:
                        before = time.time()
                        bloqueleido = connexion.read(blocksize)
                        after = time.time()
                        if (after - before) > 0:
                            velocidad = old_div(len(bloqueleido), (after - before))
                            falta = totalfichero - grabado
                            if velocidad > 0:
                                tiempofalta = old_div(falta, velocidad)
                            else:
                                tiempofalta = 0
                            # logger.info(sec_to_hms(tiempofalta))
                            if not silent:
                                progreso.update(percent,
                                                "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s" % (descargadosmb, totalmb, percent, old_div(velocidad, 1024), sec_to_hms(tiempofalta)))
                        break
                    except:
                        reintentos += 1
                        logger.info("ERROR in block download, retry %d" % reintentos)
                        import traceback
                        logger.error(traceback.format_exc())

                # The user cancels the download
                try:
                    if progreso.iscanceled():
                        logger.info("Download of file canceled")
                        f.close()
                        progreso.close()
                        return -1
                except:
                    pass

                # There was an error in the download
                if reintentos > maxreintentos:
                    logger.info("ERROR in the file download")
                    f.close()
                    if not silent:
                        progreso.close()
                    return -2
            except:
                import traceback
                logger.error(traceback.format_exc())
                f.close()
                if not silent:
                    progreso.close()
                # platformtools.dialog_ok('Error al descargar' , 'Se ha producido un error' , 'al descargar el archivo')
                return -2
    except:
        if url.startswith("rtmp"):
            error = downloadfileRTMP(url, nombrefichero, silent)
            if error and not silent:
                from platformcode import platformtools
                platformtools.dialog_ok("You cannot download that video", "RTMP downloads not yet supported")
        else:
            import traceback
            from pprint import pprint
            exc_type, exc_value, exc_tb = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_tb)
            for line in lines:
                line_splits = line.split("\n")
                for line_split in line_splits:
                    logger.error(line_split)

    try:
        f.close()
    except:
        pass

    if not silent:
        try:
            progreso.close()
        except:
            pass

    logger.info("End of file download")
def downloadfileGzipped(url, pathfichero):
    logger.info("url=" + url)
    nombrefichero = pathfichero
    logger.info("nombrefichero=" + nombrefichero)

    import xbmc
    nombrefichero = xbmc.makeLegalFilename(nombrefichero)
    logger.info("nombrefichero=" + nombrefichero)
    patron = "(http://[^/]+)/.+"
    matches = re.compile(patron, re.DOTALL).findall(url)

    if len(matches):
        logger.info("Main URL: " + matches[0])
        url1 = matches[0]
    else:
        url1 = url

    txheaders = {
        'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; SLCC1; .NET CLR 2.0.50727; '
                      'Media Center PC 5.0; .NET CLR 3.0.04506)',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'es-es,es;q=0.8,en-us;q=0.5,en;q=0.3',
        'Accept-Encoding': 'gzip,deflate',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
        'Keep-Alive': '115',
        'Connection': 'keep-alive',
        'Referer': url1,
    }
    txdata = None  # no request body, so the request stays a GET

    # Create the progress dialog
    from platformcode import platformtools
    progreso = platformtools.dialog_progress("addon", config.get_localized_string(60200), url.split("|")[0], nombrefichero)

    # Socket timeout at 10 seconds
    socket.setdefaulttimeout(10)

    h = urllib.request.HTTPHandler(debuglevel=0)
    request = urllib.request.Request(url, txdata, txheaders)
    # if existSize > 0:
    #     request.add_header('Range', 'bytes=%d-' % (existSize, ))

    opener = urllib.request.build_opener(h)
    urllib.request.install_opener(opener)

    try:
        connexion = opener.open(request)
    except urllib.error.HTTPError as e:
        logger.error("error %d (%s) when opening the url %s" % (e.code, e.msg, url))
        progreso.close()
        # Error 416 means the requested range is greater than the file => it is already complete
        if e.code == 416:
            return 0
        else:
            return -2

    nombre_fichero_base = filetools.basename(nombrefichero)
    if len(nombre_fichero_base) == 0:
        logger.info("Searching for name in the response headers")
        nombre_base = connexion.headers["Content-Disposition"]
        logger.info(nombre_base)
        patron = 'filename="([^"]+)"'
        matches = re.compile(patron, re.DOTALL).findall(nombre_base)

        if len(matches) > 0:
            titulo = matches[0]
            titulo = GetTitleFromFile(titulo)
            nombrefichero = filetools.join(pathfichero, titulo)
        else:
            logger.info("File name not found, using temporary name: sin_nombre.txt")
            titulo = "sin_nombre.txt"
            nombrefichero = filetools.join(pathfichero, titulo)

    totalfichero = int(connexion.headers["Content-Length"])

    # then
    f = filetools.file_open(nombrefichero, 'w', vfs=VFS)
    logger.info("new file open")

    grabado = 0
    logger.info("Content-Length=%s" % totalfichero)

    blocksize = 100 * 1024
    bloqueleido = connexion.read(blocksize)

    try:
        import io
        compressedstream = io.BytesIO(bloqueleido)  # gzip needs a bytes buffer
        import gzip
        gzipper = gzip.GzipFile(fileobj=compressedstream)
        bloquedata = gzipper.read()
        gzipper.close()
        logger.info("Starting download of the file, block read=%s" % len(bloqueleido))
    except:
        logger.error("ERROR: The file to be downloaded is not compressed with Gzip")
        f.close()
        progreso.close()
        return -2

    maxreintentos = 10

    while len(bloqueleido) > 0:
        try:
            # Write the block read
            f.write(bloquedata)
            grabado += len(bloqueleido)
            percent = int(float(grabado) * 100 / float(totalfichero))
            totalmb = float(float(totalfichero) / (1024 * 1024))
            descargadosmb = float(float(grabado) / (1024 * 1024))

            # Read the next block, retrying so a single timeout does not stop everything
            reintentos = 0
            while reintentos <= maxreintentos:
                try:
                    before = time.time()
                    bloqueleido = connexion.read(blocksize)
                    import gzip
                    import io
                    compressedstream = io.BytesIO(bloqueleido)  # gzip needs a bytes buffer
                    gzipper = gzip.GzipFile(fileobj=compressedstream)
                    bloquedata = gzipper.read()
                    gzipper.close()
                    after = time.time()
                    if (after - before) > 0:
                        velocidad = old_div(len(bloqueleido), (after - before))
                        falta = totalfichero - grabado
                        if velocidad > 0:
                            tiempofalta = old_div(falta, velocidad)
                        else:
                            tiempofalta = 0
                        logger.info(sec_to_hms(tiempofalta))
                        progreso.update(percent,
                                        "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s left" % (descargadosmb, totalmb, percent, old_div(velocidad, 1024), sec_to_hms(tiempofalta)))
                    break
                except:
                    reintentos += 1
                    logger.info("ERROR in block download, retry %d" % reintentos)
                    for line in sys.exc_info():
                        logger.error("%s" % line)

            # The user cancels the download
            if progreso.iscanceled():
                logger.info("Download of file canceled")
                f.close()
                progreso.close()
                return -1

            # There was an error in the download
            if reintentos > maxreintentos:
                logger.info("ERROR in the file download")
                f.close()
                progreso.close()
                return -2
        except:
            logger.info("ERROR in the file download")
            for line in sys.exc_info():
                logger.error("%s" % line)
            f.close()
            progreso.close()
            return -2

    f.close()
    # print data
    progreso.close()
    logger.info("End download of the file")
    return nombrefichero
def __init__(self, url, path, filename=None, headers=[], resume=True, max_connections=10,
             block_size=2**17, part_size=2**24, max_buffer=10):
    # Parameters
    self._resume = resume
    self._path = path
    self._filename = filename
    self._max_connections = max_connections
    self._block_size = block_size
    self._part_size = part_size
    self._max_buffer = max_buffer

    try:
        import xbmc
        self.tmp_path = xbmc.translatePath("special://temp/")
    except:
        self.tmp_path = os.getenv("TEMP") or os.getenv("TMP") or os.getenv("TMPDIR")

    self.states = type('states', (), {"stopped": 0, "connecting": 1, "downloading": 2,
                                      "completed": 3, "error": 4, "saving": 5})

    self._state = self.states.stopped
    self._download_lock = Lock()
    self._headers = {"User-Agent": "Kodi/15.2 (Windows NT 10.0; WOW64) App_Bitness/32 Version/15.2-Git:20151019-02e7013"}
    self._speed = 0
    self._buffer = {}
    self._seekable = True

    self._threads = [Thread(target=self.__start_part__, name="Downloader %s/%s" % (x + 1, self._max_connections))
                     for x in range(self._max_connections)]
    self._speed_thread = Thread(target=self.__speed_metter__, name="Speed Meter")
    self._save_thread = Thread(target=self.__save_file__, name="File Writer")

    # Update the headers
    self._headers.update(dict(headers))

    # Separate the headers from the url
    self.__url_to_headers__(url)

    # Get the server info
    self.__get_download_headers__()

    self._file_size = int(self.response_headers.get("content-length", "0"))

    if not self.response_headers.get("accept-ranges") == "bytes" or self._file_size == 0:
        self._max_connections = 1
        self._part_size = 0
        self._resume = False

    # Get the file name
    self.__get_download_filename__()

    # Open in "a+" mode so the file is created if it does not exist, then in "r+b" mode to be able to seek()
    self.file = filetools.file_open(filetools.join(self._path, self._filename), "a+")
    self.file = filetools.file_open(filetools.join(self._path, self._filename), "r+b")

    if self._file_size >= 2**31 or not self._file_size:
        try:
            self.file.seek(2**31)
        except:
            self._seekable = False
            logger.info("Cannot seek() or tell() in files larger than 2GB")
    else:
        # detect seek errors on some android devices
        try:
            self.file.seek(123)
        except:
            self._seekable = False
            logger.info("Cannot seek()")

    self.__get_download_info__()

    logger.info("Download initialized: Parts: %s | Path: %s | File: %s | Size: %s" %
                (len(self._download_info["parts"]), self._path, self._filename, self._download_info["size"]))
def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False, resumir=True):
    logger.info("url=" + url)
    logger.info("filename=" + nombrefichero)

    if headers is None:
        headers = []

    progreso = None

    if config.is_xbmc() and nombrefichero.startswith("special://"):
        import xbmc
        nombrefichero = xbmc.translatePath(nombrefichero)

    try:
        # If it is not XBMC, always "Silent"
        from platformcode import platformtools

        # before
        # f=open(nombrefichero,"wb")
        try:
            import xbmc
            nombrefichero = xbmc.makeLegalFilename(nombrefichero)
        except:
            pass
        logger.info("filename=" + nombrefichero)

        # The file exists and we want to continue
        if filetools.exists(nombrefichero) and continuar:
            f = filetools.file_open(nombrefichero, 'r+b', vfs=VFS)
            if resumir:
                exist_size = filetools.getsize(nombrefichero)
                logger.info("the file exists, size=%d" % exist_size)
                grabado = exist_size
                f.seek(exist_size)
            else:
                exist_size = 0
                grabado = 0
        # the file already exists and we don't want to continue, abort
        elif filetools.exists(nombrefichero) and not continuar:
            logger.info("the file exists, it does not download again")
            return -3
        # the file does not exist
        else:
            exist_size = 0
            logger.info("the file does not exist")
            f = filetools.file_open(nombrefichero, 'wb', vfs=VFS)
            grabado = 0

        # Create the progress dialog
        if not silent:
            progreso = platformtools.dialog_progress("plugin", "Downloading...", url, nombrefichero)

        # If the platform does not return a valid dialog box, assume silent mode
        if progreso is None:
            silent = True

        if "|" in url:
            additional_headers = url.split("|")[1]
            if "&" in additional_headers:
                additional_headers = additional_headers.split("&")
            else:
                additional_headers = [additional_headers]

            for additional_header in additional_headers:
                logger.info("additional_header: " + additional_header)
                name = re.findall("(.*?)=.*?", additional_header)[0]
                value = urllib.parse.unquote_plus(re.findall(".*?=(.*?)$", additional_header)[0])
                headers.append([name, value])

            url = url.split("|")[0]
            logger.info("url=" + url)

        # Socket timeout at 60 seconds
        socket.setdefaulttimeout(60)

        h = urllib.request.HTTPHandler(debuglevel=0)
        request = urllib.request.Request(url)
        for header in headers:
            logger.info("Header=" + header[0] + ": " + header[1])
            request.add_header(header[0], header[1])

        if exist_size > 0:
            request.add_header('Range', 'bytes=%d-' % (exist_size, ))

        opener = urllib.request.build_opener(h)
        urllib.request.install_opener(opener)
        try:
            connexion = opener.open(request)
        except urllib.error.HTTPError as e:
            logger.error("error %d (%s) opening url %s" % (e.code, e.msg, url))
            f.close()
            if not silent:
                progreso.close()
            # Error 416 means the requested range is greater than the file => it is already complete
            if e.code == 416:
                return 0
            else:
                return -2

        try:
            totalfichero = int(connexion.headers["Content-Length"])
        except ValueError:
            totalfichero = 1

        if exist_size > 0:
            totalfichero = totalfichero + exist_size

        logger.info("Content-Length=%s" % totalfichero)

        blocksize = 100 * 1024

        bloqueleido = connexion.read(blocksize)
        logger.info("Starting download of the file, block read=%s" % len(bloqueleido))

        maxreintentos = 10

        while len(bloqueleido) > 0:
            try:
                # Write the block read
                f.write(bloqueleido)
                grabado += len(bloqueleido)
                percent = int(float(grabado) * 100 / float(totalfichero))
                totalmb = float(float(totalfichero) / (1024 * 1024))
                descargadosmb = float(float(grabado) / (1024 * 1024))

                # Read the next block, retrying so a single timeout does not stop everything
                reintentos = 0
                while reintentos <= maxreintentos:
                    try:
                        before = time.time()
                        bloqueleido = connexion.read(blocksize)
                        after = time.time()
                        if (after - before) > 0:
                            velocidad = old_div(len(bloqueleido), (after - before))
                            falta = totalfichero - grabado
                            if velocidad > 0:
                                tiempofalta = old_div(falta, velocidad)
                            else:
                                tiempofalta = 0
                            # logger.info(sec_to_hms(tiempofalta))
                            if not silent:
                                progreso.update(percent,
                                                "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s left" % (descargadosmb, totalmb, percent, old_div(velocidad, 1024), sec_to_hms(tiempofalta)))
                        break
                    except:
                        reintentos += 1
                        logger.info("ERROR in block download, retry %d" % reintentos)
                        import traceback
                        logger.error(traceback.format_exc())

                # The user cancels the download
                try:
                    if progreso.iscanceled():
                        logger.info("Download of file canceled")
                        f.close()
                        progreso.close()
                        return -1
                except:
                    pass

                # There was an error in the download
                if reintentos > maxreintentos:
                    logger.info("ERROR in the file download")
                    f.close()
                    if not silent:
                        progreso.close()
                    return -2
            except:
                import traceback
                logger.error(traceback.format_exc())
                f.close()
                if not silent:
                    progreso.close()
                # platformtools.dialog_ok('Error al descargar' , 'Se ha producido un error' , 'al descargar el archivo')
                return -2
    except:
        if url.startswith("rtmp"):
            error = downloadfileRTMP(url, nombrefichero, silent)
            if error and not silent:
                from platformcode import platformtools
                platformtools.dialog_ok("You cannot download that video", "RTMP downloads are not yet", "supported")
        else:
            import traceback
            from pprint import pprint
            exc_type, exc_value, exc_tb = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_tb)
            for line in lines:
                line_splits = line.split("\n")
                for line_split in line_splits:
                    logger.error(line_split)

    try:
        f.close()
    except:
        pass

    if not silent:
        try:
            progreso.close()
        except:
            pass

    logger.info("End of file download")
def __init__(self, url, path, filename=None, headers=[], resume=True, max_connections=10,
             block_size=2**17, part_size=2**24, max_buffer=10, json_path=None):
    # Parameters
    self._resume = resume
    self._path = path
    self._filename = filename
    self._max_connections = max_connections
    self._block_size = block_size
    self._part_size = part_size
    self._max_buffer = max_buffer
    self._json_path = json_path
    self._json_text = ''
    self._json_item = Item()

    try:
        import xbmc
        self.tmp_path = xbmc.translatePath("special://temp/")
    except:
        self.tmp_path = os.getenv("TEMP") or os.getenv("TMP") or os.getenv("TMPDIR")

    self.states = type('states', (), {"stopped": 0, "connecting": 1, "downloading": 2,
                                      "completed": 3, "error": 4, "saving": 5})

    self._state = self.states.stopped
    self._download_lock = Lock()
    self._headers = {"User-Agent": "Kodi/15.2 (Windows NT 10.0; WOW64) App_Bitness/32 Version/15.2-Git:20151019-02e7013"}
    self._speed = 0
    self._buffer = {}
    self._seekable = True

    self._threads = [Thread(target=self.__start_part__, name="Downloader %s/%s" % (x + 1, self._max_connections))
                     for x in range(self._max_connections)]
    self._speed_thread = Thread(target=self.__speed_metter__, name="Speed Meter")
    self._save_thread = Thread(target=self.__save_file__, name="File Writer")

    # We update the headers
    self._headers.update(dict(headers))

    # We separate the headers from the url
    self.__url_to_headers__(url)

    # We get the server info
    self.__get_download_headers__()

    self._file_size = int(self.response_headers.get("content-length", "0"))

    if not self.response_headers.get("accept-ranges") == "bytes" or self._file_size == 0:
        self._max_connections = 1
        self._part_size = 0
        self._resume = False

    # We get the file name
    self.__get_download_filename__()

    # We open in "a+" mode to create the file if it does not exist, then in "r+b" mode to be able to do seek()
    self.file = filetools.file_open(filetools.join(self._path, self._filename), "a+", vfs=VFS)
    if self.file:
        self.file.close()
    self.file = filetools.file_open(filetools.join(self._path, self._filename), "r+b", vfs=VFS)
    if not self.file:
        return

    if self._file_size >= 2**31 or not self._file_size:
        try:
            self.file.seek(2**31, 0)
        except OverflowError:
            self._seekable = False
            logger.error("Cannot do seek() or tell() in files larger than 2GB")

    self.__get_download_info__()

    try:
        logger.info("Download started: Parts: %s | Path: %s | File: %s | Size: %s" %
                    (str(len(self._download_info["parts"])), self._path.encode('utf-8'),
                     self._filename.encode('utf-8'), str(self._download_info["size"])))
    except:
        pass