def get_nzoid(inputName):
    nzoid = None
    logger.debug("Searching for nzoid from SABnzbd ...")
    if "http" in core.SABNZBDHOST:
        baseURL = "%s:%s/api" % (core.SABNZBDHOST, core.SABNZBDPORT)
    else:
        baseURL = "http://%s:%s/api" % (core.SABNZBDHOST, core.SABNZBDPORT)
    url = baseURL
    params = {}
    params['apikey'] = core.SABNZBDAPIKEY
    params['mode'] = "queue"
    params['output'] = 'json'
    try:
        r = requests.get(url, params=params, verify=False, timeout=(30, 120))
    except requests.ConnectionError:
        logger.error("Unable to open URL")
        return nzoid  # failure

    try:
        result = r.json()
        cleanName = os.path.splitext(os.path.split(inputName)[1])[0]
        for slot in result['queue']['slots']:
            if slot['filename'] in [inputName, cleanName]:
                nzoid = slot['nzo_id']
                logger.debug("Found nzoid: %s" % nzoid)
                break
    except:
        logger.warning("Data from SABnzbd could not be parsed")

    return nzoid
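# Minimal usage sketch for get_nzoid (assumptions: nzbToMedia's "core" settings such as
# core.SABNZBDHOST, core.SABNZBDPORT and core.SABNZBDAPIKEY are already loaded, and the
# NZB name below is a placeholder).
nzoid = get_nzoid("Some.Show.S01E01.720p.nzb")
if nzoid is None:
    logger.debug("NZB not found in the SABnzbd queue")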
def errores(item):
    logger.info("[descargas.py] errores")
    itemlist = []

    # Build a list with the entries of the error folder
    if usingsamba:
        ficheros = samba.get_files(ERROR_PATH)
    else:
        ficheros = os.listdir(ERROR_PATH)

    # Sort the list by insertion order
    ficheros.sort()

    # Build a list with the entries of the download list
    for fichero in ficheros:
        logger.info("[descargas.py] fichero=" + fichero)
        try:
            # Read the bookmark
            canal, titulo, thumbnail, plot, server, url, fulltitle = favoritos.readbookmark(fichero, ERROR_PATH)
            if canal == "":
                canal = "descargas"

            # Build the entry
            # The category carries the file name so the entry can be deleted later
            itemlist.append(Item(channel=canal, action="play", url=url, server=server, title=titulo,
                                 fulltitle=fulltitle, thumbnail=thumbnail, plot=plot, fanart=thumbnail,
                                 category="errores", extra=os.path.join(ERROR_PATH, fichero), folder=False))
        except:
            logger.info("[descargas.py] error al leer bookmark")
            for line in sys.exc_info():
                logger.error("%s" % line)

    return itemlist
def worker(infile, queue):
    channel_result_itemlist = []
    try:
        basename_without_extension = os.path.basename(infile)[:-4]
        # http://docs.python.org/library/imp.html?highlight=imp#module-imp
        obj = imp.load_source(basename_without_extension, infile[:-4] + ".py")
        logger.info(
            "streamondemand-pureita-master.channels.buscador cargado "
            + basename_without_extension
            + " de "
            + infile
        )
        # item.url contains search type: serie, anime, etc...
        channel_result_itemlist.extend(obj.search(Item(extra=item.url), tecleado))
        for local_item in channel_result_itemlist:
            local_item.title = (
                " [COLOR azure] "
                + local_item.title
                + " [/COLOR] [COLOR orange]su[/COLOR] [COLOR green]"
                + basename_without_extension
                + "[/COLOR]"
            )
            local_item.viewmode = "list"
    except:
        import traceback
        logger.error(traceback.format_exc())

    queue.put(channel_result_itemlist)
def links(item):
    itemlist = []
    try:
        count = 0
        exit = False
        while (not exit and count < 5):
            # Sometimes accessing the URL fails, so retry a few times
            try:
                logger.info(str(item.url))
                page = urllib2.urlopen(item.url)
                urlvideo = "\"" + page.geturl() + "\""
                logger.info(str(page.read()))
                logger.info(item.url)
                exit = True
            except:
                import traceback
                logger.info(traceback.format_exc())
                count = count + 1

        logger.info("urlvideo=" + urlvideo)
        for video in servertools.findvideos(urlvideo):
            #scrapedtitle = title.strip() + " " + match[1] + " " + match[2] + " " + video[0]
            scrapedtitle = scrapertools.htmlclean(video[0])
            scrapedurl = video[1]
            server = video[2]
            itemlist.append(Item(channel=__channel__, action="play", title=scrapedtitle, url=scrapedurl,
                                 thumbnail=item.thumbnail, plot="", server=server, extra="",
                                 category=item.category, fanart=item.thumbnail, folder=False))
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)

    return itemlist
def search(item, texto):
    logger.info("[pelisalacarta.seriesblanco search texto=" + texto)
    itemlist = []

    item.url = urlparse.urljoin(host, "/search.php?q1=%s" % (texto))

    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    data = re.sub(r"<!--.*?-->", "", data)

    # <div style='float:left;width: 620px;'><div style='float:left;width: 33%;text-align:center;'><a href='/serie/20/against-the-wall.html' '><img class='ict' src='http://4.bp.blogspot.com/-LBERI18Cq-g/UTendDO7iNI/AAAAAAAAPrk/QGqjmfdDreQ/s320/Against_the_Wall_Seriesdanko.jpg' alt='Capitulos de: Against The Wall' height='184' width='120'></a><br><div style='text-align:center;line-height:20px;height:20px;'><a href='/serie/20/against-the-wall.html' style='font-size: 11px;'> Against The Wall</a></div><br><br>
    patron = "<img class='ict' src='([^']+)'.*?<div style='text-align:center;line-height:20px;height:20px;'><a href='([^']+)' style='font-size: 11px;'>([^<]+)</a>"
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        itemlist.append(Item(channel=__channel__, title=scrapedtitle, url=urlparse.urljoin(host, scrapedurl),
                             action="episodios", thumbnail=scrapedthumbnail,
                             fanart="http://portfolio.vernier.se/files/2014/03/light-grey-wood-photography-hd-wallpaper-1920x1200-46471.jpg",
                             show=scrapedtitle))

    try:
        return itemlist
    # The exception is caught so the global search does not break if one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def onAction(self, action):
    #s1 = str(action.getId())
    #s2 = str(action.getButtonCode())
    #print "======="+s1+"========="+s2+"=========="
    if action == ACTION_PREVIOUS_MENU:
        self.close()
    else:
        id = action.getId()
        keycode = action.getButtonCode()
        if keycode >= 61505 and keycode <= 61530:
            if self.getControl(CTRL_ID_CAPS).isSelected() or self.getControl(CTRL_ID_MAYS).isSelected():
                keychar = chr(keycode - 61505 + ord('A'))
            else:
                keychar = chr(keycode - 61505 + ord('a'))
            self.getControl(CTRL_ID_TEXT).setLabel(self.getControl(CTRL_ID_TEXT).getLabel() + keychar)
            self.disableMayus()
        elif keycode >= 192577 and keycode <= 192602:
            if self.getControl(CTRL_ID_CAPS).isSelected() or self.getControl(CTRL_ID_MAYS).isSelected():
                keychar = chr(keycode - 192577 + ord('a'))
            else:
                keychar = chr(keycode - 192577 + ord('A'))
            self.getControl(CTRL_ID_TEXT).setLabel(self.getControl(CTRL_ID_TEXT).getLabel() + keychar)
            self.disableMayus()
        elif keycode >= 61536 and keycode <= 61545:
            self.onClick(keycode - 61536 + 48)
        elif keycode == 61472:
            self.onClick(CTRL_ID_SPACE)
        elif keycode == 61448:
            self.onClick(CTRL_ID_BACK)
        elif keycode != 0:
            s = "Unattended keycode: " + str(action.getButtonCode())
            logger.error("%s" % s)
def move_to_libray(item):
    try:
        from platformcode import library
    except:
        return

    # Copy the file into the library
    origen = filetools.join(config.get_setting("downloadpath"), item.downloadFilename)
    destino = filetools.join(config.get_library_path(), *filetools.split(item.downloadFilename))

    if not filetools.isdir(filetools.dirname(destino)):
        filetools.mkdir(filetools.dirname(destino))

    if filetools.isfile(destino) and filetools.isfile(origen):
        filetools.remove(destino)

    if filetools.isfile(origen):
        filetools.move(origen, destino)
        if len(filetools.listdir(filetools.dirname(origen))) == 0:
            filetools.rmdir(filetools.dirname(origen))
    else:
        logger.error("No se ha encontrado el archivo: %s" % origen)

    if filetools.isfile(destino):
        if item.contentType == "movie" and item.infoLabels["tmdb_id"]:
            library_item = Item(title="Descargado: %s" % item.downloadFilename, channel="descargas",
                                action="findvideos", infoLabels=item.infoLabels, url=destino)
            library.save_library_movie(library_item)

        elif item.contentType == "episode" and item.infoLabels["tmdb_id"]:
            library_item = Item(title="Descargado: %s" % item.downloadFilename, channel="descargas",
                                action="findvideos", infoLabels=item.infoLabels, url=destino)
            tvshow = Item(channel="descargas", contentType="tvshow",
                          infoLabels={"tmdb_id": item.infoLabels["tmdb_id"]})
            library.save_library_tvshow(tvshow, [library_item])
def rmdirtree(path):
    """
    Removes a directory and all of its contents
    @param path: path to remove
    @type path: str
    @rtype: bool
    @return: returns False on error
    """
    path = encode(path)
    try:
        if path.lower().startswith("smb://"):
            for raiz, subcarpetas, ficheros in samba.walk(path, topdown=False):
                for f in ficheros:
                    samba.remove(join(decode(raiz), decode(f)))
                for s in subcarpetas:
                    samba.rmdir(join(decode(raiz), decode(s)))
            samba.rmdir(path)
        else:
            import shutil
            shutil.rmtree(path, ignore_errors=True)
    except:
        logger.error("ERROR al eliminar el directorio: %s" % (path))
        logger.error(traceback.format_exc())
        platformtools.dialog_notification("Error al eliminar el directorio", path)
        return False
    else:
        return not exists(path)
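# Minimal usage sketch for rmdirtree (assumption: called from within this same module;
# the path below is a placeholder, and smb:// paths go through the samba helper).
if rmdirtree("/storage/downloads/old_cache"):
    logger.info("cache directory removed")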
def initeventservice(self, filename=None, shutdown=False):
    """
    Re-initialize the EMANE Event service.
    The multicast group and/or port may be configured.
    """
    self.deleteeventservice()

    if shutdown:
        return

    # Get the control network to be used for events
    group, port = self.get_config("eventservicegroup").split(":")
    self.event_device = self.get_config("eventservicedevice")
    eventnetidx = self.session.get_control_net_index(self.event_device)
    if eventnetidx < 0:
        logger.error("invalid emane event service device provided: %s", self.event_device)
        return False

    # make sure the event control network is in place
    eventnet = self.session.add_remove_control_net(net_index=eventnetidx, remove=False, conf_required=False)
    if eventnet is not None:
        # direct EMANE events towards control net bridge
        self.event_device = eventnet.brname
    eventchannel = (group, int(port), self.event_device)

    # disabled otachannel for event service
    # only needed for e.g. antennaprofile events xmit by models
    logger.info("using %s for event service traffic", self.event_device)
    try:
        self.service = EventService(eventchannel=eventchannel, otachannel=None)
    except EventServiceException:
        logger.exception("error instantiating emane EventService")

    return True
def move(path, dest):
    """
    Moves a file
    @param path: path of the file to move
    @type path: str
    @param dest: destination path
    @type dest: str
    @rtype: bool
    @return: returns False on error
    """
    try:
        # samba/samba
        if path.lower().startswith("smb://") and dest.lower().startswith("smb://"):
            dest = encode(dest, True)
            path = encode(path, True)
            samba.rename(path, dest)
        # local/local
        elif not path.lower().startswith("smb://") and not dest.lower().startswith("smb://"):
            dest = encode(dest)
            path = encode(path)
            os.rename(path, dest)
        # mixed: in this case the file is copied and the source is removed afterwards
        else:
            return copy(path, dest) == True and remove(path) == True
    except:
        logger.error("ERROR al mover el archivo: %s" % (path))
        logger.error(traceback.format_exc())
        return False
    else:
        return True
def copy(path, dest, silent=False):
    """
    Copies a file
    @param path: path of the file to copy
    @type path: str
    @param dest: destination path
    @type dest: str
    @rtype: bool
    @return: returns False on error
    """
    import time
    try:
        fo = file_open(path, "rb")
        fd = file_open(dest, "wb")
        if fo and fd:
            if not silent:
                dialogo = platformtools.dialog_progress("Copiando archivo", "")
            size = getsize(path)
            copiado = 0
            while True:
                if not silent:
                    dialogo.update(copiado * 100 / size, basename(path))
                buf = fo.read(1024 * 1024)
                if not buf:
                    break
                if not silent and dialogo.iscanceled():
                    dialogo.close()
                    return False
                fd.write(buf)
                copiado += len(buf)
            if not silent:
                dialogo.close()
    except:
        logger.error("ERROR al copiar el archivo: %s" % (path))
        logger.error(traceback.format_exc())
        return False
    else:
        return True
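# Minimal usage sketch for copy (assumption: called from within this same module;
# both paths are placeholders; silent=True suppresses the progress dialog).
if copy("/storage/downloads/video.mkv", "/storage/library/video.mkv", silent=True):
    logger.info("copy finished")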
def write(path, data):
    """
    Writes the data to a file
    @param path: path of the file to write
    @type path: str
    @param data: data to write
    @type data: str
    @rtype: bool
    @return: returns True if the data was written correctly, or False on error
    """
    path = encode(path)
    try:
        if path.lower().startswith("smb://"):
            f = samba.smb_open(path, "wb")
        else:
            f = open(path, "wb")

        f.write(data)
        f.close()
    except:
        logger.error("ERROR al guardar el archivo: %s" % (path))
        logger.error(traceback.format_exc())
        return False
    else:
        return True
def rename(path, new_name):
    """
    Renames a file or folder
    @param path: path of the file or folder to rename
    @type path: str
    @param new_name: new name
    @type new_name: str
    @rtype: bool
    @return: returns False on error
    """
    path = encode(path)
    try:
        if path.lower().startswith("smb://"):
            new_name = encode(new_name, True)
            samba.rename(path, join(dirname(path), new_name))
        else:
            new_name = encode(new_name, False)
            os.rename(path, os.path.join(os.path.dirname(path), new_name))
    except:
        logger.error("ERROR al renombrar el archivo: %s" % (path))
        logger.error(traceback.format_exc())
        platformtools.dialog_notification("Error al renombrar", path)
        return False
    else:
        return True
def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    item.extra = 'Estrenos'
    try:
        if categoria == 'peliculas':
            item.url = host + '/Ordenar/Estreno/?page=1'
        elif categoria == 'infantiles':
            item.url = host + '/Categoria/Animacion/?page=1'
        elif categoria == 'documentales':
            item.url = host + '/Documentales/?page=1'
            item.extra = 'documental'

        itemlist = lista(item)

        if itemlist[-1].title == 'Siguiente >>>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
def read(path, linea_inicio=0, total_lineas=None):
    """
    Reads the contents of a file and returns the data
    @param path: path of the file
    @type path: str
    @param linea_inicio: first line of the file to read
    @type linea_inicio: positive int
    @param total_lineas: maximum number of lines to read. If it is None or larger than the total
        number of lines, the file is read to the end.
    @type total_lineas: positive int
    @rtype: str
    @return: the data contained in the file
    """
    path = encode(path)
    try:
        if path.lower().startswith("smb://"):
            f = samba.smb_open(path, "rb")
        else:
            f = open(path, "rb")

        data = []
        for x, line in enumerate(f):
            if x < linea_inicio:
                continue
            if len(data) == total_lineas:
                break
            data.append(line)

        f.close()
    except:
        logger.error("ERROR al leer el archivo: %s" % (path))
        logger.error(traceback.format_exc())
        return False
    else:
        return "".join(data)
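# Minimal usage sketch for read (assumption: called from within this same module;
# the path is a placeholder). Reads 20 lines starting at line 100; the function
# returns False if the file could not be read.
chunk = read("/storage/logs/plugin.log", linea_inicio=100, total_lineas=20)
if chunk is not False:
    logger.info("read %d bytes" % len(chunk))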
def downloadall(item):
    logger.info("[descargas.py] downloadall")

    if usingsamba(DOWNLOAD_LIST_PATH):
        ficheros = samba.get_files(DOWNLOAD_LIST_PATH)
    else:
        ficheros = os.listdir(DOWNLOAD_LIST_PATH)
    ficheros.sort()

    for fichero in ficheros:
        if fichero.endswith('.txt'):
            try:
                item = LeerDescarga(fichero, DOWNLOAD_LIST_PATH)
                dev = download(item)

                if dev == -1:
                    logger.info("[descargas.py] Descarga cancelada")
                    guitools.Dialog_OK("pelisalacarta", "Descargas canceladas")
                    break
                elif dev == -2:
                    logger.info("[descargas.py] ERROR EN DESCARGA DE " + fichero)
                    BorrarDescarga(item, DOWNLOAD_LIST_PATH)
                    GuardarDescarga(item, ERROR_PATH)
                else:
                    BorrarDescarga(item, DOWNLOAD_LIST_PATH)
            except:
                logger.info("[descargas.py] ERROR EN DESCARGA DE " + fichero)
                import traceback
                logger.error(traceback.format_exc())
                GuardarDescarga(item, ERROR_PATH)
                BorrarDescarga(item, DOWNLOAD_LIST_PATH)

    return ""
def newest(categoria):
    logger.info("pelisalacarta.channels.cinefox newest")
    itemlist = []
    item = Item()
    try:
        if categoria == "peliculas":
            item.url = "http://www.cinefox.cc/catalogue?type=peliculas"
            itemlist = peliculas(item)

            if itemlist[-1].action == "peliculas":
                itemlist.pop()

        if categoria == "series":
            item.url = "http://www.cinefox.cc/ultimos-capitulos"
            itemlist = ultimos(item)

            if itemlist[-1].action == "ultimos":
                itemlist.pop()

    # The exception is caught so the "novedades" channel does not break if one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
def search(item, texto):
    logger.info("metaserie.py search")
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    itemlist = []
    if texto != '':
        try:
            data = scrapertools.cache_page(item.url)
            patron = '<a href="([^\"]+)" rel="bookmark" class="local-link">([^<]+)<.*?'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)

            for scrapedurl, scrapedtitle in matches:
                url = scrapedurl
                title = scrapertools.decodeHtmlentities(scrapedtitle)
                thumbnail = ''
                plot = ''
                if (DEBUG):
                    logger.info("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "])")
                itemlist.append(Item(channel=item.channel, action="temporadas", title=title, fulltitle=title,
                                     url=url, thumbnail=thumbnail, plot=plot, folder="true"))

            return itemlist
        except:
            import sys
            for line in sys.exc_info():
                logger.error("%s" % line)
            return []
def newest(categoria):
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = "http://www.divxatope.com/categoria/peliculas"
        elif categoria == 'series':
            item.url = "http://www.divxatope.com/categoria/series"
        else:
            return []

        itemlist = lista(item)
        if itemlist[-1].title == ">> Página siguiente":
            itemlist.pop()

        # This page sometimes lists duplicated content; try to discard it
        dict_aux = {}
        for i in itemlist:
            if not i.url in dict_aux:
                dict_aux[i.url] = i
            else:
                itemlist.remove(i)

    # The exception is caught so the "novedades" channel does not break if one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    #return dict_aux.values()
    return itemlist
def findvideos(data):
    logger.info("[servertools.py] findvideos")
    encontrados = set()
    devuelve = []

    # Run findvideos on every server
    #for serverid in ALL_SERVERS:
    for serverid in ENABLED_SERVERS:
        try:
            # Replaces the exec-based code with a "Plex compatible" alternative
            #exec "from servers import "+serverid
            #exec "devuelve.extend("+serverid+".find_videos(data))"
            servers_module = __import__("servers." + serverid)
            server_module = getattr(servers_module, serverid)
            devuelve.extend(server_module.find_videos(data))
        except ImportError:
            logger.info("Non esiste un server per riprodurre " + serverid)
        except:
            logger.info("Errore nel connettersi con " + serverid)
            import traceback, sys
            from pprint import pprint
            exc_type, exc_value, exc_tb = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_tb)
            for line in lines:
                line_splits = line.split("\n")
                for line_split in line_splits:
                    logger.error(line_split)

    return devuelve
def newest(categoria):
    logger.info("pelisalacarta.channels.cinetux newest")
    itemlist = []
    item = Item()
    try:
        if categoria == "peliculas":
            item.url = CHANNEL_HOST
            itemlist = peliculas(item)

            if itemlist[-1].action == "peliculas":
                itemlist.pop()

        if categoria == "documentales":
            item.url = urlparse.urljoin(CHANNEL_HOST, "genero/documental/")
            itemlist = peliculas(item)

            if itemlist[-1].action == "peliculas":
                itemlist.pop()

    # The exception is caught so the "novedades" channel does not break if one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
def categorias(item):
    logger.info("[italiafilm.py] categorias")
    itemlist = []
    logger.error("io")

    data = scrapertools.cache_page(item.url)
    '''
    <a href="#">Categorie</a>
    <ul class="sub-menu">
        <li id="menu-item-22311" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-22311"><a href="http://www.italiafilms.tv/archivio-alfabetico-film-e-serie-tv/">Archivio alfabetico</a></li>
        <li id="menu-item-21089" class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-21089"><a href="http://www.italiafilms.tv/category/now-on-cinema/">Adesso Nei Cinema</a></li>
        <li id="menu-item-21090" class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-21090"><a href="http://www.italiafilms.tv/category/anime-e-cartoon/">Anime e Cartoon</a></li>
        <li id="menu-item-21091" class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-21091"><a href="http://www.italiafilms.tv/category/archivio-film/">Archivio Film</a></li>
        <li id="menu-item-21092" class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-21092"><a href="http://www.italiafilms.tv/category/film-animazione/">Film Animazione</a></li>
    '''
    data = scrapertools.find_single_match(data, '<a href=".">Categorie</a>(.*?)</div>')

    patron = '<li class="[^"]+"><a href="([^"]+)">([^<]+)</a></li>'
    patron = '<li[^>]+><a href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for url, title in matches:
        scrapedtitle = title
        scrapedurl = urlparse.urljoin(item.url, url)
        scrapedplot = ""
        scrapedthumbnail = ""
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(Item(channel=__channel__, action='peliculas', title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))

    return itemlist
def load_json(data):
    # callback to transform json string values to utf8
    def to_utf8(dct):
        rdct = {}
        for k, v in dct.items():
            if isinstance(v, (str, unicode)):
                rdct[k] = v.encode('utf8', 'ignore')
            else:
                rdct[k] = v
        return rdct

    try:
        import json
    except:
        try:
            import simplejson as json
        except:
            from lib import simplejson as json

    try:
        json_data = json.loads(data, object_hook=to_utf8)
        return json_data
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
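# Minimal usage sketch for load_json (assumption: Python 2, matching the "unicode"
# check above; the JSON literal is a placeholder). Returns None if parsing fails.
parsed = load_json('{"title": "Alg\u00fan t\u00edtulo", "year": "2015"}')
if parsed is not None:
    logger.info("title=%s" % parsed["title"])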
def get_all_videos(item):
    logger.info("tvalacarta.core.favoritos get_all_videos")
    itemlist = []

    # Build a list with the bookmark entries
    if usingsamba(BOOKMARK_PATH):
        ficheros = samba.get_files(BOOKMARK_PATH)
    else:
        ficheros = os.listdir(BOOKMARK_PATH)

    # Sort the list by file name (insertion order)
    ficheros.sort()

    # Fill the list
    for fichero in ficheros:
        try:
            # Read the bookmark
            canal, titulo, thumbnail, plot, server, url, fulltitle = readbookmark(fichero)
            if canal == "":
                canal = "favoritos"

            # Build the entry
            # "extra" carries the file name so the entry can be deleted later
            ## <-- fulltitle added with the title of the movie
            if server == "":
                itemlist.append(Item(channel=canal, action="play", url=url, server=server, title=fulltitle,
                                     thumbnail=thumbnail, plot=plot, fanart=thumbnail,
                                     extra=os.path.join(BOOKMARK_PATH, fichero), fulltitle=fulltitle, folder=False))
            else:
                itemlist.append(Item(channel=canal, action="play", url=url, server=server, title=fulltitle,
                                     thumbnail=thumbnail, plot=plot, fanart=thumbnail,
                                     extra=os.path.join(BOOKMARK_PATH, fichero), fulltitle=fulltitle, folder=False))
        except:
            for line in sys.exc_info():
                logger.error("%s" % line)

    return itemlist
def register_method(self, name, func):
    """
    This is a stand-up method for registering a class or function with the bot's
    API so that module developers can use it without having to import the other
    module. Once a class/function is registered, it is not unregistered unless
    API().deregister_method(name) is called. Of course, this module will not
    have to be imported into your own module; an instance of it will exist in
    var.store as var.store.api.

    To use the API, simply source it, or use the remote. Either:
        api = events.api
    or call it directly:
        events.api.register_method(...)
    -----------or------------------
        @events.api.register('name')

    I will also provide decorators to decorate your class/function and
    streamline your code.
    """
    if inspect.isclass(func) is True:
        type = 'class'
    elif inspect.isfunction(func) is True:
        type = 'function'
    else:
        logger.error('api(): registered object is not function or class')
        # bail out, otherwise "type" would be undefined below
        return False

    if name in self._db:
        logger.info('api(): \'%s\' already exists in api' % (name))
        return False

    self._db.update({name: {'object': func, 'type': type}})
    logger.info('api(): \'%s\' registered in api database' % (name))
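# Minimal usage sketch for register_method (assumptions: "events.api" is the API
# instance described in the docstring, and "greet" is a placeholder function).
def greet(nick):
    return 'hello, %s' % nick

events.api.register_method('greet', greet)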
def cleanDir(path, section, subsection):
    if not os.path.exists(path):
        logger.info('Directory %s has been processed and removed ...' % (path), 'CLEANDIR')
        return
    if core.FORCE_CLEAN and not core.FAILED:
        logger.info('Doing Forceful Clean of %s' % (path), 'CLEANDIR')
        rmDir(path)
        return
    try:
        minSize = int(core.CFG[section][subsection]['minSize'])
    except:
        minSize = 0
    try:
        delete_ignored = int(core.CFG[section][subsection]['delete_ignored'])
    except:
        delete_ignored = 0
    try:
        num_files = len(listMediaFiles(path, minSize=minSize, delete_ignored=delete_ignored))
    except:
        num_files = 'unknown'
    if num_files > 0:
        logger.info(
            "Directory %s still contains %s unprocessed file(s), skipping ..." % (path, num_files),
            'CLEANDIRS')
        return

    logger.info("Directory %s has been processed, removing ..." % (path), 'CLEANDIRS')
    try:
        shutil.rmtree(path, onerror=onerror)
    except:
        logger.error("Unable to delete directory %s" % (path))
def search(item, texto):
    logger.info("pelisalacarta.channels.cinehanwer search")

    texto = texto.replace(" ", "-")

    if item.url == "":
        item.url = "http://cinehanwer.com/estrenos/"

    if item.url == "http://cinehanwer.com/estrenos/":
        # Put the referer in item.extra
        item.extra = item.url
        #item.url = item.url+"search/query/"+texto+"/years/1950/on/undefined/showlist/all"
        item.url = "http://cinehanwer.com/buscar/?q=" + texto

        try:
            #return buscar(item)
            return peliculas(item)
        # The exception is caught so the global search does not break if one channel fails
        except:
            import sys
            for line in sys.exc_info():
                logger.error("%s" % line)
            return []

    if item.url == "http://series.cinehanwer.com":
        item.extra = item.url
        item.url = "http://series.cinehanwer.com/wp-admin/admin-ajax.php?action=dwls_search&s=" + texto

        try:
            return series_buscar(item)
        # The exception is caught so the global search does not break if one channel fails
        except:
            import sys
            for line in sys.exc_info():
                logger.error("%s" % line)
            return []
def get_newest(channel_name, categoria):
    logger.info("streamondemand.channels.novedades get_newest channel_name=" + channel_name + ", categoria=" + categoria)
    global list_newest

    # Request the new releases of the searched category (item.extra) from channel "channel_name".
    # If the channel has no new releases for that category, it returns an empty list.
    try:
        puede = True
        try:
            modulo = __import__('channels.%s' % channel_name, fromlist=["channels.%s" % channel_name])
        except:
            try:
                exec "import channels." + channel_name + " as modulo"
            except:
                puede = False
        if not puede:
            return

        logger.info("pelisalacarta.channels.novedades running channel " + modulo.__name__ + " " + modulo.__file__)
        list_result = modulo.newest(categoria)
        logger.info("streamondemand.channels.novedades.get_newest canal= %s %d resultados" % (channel_name, len(list_result)))

        for item in list_result:
            logger.info("streamondemand.channels.novedades.get_newest item=" + item.tostring())
            item.channel = channel_name
            list_newest.append(item)
    except:
        logger.error("No se pueden recuperar novedades de: " + channel_name)
        import traceback
        logger.error(traceback.format_exc())
def onAction(self, action):
    if action == ACTION_PREVIOUS_MENU:
        self.close()
    else:
        keycode = action.getButtonCode()
        if 61505 <= keycode <= 61530:
            if self.getControl(CTRL_ID_CAPS).isSelected() or self.getControl(CTRL_ID_MAYS).isSelected():
                keychar = chr(keycode - 61505 + ord("A"))
            else:
                keychar = chr(keycode - 61505 + ord("a"))
            self.getControl(CTRL_ID_TEXT).setLabel(self.getControl(CTRL_ID_TEXT).getLabel() + keychar)
            self.disableMayus()
        elif 192577 <= keycode <= 192602:
            if self.getControl(CTRL_ID_CAPS).isSelected() or self.getControl(CTRL_ID_MAYS).isSelected():
                keychar = chr(keycode - 192577 + ord("a"))
            else:
                keychar = chr(keycode - 192577 + ord("A"))
            self.getControl(CTRL_ID_TEXT).setLabel(self.getControl(CTRL_ID_TEXT).getLabel() + keychar)
            self.disableMayus()
        elif 61536 <= keycode <= 61545:
            self.onClick(keycode - 61536 + 48)
        elif keycode == 61472:
            self.onClick(CTRL_ID_SPACE)
        elif keycode == 61448:
            self.onClick(CTRL_ID_BACK)
        elif keycode != 0:
            s = "Unattended keycode: " + str(action.getButtonCode())
            logger.error("%s" % s)
def findepisodevideo(item):
    logger.info("[SerieTVU.py]==> findepisodevideo")
    try:
        # Download the page
        data = scrapertools.anti_cloudflare(item.url, headers=headers)

        # Take the block specific to the requested season
        patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
        blocco = scrapertools.find_single_match(data, patron)

        # Extract the episode
        patron = r'<a data-id="%s[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">' % item.extra[0][1].lstrip("0")
        matches = re.compile(patron, re.DOTALL).findall(blocco)

        itemlist = servertools.find_video_items(data=matches[0][0])
        # I could not find a better way than this; if somebody has a better method
        # for extracting the video, please fix it.
        if len(itemlist) > 1:
            itemlist.remove(itemlist[1])

        server = re.sub(r'[-\[\]\s]+', '', itemlist[0].title)
        itemlist[0].title = "".join(["[%s] " % color(server, 'orange'), item.title])
        itemlist[0].fulltitle = item.fulltitle
        itemlist[0].show = item.show
        itemlist[0].thumbnail = matches[0][1]
        itemlist[0].channel = __channel__
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

    return itemlist
def save_library_episodes(path, episodelist, serie, silent=False, overwrite=True): """ guarda en la ruta indicada todos los capitulos incluidos en la lista episodelist @type path: str @param path: ruta donde guardar los episodios @type episodelist: list @param episodelist: listado de items que representan los episodios que se van a guardar. @type serie: item @param serie: serie de la que se van a guardar los episodios @type silent: bool @param silent: establece si se muestra la notificación @param overwrite: permite sobreescribir los ficheros existentes @type overwrite: bool @rtype insertados: int @return: el número de episodios insertados @rtype sobreescritos: int @return: el número de episodios sobreescritos @rtype fallidos: int @return: el número de episodios fallidos """ logger.info() # No hay lista de episodios, no hay nada que guardar if not len(episodelist): logger.info("No hay lista de episodios, salimos sin crear strm") return 0, 0, 0 insertados = 0 sobreescritos = 0 fallidos = 0 news_in_playcounts = {} # Listamos todos los ficheros de la serie, asi evitamos tener que comprobar si existe uno por uno ficheros = os.listdir(path) ficheros = [filetools.join(path, f) for f in ficheros] # Silent es para no mostrar progreso (para library_service) if not silent: # progress dialog p_dialog = platformtools.dialog_progress('mitvspain', 'Añadiendo episodios...') p_dialog.update(0, 'Añadiendo episodio...') new_episodelist = [] # Obtenemos el numero de temporada y episodio y descartamos los q no lo sean for e in episodelist: try: season_episode = scrapertools.get_season_and_episode(e.title) e.infoLabels = serie.infoLabels e.contentSeason, e.contentEpisodeNumber = season_episode.split("x") new_episodelist.append(e) except: continue # No hay lista de episodios, no hay nada que guardar if not len(new_episodelist): logger.info("No hay lista de episodios, salimos sin crear strm") return 0, 0, 0 # fix float porque la division se hace mal en python 2.x t = float(100) / len(new_episodelist) for i, e in enumerate(scraper.sort_episode_list(new_episodelist)): if not silent: p_dialog.update(int(math.ceil((i + 1) * t)), 'Añadiendo episodio...', e.title) season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2)) strm_path = filetools.join(path, "%s.strm" % season_episode) nfo_path = filetools.join(path, "%s.nfo" % season_episode) json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower()) strm_exists = strm_path in ficheros nfo_exists = nfo_path in ficheros json_exists = json_path in ficheros if not strm_exists: # Si no existe season_episode.strm añadirlo item_strm = Item(action='play_from_library', channel='biblioteca', strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={}) item_strm.contentSeason = e.contentSeason item_strm.contentEpisodeNumber = e.contentEpisodeNumber item_strm.contentType = e.contentType item_strm.contentTitle = season_episode # FILTERTOOLS if item_strm.list_language: # si tvshow.nfo tiene filtro se le pasa al item_strm que se va a generar if "library_filter_show" in serie: item_strm.library_filter_show = serie.library_filter_show if item_strm.library_filter_show == "": logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar") # logger.debug("item_strm" + item_strm.tostring('\n')) # logger.debug("serie " + serie.tostring('\n')) strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl())) item_nfo = None if not nfo_exists and e.infoLabels["code"]: # Si no existe season_episode.nfo 
añadirlo scraper.find_and_set_infoLabels(e) head_nfo = scraper.get_nfo(e) item_nfo = e.clone(channel="biblioteca", url="", action='findvideos', strm_path=strm_path.replace(TVSHOWS_PATH, "")) nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson()) # Solo si existen season_episode.nfo y season_episode.strm continuamos if nfo_exists and strm_exists: if not json_exists or overwrite: # Obtenemos infoLabel del episodio if not item_nfo: head_nfo, item_nfo = read_nfo(nfo_path) e.infoLabels = item_nfo.infoLabels if filetools.write(json_path, e.tojson()): if not json_exists: logger.info("Insertado: %s" % json_path) insertados += 1 # Marcamos episodio como no visto news_in_playcounts[season_episode] = 0 # Marcamos la temporada como no vista news_in_playcounts["season %s" % e.contentSeason] = 0 # Marcamos la serie como no vista # logger.debug("serie " + serie.tostring('\n')) news_in_playcounts[serie.contentTitle] = 0 else: logger.info("Sobreescrito: %s" % json_path) sobreescritos += 1 else: logger.info("Fallido: %s" % json_path) fallidos += 1 else: logger.info("Fallido: %s" % json_path) fallidos += 1 if not silent and p_dialog.iscanceled(): break if not silent: p_dialog.close() if news_in_playcounts: # Si hay nuevos episodios los marcamos como no vistos en tvshow.nfo ... tvshow_path = filetools.join(path, "tvshow.nfo") try: import datetime head_nfo, tvshow_item = read_nfo(tvshow_path) tvshow_item.library_playcounts.update(news_in_playcounts) if tvshow_item.active == 30: tvshow_item.active = 1 update_last = datetime.date.today() tvshow_item.update_last = update_last.strftime('%Y-%m-%d') update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active)) tvshow_item.update_next = update_next.strftime('%Y-%m-%d') filetools.write(tvshow_path, head_nfo + tvshow_item.tojson()) except: logger.error("Error al actualizar tvshow.nfo") fallidos = -1 else: # ... si ha sido correcto actualizamos la biblioteca de Kodi if config.is_xbmc() and not silent: from platformcode import xbmc_library xbmc_library.update(FOLDER_TVSHOWS, filetools.basename(path)) if fallidos == len(episodelist): fallidos = -1 logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" % (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos)) return insertados, sobreescritos, fallidos
def onClick(self, id): # Valores por defecto if id == 10006: if self.custom_button is not None: try: cb_channel = __import__('channels.%s' % self.channel, None, None, ["channels.%s" % self.channel]) except ImportError: logger.error('Imposible importar %s' % self.channel) else: self.return_value = getattr( cb_channel, self.custom_button['function'])(self.item) if self.custom_button["close"]: self.close() else: for c in self.list_controls: if c["type"] == "text": c["control"].setText(c["default"]) self.values[c["id"]] = c["default"] if c["type"] == "bool": c["control"].setSelected(c["default"]) self.values[c["id"]] = c["default"] if c["type"] == "list": c["label"].setLabel(c["lvalues"][c["default"]]) self.values[c["id"]] = c["default"] self.evaluate_conditions() self.dispose_controls(self.index, force=True) self.check_default() self.check_ok() # Boton Cancelar y [X] if id == 10003 or id == 10005: self.close() # Boton Aceptar if id == 10004: if not self.callback: for v in self.values: config.set_setting(v, self.values[v], self.channel) self.close() else: self.close() cb_channel = None try: cb_channel = __import__('channels.%s' % self.channel, None, None, ["channels.%s" % self.channel]) except ImportError: logger.error('Imposible importar %s' % self.channel) self.return_value = getattr(cb_channel, self.callback)(self.item, self.values) # Controles de ajustes, si se cambia el valor de un ajuste, cambiamos el valor guardado en el diccionario de # valores # Obtenemos el control sobre el que se ha echo click control = self.getControl(id) # Lo buscamos en el listado de controles for cont in self.list_controls: # Si el control es un "downBtn" o "upBtn" son los botones del "list" # en este caso cambiamos el valor del list if cont["type"] == "list" and (cont["downBtn"] == control or cont["upBtn"] == control): # Para bajar una posicion if cont["downBtn"] == control: index = cont["lvalues"].index(cont["label"].getLabel()) if index > 0: cont["label"].setLabel(cont["lvalues"][index - 1]) # Para subir una posicion elif cont["upBtn"] == control: index = cont["lvalues"].index(cont["label"].getLabel()) if index < len(cont["lvalues"]) - 1: cont["label"].setLabel(cont["lvalues"][index + 1]) # Guardamos el nuevo valor en el diccionario de valores self.values[cont["id"]] = cont["lvalues"].index( cont["label"].getLabel()) # Si esl control es un "bool", guardamos el nuevo valor True/False if cont["type"] == "bool" and cont["control"] == control: self.values[cont["id"]] = bool(cont["control"].isSelected()) # Si esl control es un "text", guardamos el nuevo valor if cont["type"] == "text" and cont["control"] == control: # Versiones antiguas requieren abrir el teclado manualmente if xbmcgui.ControlEdit == ControlEdit: import xbmc keyboard = xbmc.Keyboard(cont["control"].getText(), cont["control"].getLabel(), cont["control"].isPassword) keyboard.setHiddenInput(cont["control"].isPassword) keyboard.doModal() if keyboard.isConfirmed(): cont["control"].setText(keyboard.getText()) self.values[cont["id"]] = cont["control"].getText() self.evaluate_conditions() self.dispose_controls(self.index, force=True) self.check_default() self.check_ok()
def rmDir(dirName):
    logger.info("Deleting {0}".format(dirName))
    try:
        shutil.rmtree(dirName, onerror=onerror)
    except:
        logger.error("Unable to delete folder {0}".format(dirName))
def processDir(path):
    folders = []

    logger.info("Searching {0} for mediafiles to post-process ...".format(path))
    sync = [o for o in os.listdir(path) if os.path.splitext(o)[1] in ['.!sync', '.bts']]
    # search for single files and move them into their own folder for post-processing
    for mediafile in [os.path.join(path, o) for o in os.listdir(path) if
                      os.path.isfile(os.path.join(path, o))]:
        if len(sync) > 0:
            break
        if os.path.split(mediafile)[1] in ['Thumbs.db', 'thumbs.db']:
            continue
        try:
            logger.debug("Found file {0} in root directory {1}.".format(os.path.split(mediafile)[1], path))
            newPath = None
            fileExt = os.path.splitext(mediafile)[1]
            try:
                if fileExt in core.AUDIOCONTAINER:
                    f = beets.mediafile.MediaFile(mediafile)

                    # get artist and album info
                    artist = f.artist
                    album = f.album

                    # create new path
                    newPath = os.path.join(path, "{0} - {1}".format(sanitizeName(artist), sanitizeName(album)))
                elif fileExt in core.MEDIACONTAINER:
                    f = guessit.guessit(mediafile)

                    # get title
                    title = f.get('series') or f.get('title')

                    if not title:
                        title = os.path.splitext(os.path.basename(mediafile))[0]

                    newPath = os.path.join(path, sanitizeName(title))
            except Exception as e:
                logger.error("Exception parsing name for media file: {0}: {1}".format(os.path.split(mediafile)[1], e))

            if not newPath:
                title = os.path.splitext(os.path.basename(mediafile))[0]
                newPath = os.path.join(path, sanitizeName(title))

            try:
                newPath = newPath.encode(core.SYS_ENCODING)
            except:
                pass

            # Just fail-safe in case we already have a file with this clean-name
            # (was actually a bug from earlier code, but let's be safe).
            if os.path.isfile(newPath):
                newPath2 = os.path.join(os.path.join(os.path.split(newPath)[0], 'new'), os.path.split(newPath)[1])
                newPath = newPath2

            # create new path if it does not exist
            if not os.path.exists(newPath):
                makeDir(newPath)

            newfile = os.path.join(newPath, sanitizeName(os.path.split(mediafile)[1]))
            try:
                newfile = newfile.encode(core.SYS_ENCODING)
            except:
                pass

            # link file to its new path
            copy_link(mediafile, newfile, link)
        except Exception as e:
            logger.error("Failed to move {0} to its own directory: {1}".format(os.path.split(mediafile)[1], e))

    # removeEmptyFolders(path, removeRoot=False)

    if os.listdir(path):
        for dir in [os.path.join(path, o) for o in os.listdir(path) if
                    os.path.isdir(os.path.join(path, o))]:
            sync = [o for o in os.listdir(dir) if os.path.splitext(o)[1] in ['.!sync', '.bts']]
            if len(sync) > 0 or len(os.listdir(dir)) == 0:
                continue
            folders.extend([dir])

    return folders
def find_imdbid(dirName, inputName):
    imdbid = None

    logger.info('Attempting imdbID lookup for {0}'.format(inputName))

    # find imdbid in dirName
    logger.info('Searching folder and file names for imdbID ...')
    m = re.search('(tt\d{7})', dirName + inputName)
    if m:
        imdbid = m.group(1)
        logger.info("Found imdbID [{0}]".format(imdbid))
        return imdbid
    if os.path.isdir(dirName):
        for file in os.listdir(dirName):
            m = re.search('(tt\d{7})', file)
            if m:
                imdbid = m.group(1)
                logger.info("Found imdbID [{0}] via file name".format(imdbid))
                return imdbid
    if 'NZBPR__DNZB_MOREINFO' in os.environ:
        dnzb_more_info = os.environ.get('NZBPR__DNZB_MOREINFO', '')
        if dnzb_more_info != '':
            regex = re.compile(r'^http://www.imdb.com/title/(tt[0-9]+)/$', re.IGNORECASE)
            m = regex.match(dnzb_more_info)
            if m:
                imdbid = m.group(1)
                logger.info("Found imdbID [{0}] from DNZB-MoreInfo".format(imdbid))
                return imdbid
    logger.info('Searching IMDB for imdbID ...')
    guess = guessit.guessit(inputName)
    if guess:
        # Movie Title
        title = None
        if 'title' in guess:
            title = guess['title']

        # Movie Year
        year = None
        if 'year' in guess:
            year = guess['year']

        url = "http://www.omdbapi.com"

        logger.debug("Opening URL: {0}".format(url))

        try:
            r = requests.get(url, params={'y': year, 't': title},
                             verify=False, timeout=(60, 300))
        except requests.ConnectionError:
            logger.error("Unable to open URL {0}".format(url))
            return

        results = r.json()

        try:
            imdbid = results['imdbID']
        except:
            pass

        if imdbid:
            logger.info("Found imdbID [{0}]".format(imdbid))
            return imdbid

    logger.warning('Unable to find an imdbID for {0}'.format(inputName))
    return imdbid
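# Minimal usage sketch for find_imdbid (assumption: called from within this same module;
# the directory and release names below are placeholders).
imdbid = find_imdbid("/downloads/Some.Movie.2014.1080p", "Some.Movie.2014.1080p")
if imdbid:
    logger.info("Using imdbID [{0}] for further processing".format(imdbid))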
def save_library_movie(item): """ guarda en la libreria de peliculas el elemento item, con los valores que contiene. @type item: item @param item: elemento que se va a guardar. @rtype insertados: int @return: el número de elementos insertados @rtype sobreescritos: int @return: el número de elementos sobreescritos @rtype fallidos: int @return: el número de elementos fallidos o -1 si ha fallado todo """ logger.info() # logger.debug(item.tostring('\n')) insertados = 0 sobreescritos = 0 fallidos = 0 path = "" # Itentamos obtener el titulo correcto: # 1. contentTitle: Este deberia ser el sitio correcto, ya que title suele contener "Añadir a la biblioteca..." # 2. fulltitle # 3. title if not item.contentTitle: # Colocamos el titulo correcto en su sitio para que scraper lo localize if item.fulltitle: item.contentTitle = item.fulltitle else: item.contentTitle = item.title # Si llegados a este punto no tenemos titulo, salimos if not item.contentTitle or not item.channel: logger.debug("NO ENCONTRADO contentTitle") return 0, 0, -1 # Salimos sin guardar scraper_return = scraper.find_and_set_infoLabels(item) # Llegados a este punto podemos tener: # scraper_return = True: Un item con infoLabels con la información actualizada de la peli # scraper_return = False: Un item sin información de la peli (se ha dado a cancelar en la ventana) # item.infoLabels['code'] == "" : No se ha encontrado el identificador de IMDB necesario para continuar, salimos if not scraper_return or not item.infoLabels['code']: # TODO de momento si no hay resultado no añadimos nada, # aunq podriamos abrir un cuadro para introducir el identificador/nombre a mano logger.debug("NO ENCONTRADO EN SCRAPER O NO TIENE code") return 0, 0, -1 _id = item.infoLabels['code'][0] # progress dialog p_dialog = platformtools.dialog_progress('mitvspain', 'Añadiendo película...') if config.get_setting("original_title_folder", "biblioteca") == 1 and item.infoLabels['originaltitle']: base_name = item.infoLabels['originaltitle'] else: base_name = item.contentTitle base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").lower().encode("utf8") subcarpetas = os.listdir(MOVIES_PATH) for c in subcarpetas: code = scrapertools.find_single_match(c, '\[(.*?)\]') if code and code in item.infoLabels['code']: path = filetools.join(MOVIES_PATH, c) _id = code break if not path: # Crear carpeta path = filetools.join(MOVIES_PATH, ("%s [%s]" % (base_name, _id)).strip()) logger.info("Creando directorio pelicula:" + path) if not filetools.mkdir(path): logger.debug("No se ha podido crear el directorio") return 0, 0, -1 nfo_path = filetools.join(path, "%s [%s].nfo" % (base_name, _id)) strm_path = filetools.join(path, "%s.strm" % base_name) json_path = filetools.join(path, ("%s [%s].json" % (base_name, item.channel.lower()))) nfo_exists = filetools.exists(nfo_path) strm_exists = filetools.exists(strm_path) json_exists = filetools.exists(json_path) if not nfo_exists: # Creamos .nfo si no existe logger.info("Creando .nfo: " + nfo_path) head_nfo = scraper.get_nfo(item) item_nfo = Item(title=item.contentTitle, channel="biblioteca", action='findvideos', library_playcounts={"%s [%s]" % (base_name, _id): 0}, infoLabels=item.infoLabels, library_urls={}) else: # Si existe .nfo, pero estamos añadiendo un nuevo canal lo abrimos head_nfo, item_nfo = read_nfo(nfo_path) if not strm_exists: # Crear base_name.strm si no existe item_strm = Item(channel='biblioteca', action='play_from_library', strm_path=strm_path.replace(MOVIES_PATH, ""), contentType='movie', 
contentTitle=item.contentTitle) strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl())) item_nfo.strm_path = strm_path.replace(MOVIES_PATH, "") # Solo si existen item_nfo y .strm continuamos if item_nfo and strm_exists: if json_exists: logger.info("El fichero existe. Se sobreescribe") sobreescritos += 1 else: insertados += 1 if filetools.write(json_path, item.tojson()): p_dialog.update(100, 'Añadiendo película...', item.contentTitle) item_nfo.library_urls[item.channel] = item.url if filetools.write(nfo_path, head_nfo + item_nfo.tojson()): # actualizamos la biblioteca de Kodi con la pelicula if config.is_xbmc(): from platformcode import xbmc_library xbmc_library.update(FOLDER_MOVIES, filetools.basename(path) + "/") p_dialog.close() return insertados, sobreescritos, fallidos # Si llegamos a este punto es por q algo ha fallado logger.error("No se ha podido guardar %s en la biblioteca" % item.contentTitle) p_dialog.update(100, 'Fallo al añadir...', item.contentTitle) p_dialog.close() return 0, 0, -1
def add_serie_to_library(item, channel=None): """ Guarda contenido en la libreria de series. Este contenido puede ser uno de estos dos: - La serie con todos los capitulos incluidos en la lista episodelist. - Un solo capitulo descargado previamente en local. Para añadir episodios descargados en local, el item debe tener exclusivamente: - contentSerieName (o show): Titulo de la serie - contentTitle: titulo del episodio para extraer season_and_episode ("1x01 Piloto") - title: titulo a mostrar junto al listado de enlaces -findvideos- ("Reproducir video local") - infoLabels["tmdb_id"] o infoLabels["imdb_id"] - contentType != "movie" - channel = "descargas" - url : ruta local al video @type item: item @param item: item que representa la serie a guardar @type channel: modulo @param channel: canal desde el que se guardara la serie. Por defecto se importara item.from_channel o item.channel """ logger.info("show=#" + item.show + "#") if item.channel == "descargas": itemlist = [item.clone()] else: # Esta marca es porque el item tiene algo más aparte en el atributo "extra" item.action = item.extra if isinstance(item.extra, str) and "###" in item.extra: item.action = item.extra.split("###")[0] item.extra = item.extra.split("###")[1] if item.from_action: item.__dict__["action"] = item.__dict__.pop("from_action") if item.from_channel: item.__dict__["channel"] = item.__dict__.pop("from_channel") if not channel: try: channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel]) except ImportError: exec "import channels." + item.channel + " as channel" # Obtiene el listado de episodios itemlist = getattr(channel, item.action)(item) insertados, sobreescritos, fallidos, path = save_library_tvshow(item, itemlist) if not insertados and not sobreescritos and not fallidos: platformtools.dialog_ok("Biblioteca", "ERROR, la serie NO se ha añadido a la biblioteca", "No se ha podido obtener ningun episodio") logger.error("La serie %s no se ha podido añadir a la biblioteca. No se ha podido obtener ningun episodio" % item.show) elif fallidos == -1: platformtools.dialog_ok("Biblioteca", "ERROR, la serie NO se ha añadido a la biblioteca") logger.error("La serie %s no se ha podido añadir a la biblioteca" % item.show) elif fallidos > 0: platformtools.dialog_ok("Biblioteca", "ERROR, la serie NO se ha añadido completa a la biblioteca") logger.error("No se han podido añadir %s episodios de la serie %s a la biblioteca" % (fallidos, item.show)) else: platformtools.dialog_ok("Biblioteca", "La serie se ha añadido a la biblioteca") logger.info("Se han añadido %s episodios de la serie %s a la biblioteca" % (insertados, item.show)) if config.is_xbmc(): if config.get_setting("sync_trakt_new_tvshow", "biblioteca"): import xbmc from platformcode import xbmc_library if config.get_setting("sync_trakt_new_tvshow_wait", "biblioteca"): # Comprobar que no se esta buscando contenido en la biblioteca de Kodi while xbmc.getCondVisibility('Library.IsScanningVideo()'): xbmc.sleep(1000) # Se lanza la sincronizacion para la biblioteca de Kodi xbmc_library.sync_trakt_kodi() # Se lanza la sincronización para la biblioteca de mitvspain xbmc_library.sync_trakt_mitvspain(path)
        import xbmcgui
        xbmcgui.Dialog().ok("Se ha producido un error", e.value)

    except InvalidAuthException, e:
        import xbmcgui
        xbmcgui.Dialog().ok("Se ha producido un error", e.value)

        # Invalidate current session
        config.set_setting("account_session", "")

        # Goes to main menu
        run(Item(action="selectchannel"))

    except urllib2.URLError, e:
        import traceback
        logger.error(traceback.format_exc())

        import xbmcgui
        ventana_error = xbmcgui.Dialog()

        # Catches the errors raised locally by the internal libraries
        if hasattr(e, 'reason'):
            logger.info("Razon del error, codigo: %d , Razon: %s" % (e.reason[0], e.reason[1]))
            texto = config.get_localized_string(30050)  # "No se puede conectar con el sitio web"
            ok = ventana_error.ok("plugin", texto)
        # Catches the errors that carry a response code from the requested external server
        elif hasattr(e, 'code'):
            logger.info("codigo de error HTTP : %d" % e.code)
            texto = (
                config.get_localized_string(30051) % e.code
def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent): status = 1 # 1 = failed | 0 = success root = 0 foundFile = 0 if clientAgent != 'manual' and not core.DOWNLOADINFO: logger.debug('Adding TORRENT download info for directory {0} to database'.format(inputDirectory)) myDB = nzbToMediaDB.DBConnection() inputDirectory1 = inputDirectory inputName1 = inputName try: encoded, inputDirectory1 = CharReplace(inputDirectory) encoded, inputName1 = CharReplace(inputName) except: pass controlValueDict = {"input_directory": text_type(inputDirectory1)} newValueDict = {"input_name": text_type(inputName1), "input_hash": text_type(inputHash), "input_id": text_type(inputID), "client_agent": text_type(clientAgent), "status": 0, "last_update": datetime.date.today().toordinal() } myDB.upsert("downloads", newValueDict, controlValueDict) logger.debug("Received Directory: {0} | Name: {1} | Category: {2}".format(inputDirectory, inputName, inputCategory)) # Confirm the category by parsing directory structure inputDirectory, inputName, inputCategory, root = core.category_search(inputDirectory, inputName, inputCategory, root, core.CATEGORIES) if inputCategory == "": inputCategory = "UNCAT" usercat = inputCategory try: inputName = inputName.encode(core.SYS_ENCODING) except UnicodeError: pass try: inputDirectory = inputDirectory.encode(core.SYS_ENCODING) except UnicodeError: pass logger.debug("Determined Directory: {0} | Name: {1} | Category: {2}".format (inputDirectory, inputName, inputCategory)) # auto-detect section section = core.CFG.findsection(inputCategory).isenabled() if section is None: section = core.CFG.findsection("ALL").isenabled() if section is None: logger.error('Category:[{0}] is not defined or is not enabled. ' 'Please rename it or ensure it is enabled for the appropriate section ' 'in your autoProcessMedia.cfg and try again.'.format (inputCategory)) return [-1, ""] else: usercat = "ALL" if len(section) > 1: logger.error('Category:[{0}] is not unique, {1} are using it. ' 'Please rename it or disable all other sections using the same category name ' 'in your autoProcessMedia.cfg and try again.'.format (usercat, section.keys())) return [-1, ""] if section: sectionName = section.keys()[0] logger.info('Auto-detected SECTION:{0}'.format(sectionName)) else: logger.error("Unable to locate a section with subsection:{0} " "enabled in your autoProcessMedia.cfg, exiting!".format (inputCategory)) return [-1, ""] section = dict(section[sectionName][usercat]) # Type cast to dict() to allow effective usage of .get() Torrent_NoLink = int(section.get("Torrent_NoLink", 0)) keep_archive = int(section.get("keep_archive", 0)) extract = int(section.get('extract', 0)) uniquePath = int(section.get("unique_path", 1)) if clientAgent != 'manual': core.pause_torrent(clientAgent, inputHash, inputID, inputName) # In case input is not directory, make sure to create one. # This way Processing is isolated. 
if not os.path.isdir(os.path.join(inputDirectory, inputName)): basename = os.path.basename(inputDirectory) basename = core.sanitizeName(inputName) \ if inputName == basename else os.path.splitext(core.sanitizeName(inputName))[0] outputDestination = os.path.join(core.OUTPUTDIRECTORY, inputCategory, basename) elif uniquePath: outputDestination = os.path.normpath( core.os.path.join(core.OUTPUTDIRECTORY, inputCategory, core.sanitizeName(inputName))) else: outputDestination = os.path.normpath( core.os.path.join(core.OUTPUTDIRECTORY, inputCategory)) try: outputDestination = outputDestination.encode(core.SYS_ENCODING) except UnicodeError: pass if outputDestination in inputDirectory: outputDestination = inputDirectory logger.info("Output directory set to: {0}".format(outputDestination)) if core.SAFE_MODE and outputDestination == core.TORRENT_DEFAULTDIR: logger.error('The output directory:[{0}] is the Download Directory. ' 'Edit outputDirectory in autoProcessMedia.cfg. Exiting'.format (inputDirectory)) return [-1, ""] logger.debug("Scanning files in directory: {0}".format(inputDirectory)) if sectionName == 'HeadPhones': core.NOFLATTEN.extend( inputCategory) # Make sure we preserve folder structure for HeadPhones. now = datetime.datetime.now() if extract == 1: inputFiles = core.listMediaFiles(inputDirectory, archives=False) else: inputFiles = core.listMediaFiles(inputDirectory) logger.debug("Found {0} files in {1}".format(len(inputFiles), inputDirectory)) for inputFile in inputFiles: filePath = os.path.dirname(inputFile) fileName, fileExt = os.path.splitext(os.path.basename(inputFile)) fullFileName = os.path.basename(inputFile) targetFile = core.os.path.join(outputDestination, fullFileName) if inputCategory in core.NOFLATTEN: if not os.path.basename(filePath) in outputDestination: targetFile = core.os.path.join( core.os.path.join(outputDestination, os.path.basename(filePath)), fullFileName) logger.debug("Setting outputDestination to {0} to preserve folder structure".format (os.path.dirname(targetFile))) try: targetFile = targetFile.encode(core.SYS_ENCODING) except UnicodeError: pass if root == 1: if not foundFile: logger.debug("Looking for {0} in: {1}".format(inputName, inputFile)) if any([core.sanitizeName(inputName) in core.sanitizeName(inputFile), core.sanitizeName(fileName) in core.sanitizeName(inputName)]): foundFile = True logger.debug("Found file {0} that matches Torrent Name {1}".format (fullFileName, inputName)) else: continue if root == 2: mtime_lapse = now - datetime.datetime.fromtimestamp(os.path.getmtime(inputFile)) ctime_lapse = now - datetime.datetime.fromtimestamp(os.path.getctime(inputFile)) if not foundFile: logger.debug("Looking for files with modified/created dates less than 5 minutes old.") if (mtime_lapse < datetime.timedelta(minutes=5)) or (ctime_lapse < datetime.timedelta(minutes=5)): foundFile = True logger.debug("Found file {0} with date modified/created less than 5 minutes ago.".format (fullFileName)) else: continue # This file has not been recently moved or created, skip it if Torrent_NoLink == 0: try: core.copy_link(inputFile, targetFile, core.USELINK) core.rmReadOnly(targetFile) except: logger.error("Failed to link: {0} to {1}".format(inputFile, targetFile)) inputName, outputDestination = convert_to_ascii(inputName, outputDestination) if extract == 1: logger.debug('Checking for archives to extract in directory: {0}'.format(inputDirectory)) core.extractFiles(inputDirectory, outputDestination, keep_archive) if inputCategory not in core.NOFLATTEN: # don't flatten hp in 
case multi cd albums, and we need to copy this back later. core.flatten(outputDestination) # Now check if video files exist in destination: if sectionName in ["SickBeard", "NzbDrone", "CouchPotato"]: numVideos = len( core.listMediaFiles(outputDestination, media=True, audio=False, meta=False, archives=False)) if numVideos > 0: logger.info("Found {0} media files in {1}".format(numVideos, outputDestination)) status = 0 elif extract != 1: logger.info("Found no media files in {0}. Sending to {1} to process".format(outputDestination, sectionName)) status = 0 else: logger.warning("Found no media files in {0}".format(outputDestination)) # Only these sections can handling failed downloads # so make sure everything else gets through without the check for failed if sectionName not in ['CouchPotato', 'SickBeard', 'NzbDrone']: status = 0 logger.info("Calling {0}:{1} to post-process:{2}".format(sectionName, usercat, inputName)) if core.TORRENT_CHMOD_DIRECTORY: core.rchmod(outputDestination, core.TORRENT_CHMOD_DIRECTORY) result = [0, ""] if sectionName == 'UserScript': result = external_script(outputDestination, inputName, inputCategory, section) elif sectionName == 'CouchPotato': result = core.autoProcessMovie().process(sectionName, outputDestination, inputName, status, clientAgent, inputHash, inputCategory) elif sectionName in ['SickBeard', 'NzbDrone']: if inputHash: inputHash = inputHash.upper() result = core.autoProcessTV().processEpisode(sectionName, outputDestination, inputName, status, clientAgent, inputHash, inputCategory) elif sectionName == 'HeadPhones': result = core.autoProcessMusic().process(sectionName, outputDestination, inputName, status, clientAgent, inputCategory) elif sectionName == 'Mylar': result = core.autoProcessComics().processEpisode(sectionName, outputDestination, inputName, status, clientAgent, inputCategory) elif sectionName == 'Gamez': result = core.autoProcessGames().process(sectionName, outputDestination, inputName, status, clientAgent, inputCategory) plex_update(inputCategory) if result[0] != 0: if not core.TORRENT_RESUME_ON_FAILURE: logger.error("A problem was reported in the autoProcess* script. " "Torrent won't resume seeding (settings)") elif clientAgent != 'manual': logger.error("A problem was reported in the autoProcess* script. " "If torrent was paused we will resume seeding") core.resume_torrent(clientAgent, inputHash, inputID, inputName) else: if clientAgent != 'manual': # update download status in our DB core.update_downloadInfoStatus(inputName, 1) # remove torrent if core.USELINK == 'move-sym' and not core.DELETE_ORIGINAL == 1: logger.debug('Checking for sym-links to re-direct in: {0}'.format(inputDirectory)) for dirpath, dirs, files in os.walk(inputDirectory): for file in files: logger.debug('Checking symlink: {0}'.format(os.path.join(dirpath, file))) replace_links(os.path.join(dirpath, file)) core.remove_torrent(clientAgent, inputHash, inputID, inputName) if not sectionName == 'UserScript': # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN # cleanup our processing folders of any misc unwanted files and empty directories core.cleanDir(outputDestination, sectionName, inputCategory) return result
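# Illustrative sketch (not part of the original script): processTorrent() above only
# links files that either match the torrent name (root == 1) or were modified/created
# within the last 5 minutes (root == 2). A standard-library-only version of that
# recency check is sketched below; the helper name is_recently_touched() is hypothetical.
import datetime
import os

def is_recently_touched(path, minutes=5):
    now = datetime.datetime.now()
    mtime_lapse = now - datetime.datetime.fromtimestamp(os.path.getmtime(path))
    ctime_lapse = now - datetime.datetime.fromtimestamp(os.path.getctime(path))
    # True when either timestamp is younger than the given window
    return (mtime_lapse < datetime.timedelta(minutes=minutes) or
            ctime_lapse < datetime.timedelta(minutes=minutes))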
def run(item=None): logger.info("tvalacarta.platformcode.launcher run") if item is None: # Extract item from sys.argv if sys.argv[2]: item = Item().fromurl(sys.argv[2]) params = "" # If no item, this is mainlist else: item = Item(action="selectchannel") params = "" logger.info(item.tostring()) # If item has no action, stops here if item.action == "": logger.info("Item sin accion") return try: # Action for main menu in channelselector if item.action == "selectchannel": import channelselector import plugintools # Check for updates only on first screen if config.get_setting("check_for_plugin_updates") == "true": logger.info( "tvalacarta.platformcode.launcher Check for plugin updates enabled" ) from core import updater try: config.set_setting("plugin_updates_available", "0") new_published_version_tag, number_of_updates = updater.get_available_updates( ) config.set_setting("plugin_updates_available", str(number_of_updates)) # TODO: Que channelselector devuelva items, procesados por el mismo add_items_to_kodi_directory que el resto itemlist = channelselector.mainlist( params, item.url, item.category) if new_published_version_tag != "": plugintools.show_notification( new_published_version_tag + " disponible", "Ya puedes descargar la nueva versión del plugin\n" "desde el menú Configuración") except: import traceback logger.error(traceback.format_exc()) plugintools.message("No se puede conectar", "No ha sido posible comprobar", "si hay actualizaciones") logger.error("Fallo al verificar la actualización") config.set_setting("plugin_updates_available", "0") itemlist = channelselector.mainlist( params, item.url, item.category) else: logger.info("Check for plugin updates disabled") config.set_setting("plugin_updates_available", "0") itemlist = channelselector.mainlist(params, item.url, item.category) #xbmctools.add_items_to_kodi_directory(itemlist, item) # Action for updating plugin elif item.action == "update": from core import updater updater.update(item) if config.get_system_platform() != "xbox": import xbmc xbmc.executebuiltin("Container.Refresh") # Action for channel types on channelselector: movies, series, etc. 
elif item.action == "channeltypes": import channelselector # TODO: Que channelselector devuelva items, procesados por el mismo add_items_to_kodi_directory que el resto itemlist = channelselector.channeltypes(params, item.url, item.category) #xbmctools.add_items_to_kodi_directory(itemlist, item) # Action for channel listing on channelselector elif item.action == "listchannels": import channelselector # TODO: Que channelselector devuelva items, procesados por el mismo add_items_to_kodi_directory que el resto itemlist = channelselector.listchannels(params, item.url, item.category) #xbmctools.add_items_to_kodi_directory(itemlist, item) elif item.action == "player_directo": from core import window_player_background from channels import directos import plugintools window = window_player_background.PlayerWindowBackground( "player_background.xml", plugintools.get_runtime_path()) window.setItemlist(directos.build_channel_list()) window.setCurrentPosition(item.position) window.doModal() del window return # Action in certain channel specified in "action" and "channel" parameters else: # Checks if channel exists channel_file = os.path.join(config.get_runtime_path(), 'channels', item.channel + ".py") logger.info("tvalacarta.platformcode.launcher channel_file=%s" % channel_file) channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel]) logger.info( "tvalacarta.platformcode.launcher running channel {0} {1}". format(channel.__name__, channel.__file__)) # Special play action if item.action == "play": logger.info("tvalacarta.platformcode.launcher play") # First checks if channel has a "play" function if hasattr(channel, 'play'): logger.info( "tvalacarta.platformcode.launcher executing channel 'play' method" ) itemlist = channel.play(item) if len(itemlist) > 0: item = itemlist[0] xbmctools.play_video(item) else: import xbmcgui ventana_error = xbmcgui.Dialog() ok = ventana_error.ok("plugin", "No hay nada para reproducir") else: logger.info( "tvalacarta.platformcode.launcher executing core 'play' method" ) xbmctools.play_video(item) elif item.action.startswith("serie_options##"): from core import suscription import xbmcgui dia = xbmcgui.Dialog() opciones = [] suscription_item = Item(channel=item.channel, title=item.show, url=item.url, action=item.action.split("##")[1], extra=item.extra, plot=item.plot, show=item.show, thumbnail=item.thumbnail) if not suscription.already_suscribed(suscription_item): opciones.append("Activar descarga automática") else: opciones.append("Cancelar descarga automática") #opciones.append("Añadir esta serie a favoritos") opciones.append("Descargar todos los episodios") seleccion = dia.select("Elige una opción", opciones) # "Elige una opción" if seleccion == 0: if not suscription.already_suscribed(suscription_item): suscription.append_suscription(suscription_item) yes_pressed = xbmcgui.Dialog().yesno( "Descarga automática activada", "A partir de ahora los nuevos vídeos que se publiquen de este programa se descargarán automáticamente, podrás encontrarlos en la sección 'Descargas'." ) if yes_pressed: download_all_episodes(suscription_item, channel) else: suscription.remove_suscription(suscription_item) xbmcgui.Dialog().ok( "Descarga automática cancelada", "Los vídeos que hayas descargado se mantienen, pero los nuevos ya no se descargarán ellos solos." 
) elif seleccion == 1: downloadtools.download_all_episodes(item, channel) ''' elif seleccion==1: from core import favoritos from core import downloadtools import xbmc keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(item.show)+" ["+item.channel+"]") keyboard.doModal() if keyboard.isConfirmed(): title = keyboard.getText() favoritos.savebookmark(titulo=title,url=item.url,thumbnail=item.thumbnail,server="",plot=item.plot,fulltitle=title) advertencia = xbmcgui.Dialog() resultado = advertencia.ok(config.get_localized_string(30102) , title , config.get_localized_string(30108)) # 'se ha añadido a favoritos' return ''' elif item.action == "search": logger.info("tvalacarta.platformcode.launcher search") import xbmc keyboard = xbmc.Keyboard("") keyboard.doModal() itemlist = [] if (keyboard.isConfirmed()): tecleado = keyboard.getText() #tecleado = tecleado.replace(" ", "+") itemlist = channel.search(item, tecleado) if itemlist is None: itemlist = [] xbmctools.add_items_to_kodi_directory(itemlist, item) else: logger.info( "tvalacarta.platformcode.launcher executing channel '" + item.action + "' method") exec "itemlist = channel." + item.action + "(item)" if itemlist is None: itemlist = [] # Activa el modo biblioteca para todos los canales genéricos, para que se vea el argumento handle = sys.argv[1] xbmcplugin.setContent(int(handle), "movies") # Añade los items a la lista de XBMC xbmctools.add_items_to_kodi_directory(itemlist, item) except UserException, e: import xbmcgui xbmcgui.Dialog().ok("Se ha producido un error", e.value)
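# Illustrative sketch (not part of the original launcher): run() above encodes the
# subscription sub-action inside the action string itself, using the
# "serie_options##<method>" convention. A minimal version of that parsing, with a
# hypothetical action value:
action = "serie_options##episodios"
if action.startswith("serie_options##"):
    channel_method = action.split("##")[1]
    print(channel_method)  # episodios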
def initialize(section=None): global NZBGET_POSTPROCESS_ERROR, NZBGET_POSTPROCESS_NONE, NZBGET_POSTPROCESS_PARCHECK, NZBGET_POSTPROCESS_SUCCESS, \ NZBTOMEDIA_TIMEOUT, FORKS, FORK_DEFAULT, FORK_FAILED_TORRENT, FORK_FAILED, \ NZBTOMEDIA_BRANCH, NZBTOMEDIA_VERSION, NEWEST_VERSION, NEWEST_VERSION_STRING, VERSION_NOTIFY, SYS_ARGV, CFG, \ SABNZB_NO_OF_ARGUMENTS, SABNZB_0717_NO_OF_ARGUMENTS, CATEGORIES, TORRENT_CLIENTAGENT, USELINK, OUTPUTDIRECTORY, \ NOFLATTEN, UTORRENTPWD, UTORRENTUSR, UTORRENTWEBUI, DELUGEHOST, DELUGEPORT, DELUGEUSR, DELUGEPWD, VLEVEL, \ TRANSMISSIONHOST, TRANSMISSIONPORT, TRANSMISSIONPWD, TRANSMISSIONUSR, COMPRESSEDCONTAINER, MEDIACONTAINER, \ METACONTAINER, SECTIONS, ALL_FORKS, TEST_FILE, GENERALOPTS, LOG_GIT, GROUPS, SEVENZIP, CONCAT, VCRF, \ __INITIALIZED__, AUTO_UPDATE, APP_FILENAME, USER_DELAY, APP_NAME, TRANSCODE, DEFAULTS, GIT_PATH, GIT_USER, \ GIT_BRANCH, GIT_REPO, SYS_ENCODING, NZB_CLIENTAGENT, SABNZBDHOST, SABNZBDPORT, SABNZBDAPIKEY, \ DUPLICATE, IGNOREEXTENSIONS, VEXTENSION, OUTPUTVIDEOPATH, PROCESSOUTPUT, VCODEC, VCODEC_ALLOW, VPRESET, \ VFRAMERATE, LOG_DB, VBITRATE, VRESOLUTION, ALANGUAGE, AINCLUDE, ACODEC, ACODEC_ALLOW, ABITRATE, FAILED, \ ACODEC2, ACODEC2_ALLOW, ABITRATE2, ACODEC3, ACODEC3_ALLOW, ABITRATE3, ALLOWSUBS, SEXTRACT, SEMBED, SLANGUAGES, \ SINCLUDE, SUBSDIR, SCODEC, OUTPUTFASTSTART, OUTPUTQUALITYPERCENT, BURN, GETSUBS, HWACCEL, LOG_DIR, LOG_FILE, \ NICENESS, LOG_DEBUG, FORCE_CLEAN, FFMPEG_PATH, FFMPEG, FFPROBE, AUDIOCONTAINER, EXTCONTAINER, TORRENT_CLASS, \ DELETE_ORIGINAL, TORRENT_CHMOD_DIRECTORY, PASSWORDSFILE, USER_DELAY, USER_SCRIPT, USER_SCRIPT_CLEAN, USER_SCRIPT_MEDIAEXTENSIONS, \ USER_SCRIPT_PARAM, USER_SCRIPT_RUNONCE, USER_SCRIPT_SUCCESSCODES, DOWNLOADINFO, CHECK_MEDIA, SAFE_MODE, \ TORRENT_DEFAULTDIR, TORRENT_RESUME_ON_FAILURE, NZB_DEFAULTDIR, REMOTEPATHS, LOG_ENV, PID_FILE, MYAPP, ACHANNELS, ACHANNELS2, ACHANNELS3, \ PLEXSSL, PLEXHOST, PLEXPORT, PLEXTOKEN, PLEXSEC, TORRENT_RESUME if __INITIALIZED__: return False if os.environ.has_key('NTM_LOGFILE'): LOG_FILE = os.environ['NTM_LOGFILE'] LOG_DIR = os.path.split(LOG_FILE)[0] if not makeDir(LOG_DIR): print("No log folder, logging to screen only") MYAPP = RunningProcess() while MYAPP.alreadyrunning(): print("Waiting for existing session to end") time.sleep(30) try: locale.setlocale(locale.LC_ALL, "") SYS_ENCODING = locale.getpreferredencoding() except (locale.Error, IOError): pass # For OSes that are poorly configured I'll just randomly force UTF-8 if not SYS_ENCODING or SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'): SYS_ENCODING = 'UTF-8' if not hasattr(sys, "setdefaultencoding"): reload(sys) try: # pylint: disable=E1101 # On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError sys.setdefaultencoding(SYS_ENCODING) except: print 'Sorry, you MUST add the nzbToMedia folder to the PYTHONPATH environment variable' print 'or find another way to force Python to use ' + SYS_ENCODING + ' for string encoding.' if os.environ.has_key('NZBOP_SCRIPTDIR'): sys.exit(NZBGET_POSTPROCESS_ERROR) else: sys.exit(1) # init logging logger.ntm_log_instance.initLogging() # run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options. if not config.migrate(): logger.error("Unable to migrate config file %s, exiting ..." % (CONFIG_FILE)) if os.environ.has_key('NZBOP_SCRIPTDIR'): pass # We will try and read config from Environment. 
else: sys.exit(-1) # run migrate to convert NzbGet data from old cfg style to new cfg style if os.environ.has_key('NZBOP_SCRIPTDIR'): CFG = config.addnzbget() else: # load newly migrated config logger.info("Loading config from [%s]" % (CONFIG_FILE)) CFG = config() # Enable/Disable DEBUG Logging LOG_DEBUG = int(CFG['General']['log_debug']) LOG_DB = int(CFG['General']['log_db']) LOG_ENV = int(CFG['General']['log_env']) LOG_GIT = int(CFG['General']['log_git']) if LOG_ENV: for item in os.environ: logger.info("%s: %s" % (item, os.environ[item]), "ENVIRONMENT") # initialize the main SB database nzbToMediaDB.upgradeDatabase(nzbToMediaDB.DBConnection(), mainDB.InitialSchema) # Set Version and GIT variables NZBTOMEDIA_VERSION = '10.15' VERSION_NOTIFY = int(CFG['General']['version_notify']) AUTO_UPDATE = int(CFG['General']['auto_update']) GIT_REPO = 'nzbToMedia' GIT_PATH = CFG['General']['git_path'] GIT_USER = CFG['General']['git_user'] or 'clinton-hall' GIT_BRANCH = CFG['General']['git_branch'] or 'master' FORCE_CLEAN = int(CFG["General"]["force_clean"]) FFMPEG_PATH = CFG["General"]["ffmpeg_path"] CHECK_MEDIA = int(CFG["General"]["check_media"]) SAFE_MODE = int(CFG["General"]["safe_mode"]) # Check for updates via GitHUB if versionCheck.CheckVersion().check_for_new_version(): if AUTO_UPDATE == 1: logger.info("Auto-Updating nzbToMedia, Please wait ...") updated = versionCheck.CheckVersion().update() if updated: # restart nzbToMedia try: del MYAPP except: pass restart() else: logger.error("Update wasn't successful, not restarting. Check your log for more information.") # Set Current Version logger.info( 'nzbToMedia Version:' + NZBTOMEDIA_VERSION + ' Branch:' + GIT_BRANCH + ' (' + platform.system() + ' ' + platform.release() + ')') if int(CFG["WakeOnLan"]["wake"]) == 1: WakeUp() NZB_CLIENTAGENT = CFG["Nzb"]["clientAgent"] # sabnzbd SABNZBDHOST = CFG["Nzb"]["sabnzbd_host"] SABNZBDPORT = int(CFG["Nzb"]["sabnzbd_port"]) SABNZBDAPIKEY = CFG["Nzb"]["sabnzbd_apikey"] NZB_DEFAULTDIR = CFG["Nzb"]["default_downloadDirectory"] GROUPS = CFG["Custom"]["remove_group"] if isinstance(GROUPS, str): GROUPS = GROUPS.split(',') if GROUPS == ['']: GROUPS = None TORRENT_CLIENTAGENT = CFG["Torrent"]["clientAgent"] # utorrent | deluge | transmission | rtorrent | vuze |other USELINK = CFG["Torrent"]["useLink"] # no | hard | sym OUTPUTDIRECTORY = CFG["Torrent"]["outputDirectory"] # /abs/path/to/complete/ TORRENT_DEFAULTDIR = CFG["Torrent"]["default_downloadDirectory"] CATEGORIES = (CFG["Torrent"]["categories"]) # music,music_videos,pictures,software NOFLATTEN = (CFG["Torrent"]["noFlatten"]) if isinstance(NOFLATTEN, str): NOFLATTEN = NOFLATTEN.split(',') if isinstance(CATEGORIES, str): CATEGORIES = CATEGORIES.split(',') DELETE_ORIGINAL = int(CFG["Torrent"]["deleteOriginal"]) TORRENT_CHMOD_DIRECTORY = int(str(CFG["Torrent"]["chmodDirectory"]), 8) TORRENT_RESUME_ON_FAILURE = int(CFG["Torrent"]["resumeOnFailure"]) TORRENT_RESUME = int(CFG["Torrent"]["resume"]) UTORRENTWEBUI = CFG["Torrent"]["uTorrentWEBui"] # http://localhost:8090/gui/ UTORRENTUSR = CFG["Torrent"]["uTorrentUSR"] # mysecretusr UTORRENTPWD = CFG["Torrent"]["uTorrentPWD"] # mysecretpwr TRANSMISSIONHOST = CFG["Torrent"]["TransmissionHost"] # localhost TRANSMISSIONPORT = int(CFG["Torrent"]["TransmissionPort"]) TRANSMISSIONUSR = CFG["Torrent"]["TransmissionUSR"] # mysecretusr TRANSMISSIONPWD = CFG["Torrent"]["TransmissionPWD"] # mysecretpwr DELUGEHOST = CFG["Torrent"]["DelugeHost"] # localhost DELUGEPORT = int(CFG["Torrent"]["DelugePort"]) # 8084 DELUGEUSR = 
CFG["Torrent"]["DelugeUSR"] # mysecretusr DELUGEPWD = CFG["Torrent"]["DelugePWD"] # mysecretpwr REMOTEPATHS = CFG["Network"]["mount_points"] or [] if REMOTEPATHS: if isinstance(REMOTEPATHS, list): REMOTEPATHS = ','.join(REMOTEPATHS) # fix in case this imported as list. REMOTEPATHS = [ tuple(item.split(',')) for item in REMOTEPATHS.split('|') ] # /volume1/Public/,E:\|/volume2/share/,\\NAS\ REMOTEPATHS = [ (local.strip(), remote.strip()) for local, remote in REMOTEPATHS ] # strip trailing and leading whitespaces PLEXSSL = int(CFG["Plex"]["plex_ssl"]) PLEXHOST = CFG["Plex"]["plex_host"] PLEXPORT = CFG["Plex"]["plex_port"] PLEXTOKEN = CFG["Plex"]["plex_token"] PLEXSEC = CFG["Plex"]["plex_sections"] or [] if PLEXSEC: if isinstance(PLEXSEC, list): PLEXSEC = ','.join(PLEXSEC) # fix in case this imported as list. PLEXSEC = [ tuple(item.split(',')) for item in PLEXSEC.split('|') ] devnull = open(os.devnull, 'w') try: subprocess.Popen(["nice"], stdout=devnull, stderr=devnull).communicate() NICENESS.extend(['nice', '-n%s' % (int(CFG["Posix"]["niceness"]))]) except: pass try: subprocess.Popen(["ionice"], stdout=devnull, stderr=devnull).communicate() try: NICENESS.extend(['ionice', '-c%s' % (int(CFG["Posix"]["ionice_class"]))]) except: pass try: if 'ionice' in NICENESS: NICENESS.extend(['-n%s' % (int(CFG["Posix"]["ionice_classdata"]))]) else: NICENESS.extend(['ionice', '-n%s' % (int(CFG["Posix"]["ionice_classdata"]))]) except: pass except: pass devnull.close() COMPRESSEDCONTAINER = [re.compile('.r\d{2}$', re.I), re.compile('.part\d+.rar$', re.I), re.compile('.rar$', re.I)] COMPRESSEDCONTAINER += [re.compile('%s$' % ext, re.I) for ext in CFG["Extensions"]["compressedExtensions"]] MEDIACONTAINER = CFG["Extensions"]["mediaExtensions"] AUDIOCONTAINER = CFG["Extensions"]["audioExtensions"] METACONTAINER = CFG["Extensions"]["metaExtensions"] # .nfo,.sub,.srt if isinstance(COMPRESSEDCONTAINER, str): COMPRESSEDCONTAINER = COMPRESSEDCONTAINER.split(',') if isinstance(MEDIACONTAINER, str): MEDIACONTAINER = MEDIACONTAINER.split(',') if isinstance(AUDIOCONTAINER, str): AUDIOCONTAINER = AUDIOCONTAINER.split(',') if isinstance(METACONTAINER, str): METACONTAINER = METACONTAINER.split(',') GETSUBS = int(CFG["Transcoder"]["getSubs"]) TRANSCODE = int(CFG["Transcoder"]["transcode"]) DUPLICATE = int(CFG["Transcoder"]["duplicate"]) CONCAT = int(CFG["Transcoder"]["concat"]) IGNOREEXTENSIONS = (CFG["Transcoder"]["ignoreExtensions"]) if isinstance(IGNOREEXTENSIONS, str): IGNOREEXTENSIONS = IGNOREEXTENSIONS.split(',') OUTPUTFASTSTART = int(CFG["Transcoder"]["outputFastStart"]) GENERALOPTS = (CFG["Transcoder"]["generalOptions"]) if isinstance(GENERALOPTS, str): GENERALOPTS = GENERALOPTS.split(',') if GENERALOPTS == ['']: GENERALOPTS = [] if not '-fflags' in GENERALOPTS: GENERALOPTS.append('-fflags') if not '+genpts' in GENERALOPTS: GENERALOPTS.append('+genpts') try: OUTPUTQUALITYPERCENT = int(CFG["Transcoder"]["outputQualityPercent"]) except: pass OUTPUTVIDEOPATH = CFG["Transcoder"]["outputVideoPath"] PROCESSOUTPUT = int(CFG["Transcoder"]["processOutput"]) ALANGUAGE = CFG["Transcoder"]["audioLanguage"] AINCLUDE = int(CFG["Transcoder"]["allAudioLanguages"]) SLANGUAGES = CFG["Transcoder"]["subLanguages"] if isinstance(SLANGUAGES, str): SLANGUAGES = SLANGUAGES.split(',') if SLANGUAGES == ['']: SLANGUAGES = [] SINCLUDE = int(CFG["Transcoder"]["allSubLanguages"]) SEXTRACT = int(CFG["Transcoder"]["extractSubs"]) SEMBED = int(CFG["Transcoder"]["embedSubs"]) SUBSDIR = CFG["Transcoder"]["externalSubDir"] VEXTENSION = 
CFG["Transcoder"]["outputVideoExtension"].strip() VCODEC = CFG["Transcoder"]["outputVideoCodec"].strip() VCODEC_ALLOW = CFG["Transcoder"]["VideoCodecAllow"].strip() if isinstance(VCODEC_ALLOW, str): VCODEC_ALLOW = VCODEC_ALLOW.split(',') if VCODEC_ALLOW == ['']: VCODEC_ALLOW = [] VPRESET = CFG["Transcoder"]["outputVideoPreset"].strip() try: VFRAMERATE = float(CFG["Transcoder"]["outputVideoFramerate"].strip()) except: pass try: VCRF = int(CFG["Transcoder"]["outputVideoCRF"].strip()) except: pass try: VLEVEL = CFG["Transcoder"]["outputVideoLevel"].strip() except: pass try: VBITRATE = int((CFG["Transcoder"]["outputVideoBitrate"].strip()).replace('k','000')) except: pass VRESOLUTION = CFG["Transcoder"]["outputVideoResolution"] ACODEC = CFG["Transcoder"]["outputAudioCodec"].strip() ACODEC_ALLOW = CFG["Transcoder"]["AudioCodecAllow"].strip() if isinstance(ACODEC_ALLOW, str): ACODEC_ALLOW = ACODEC_ALLOW.split(',') if ACODEC_ALLOW == ['']: ACODEC_ALLOW = [] try: ACHANNELS = int(CFG["Transcoder"]["outputAudioChannels"].strip()) except: pass try: ABITRATE = int((CFG["Transcoder"]["outputAudioBitrate"].strip()).replace('k','000')) except: pass ACODEC2 = CFG["Transcoder"]["outputAudioTrack2Codec"].strip() ACODEC2_ALLOW = CFG["Transcoder"]["AudioCodec2Allow"].strip() if isinstance(ACODEC2_ALLOW, str): ACODEC2_ALLOW = ACODEC2_ALLOW.split(',') if ACODEC2_ALLOW == ['']: ACODEC2_ALLOW = [] try: ACHANNELS2 = int(CFG["Transcoder"]["outputAudioTrack2Channels"].strip()) except: pass try: ABITRATE2 = int((CFG["Transcoder"]["outputAudioTrack2Bitrate"].strip()).replace('k','000')) except: pass ACODEC3 = CFG["Transcoder"]["outputAudioOtherCodec"].strip() ACODEC3_ALLOW = CFG["Transcoder"]["AudioOtherCodecAllow"].strip() if isinstance(ACODEC3_ALLOW, str): ACODEC3_ALLOW = ACODEC3_ALLOW.split(',') if ACODEC3_ALLOW == ['']: ACODEC3_ALLOW = [] try: ACHANNELS3 = int(CFG["Transcoder"]["outputAudioOtherChannels"].strip()) except: pass try: ABITRATE3 = int((CFG["Transcoder"]["outputAudioOtherBitrate"].strip()).replace('k','000')) except: pass SCODEC = CFG["Transcoder"]["outputSubtitleCodec"].strip() BURN = int(CFG["Transcoder"]["burnInSubtitle"].strip()) DEFAULTS = CFG["Transcoder"]["outputDefault"].strip() HWACCEL = int(CFG["Transcoder"]["hwAccel"]) allow_subs = ['.mkv','.mp4', '.m4v', 'asf', 'wma', 'wmv'] codec_alias = { 'libx264':['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4'], 'libmp3lame':['libmp3lame', 'mp3'], 'libfaac':['libfaac', 'aac', 'faac'] } transcode_defaults = { 'iPad':{ 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':None, 'ACHANNELS':2, 'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6, 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, 'SCODEC':'mov_text' }, 'iPad-1080p':{ 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, 'VRESOLUTION':'1920:1080','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':None, 'ACHANNELS':2, 'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6, 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, 'SCODEC':'mov_text' }, 'iPad-720p':{ 
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, 'VRESOLUTION':'1280:720','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':None, 'ACHANNELS':2, 'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6, 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, 'SCODEC':'mov_text' }, 'Apple-TV':{ 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, 'VRESOLUTION':'1280:720','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC':'ac3','ACODEC_ALLOW':['ac3'],'ABITRATE':None, 'ACHANNELS':6, 'ACODEC2':'aac','ACODEC2_ALLOW':['libfaac'],'ABITRATE2':None, 'ACHANNELS2':2, 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, 'SCODEC':'mov_text' }, 'iPod':{ 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, 'VRESOLUTION':'1280:720','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':128000, 'ACHANNELS':2, 'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None, 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, 'SCODEC':'mov_text' }, 'iPhone':{ 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, 'VRESOLUTION':'460:320','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':128000, 'ACHANNELS':2, 'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None, 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, 'SCODEC':'mov_text' }, 'PS3':{ 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC':'ac3','ACODEC_ALLOW':['ac3'],'ABITRATE':None, 'ACHANNELS':6, 'ACODEC2':'aac','ACODEC2_ALLOW':['libfaac'],'ABITRATE2':None, 'ACHANNELS2':2, 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, 'SCODEC':'mov_text' }, 'xbox':{ 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC':'ac3','ACODEC_ALLOW':['ac3'],'ABITRATE':None, 'ACHANNELS':6, 'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None, 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, 'SCODEC':'mov_text' }, 'Roku-480p':{ 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':128000, 'ACHANNELS':2, 'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6, 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, 'SCODEC':'mov_text' }, 'Roku-720p':{ 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 
'msmpeg4', 'MPEG-4'], 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':128000, 'ACHANNELS':2, 'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6, 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, 'SCODEC':'mov_text' }, 'Roku-1080p':{ 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':160000, 'ACHANNELS':2, 'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6, 'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None, 'SCODEC':'mov_text' }, 'mkv':{ 'VEXTENSION':'.mkv','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None, 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], 'ACODEC':'dts','ACODEC_ALLOW':['libfaac', 'dts', 'ac3', 'mp2', 'mp3'],'ABITRATE':None, 'ACHANNELS':8, 'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None, 'ACODEC3':'ac3','ACODEC3_ALLOW':['libfaac', 'dts', 'ac3', 'mp2', 'mp3'],'ABITRATE3':None, 'ACHANNELS3':8, 'SCODEC':'mov_text' }, 'mp4-scene-release':{ 'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':19,'VLEVEL':'3.1', 'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], 'ACODEC':'dts','ACODEC_ALLOW':['libfaac', 'dts', 'ac3', 'mp2', 'mp3'],'ABITRATE':None, 'ACHANNELS':8, 'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None, 'ACODEC3':'ac3','ACODEC3_ALLOW':['libfaac', 'dts', 'ac3', 'mp2', 'mp3'],'ABITRATE3':None, 'ACHANNELS3':8, 'SCODEC':'mov_text' } } if DEFAULTS and DEFAULTS in transcode_defaults: VEXTENSION = transcode_defaults[DEFAULTS]['VEXTENSION'] VCODEC = transcode_defaults[DEFAULTS]['VCODEC'] VPRESET = transcode_defaults[DEFAULTS]['VPRESET'] VFRAMERATE = transcode_defaults[DEFAULTS]['VFRAMERATE'] VBITRATE = transcode_defaults[DEFAULTS]['VBITRATE'] VRESOLUTION = transcode_defaults[DEFAULTS]['VRESOLUTION'] VCRF = transcode_defaults[DEFAULTS]['VCRF'] VLEVEL = transcode_defaults[DEFAULTS]['VLEVEL'] VCODEC_ALLOW = transcode_defaults[DEFAULTS]['VCODEC_ALLOW'] ACODEC = transcode_defaults[DEFAULTS]['ACODEC'] ACODEC_ALLOW = transcode_defaults[DEFAULTS]['ACODEC_ALLOW'] ACHANNELS = transcode_defaults[DEFAULTS]['ACHANNELS'] ABITRATE = transcode_defaults[DEFAULTS]['ABITRATE'] ACODEC2 = transcode_defaults[DEFAULTS]['ACODEC2'] ACODEC2_ALLOW = transcode_defaults[DEFAULTS]['ACODEC2_ALLOW'] ACHANNELS2 = transcode_defaults[DEFAULTS]['ACHANNELS2'] ABITRATE2 = transcode_defaults[DEFAULTS]['ABITRATE2'] ACODEC3 = transcode_defaults[DEFAULTS]['ACODEC3'] ACODEC3_ALLOW = transcode_defaults[DEFAULTS]['ACODEC3_ALLOW'] ACHANNELS3 = transcode_defaults[DEFAULTS]['ACHANNELS3'] ABITRATE3 = transcode_defaults[DEFAULTS]['ABITRATE3'] SCODEC = transcode_defaults[DEFAULTS]['SCODEC'] transcode_defaults = {} # clear memory if transcode_defaults in ['mp4-scene-release'] and not OUTPUTQUALITYPERCENT: OUTPUTQUALITYPERCENT = 100 if VEXTENSION in allow_subs: ALLOWSUBS = 1 if not VCODEC_ALLOW and VCODEC: VCODEC_ALLOW.extend([VCODEC]) for codec in VCODEC_ALLOW: if codec in codec_alias: extra = [ item for item in codec_alias[codec] if item not in VCODEC_ALLOW ] VCODEC_ALLOW.extend(extra) if not ACODEC_ALLOW and ACODEC: ACODEC_ALLOW.extend([ACODEC]) for 
codec in ACODEC_ALLOW: if codec in codec_alias: extra = [ item for item in codec_alias[codec] if item not in ACODEC_ALLOW ] ACODEC_ALLOW.extend(extra) if not ACODEC2_ALLOW and ACODEC2: ACODEC2_ALLOW.extend([ACODEC2]) for codec in ACODEC2_ALLOW: if codec in codec_alias: extra = [ item for item in codec_alias[codec] if item not in ACODEC2_ALLOW ] ACODEC2_ALLOW.extend(extra) if not ACODEC3_ALLOW and ACODEC3: ACODEC3_ALLOW.extend([ACODEC3]) for codec in ACODEC3_ALLOW: if codec in codec_alias: extra = [ item for item in codec_alias[codec] if item not in ACODEC3_ALLOW ] ACODEC3_ALLOW.extend(extra) codec_alias = {} # clear memory PASSWORDSFILE = CFG["passwords"]["PassWordFile"] # Setup FFMPEG, FFPROBE and SEVENZIP locations if platform.system() == 'Windows': FFMPEG = os.path.join(FFMPEG_PATH, 'ffmpeg.exe') FFPROBE = os.path.join(FFMPEG_PATH, 'ffprobe.exe') SEVENZIP = os.path.join(PROGRAM_DIR, 'core', 'extractor', 'bin', platform.machine(), '7z.exe') if not (os.path.isfile(FFMPEG)): # problem FFMPEG = None logger.warning("Failed to locate ffmpeg.exe. Transcoding disabled!") logger.warning("Install ffmpeg with x264 support to enable this feature ...") if not (os.path.isfile(FFPROBE)): FFPROBE = None if CHECK_MEDIA: logger.warning("Failed to locate ffprobe.exe. Video corruption detection disabled!") logger.warning("Install ffmpeg with x264 support to enable this feature ...") else: try: SEVENZIP = subprocess.Popen(['which', '7z'], stdout=subprocess.PIPE).communicate()[0].strip() except: pass if not SEVENZIP: try: SEVENZIP = subprocess.Popen(['which', '7zr'], stdout=subprocess.PIPE).communicate()[0].strip() except: pass if not SEVENZIP: try: SEVENZIP = subprocess.Popen(['which', '7za'], stdout=subprocess.PIPE).communicate()[0].strip() except: pass if not SEVENZIP: SEVENZIP = None logger.warning("Failed to locate 7zip. Transcosing of disk images and extraction of .7z files will not be possible!") if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffmpeg')) or os.access(os.path.join(FFMPEG_PATH, 'ffmpeg'), os.X_OK): FFMPEG = os.path.join(FFMPEG_PATH, 'ffmpeg') elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avconv')) or os.access(os.path.join(FFMPEG_PATH, 'avconv'), os.X_OK): FFMPEG = os.path.join(FFMPEG_PATH, 'avconv') else: try: FFMPEG = subprocess.Popen(['which', 'ffmpeg'], stdout=subprocess.PIPE).communicate()[0].strip() except: pass if not FFMPEG: try: FFMPEG = subprocess.Popen(['which', 'avconv'], stdout=subprocess.PIPE).communicate()[0].strip() except: pass if not FFMPEG: FFMPEG = None logger.warning("Failed to locate ffmpeg. Transcoding disabled!") logger.warning("Install ffmpeg with x264 support to enable this feature ...") if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffprobe')) or os.access(os.path.join(FFMPEG_PATH, 'ffprobe'), os.X_OK): FFPROBE = os.path.join(FFMPEG_PATH, 'ffprobe') elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avprobe')) or os.access(os.path.join(FFMPEG_PATH, 'avprobe'), os.X_OK): FFPROBE = os.path.join(FFMPEG_PATH, 'avprobe') else: try: FFPROBE = subprocess.Popen(['which', 'ffprobe'], stdout=subprocess.PIPE).communicate()[0].strip() except: pass if not FFPROBE: try: FFPROBE = subprocess.Popen(['which', 'avprobe'], stdout=subprocess.PIPE).communicate()[0].strip() except: pass if not FFPROBE: FFPROBE = None if CHECK_MEDIA: logger.warning("Failed to locate ffprobe. 
Video corruption detection disabled!") logger.warning("Install ffmpeg with x264 support to enable this feature ...") # check for script-defied section and if None set to allow sections SECTIONS = CFG[tuple(x for x in CFG if CFG[x].sections and CFG[x].isenabled()) if not section else (section,)] for section,subsections in SECTIONS.items(): CATEGORIES.extend([subsection for subsection in subsections if CFG[section][subsection].isenabled()]) CATEGORIES = list(set(CATEGORIES)) # create torrent class TORRENT_CLASS = create_torrent_class(TORRENT_CLIENTAGENT) # finished initalizing return True
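# Illustrative sketch (not part of the original module): initialize() above grows each
# *_ALLOW list with known aliases so that, for example, streams reported as "h264" or
# "AVC" satisfy a "libx264" allow-list. A standalone version of that expansion, using
# the same alias table; the function name expand_allow_list() is hypothetical.
codec_alias = {
    'libx264': ['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4'],
    'libmp3lame': ['libmp3lame', 'mp3'],
    'libfaac': ['libfaac', 'aac', 'faac'],
}

def expand_allow_list(allow, alias=codec_alias):
    for codec in list(allow):
        extra = [item for item in alias.get(codec, []) if item not in allow]
        allow.extend(extra)
    return allow

print(expand_allow_list(['libx264']))  # ['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4']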
def main(args):
    # Initialize the config
    core.initialize()

    # clientAgent for Torrents
    clientAgent = core.TORRENT_CLIENTAGENT

    logger.info("#########################################################")
    logger.info("## ..::[{0}]::.. ##".format(os.path.basename(__file__)))
    logger.info("#########################################################")

    # debug command line options
    logger.debug("Options passed into TorrentToMedia: {0}".format(args))

    # Post-Processing Result
    result = [0, ""]

    try:
        inputDirectory, inputName, inputCategory, inputHash, inputID = core.parse_args(clientAgent, args)
    except:
        logger.error("There was a problem loading variables")
        return -1

    if inputDirectory and inputName and inputHash and inputID:
        result = processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent)
    else:
        # Perform Manual Post-Processing
        logger.warning("Invalid number of arguments received from client, Switching to manual run mode ...")

        for section, subsections in core.SECTIONS.items():
            for subsection in subsections:
                if not core.CFG[section][subsection].isenabled():
                    continue
                for dirName in core.getDirs(section, subsection, link='hard'):
                    logger.info("Starting manual run for {0}:{1} - Folder:{2}".format(section, subsection, dirName))
                    logger.info("Checking database for download info for {0} ...".format(os.path.basename(dirName)))

                    core.DOWNLOADINFO = core.get_downloadInfo(os.path.basename(dirName), 0)
                    if core.DOWNLOADINFO:
                        clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', 'manual'))
                        inputHash = text_type(core.DOWNLOADINFO[0].get('input_hash', ''))
                        inputID = text_type(core.DOWNLOADINFO[0].get('input_id', ''))
                        logger.info("Found download info for {0}, "
                                    "setting variables now ...".format(os.path.basename(dirName)))
                    else:
                        logger.info('Unable to locate download info for {0}, '
                                    'continuing to try and process this release ...'.format(os.path.basename(dirName)))
                        clientAgent = 'manual'
                        inputHash = ''
                        inputID = ''

                    if clientAgent.lower() not in core.TORRENT_CLIENTS:
                        continue

                    try:
                        dirName = dirName.encode(core.SYS_ENCODING)
                    except UnicodeError:
                        pass
                    inputName = os.path.basename(dirName)
                    try:
                        inputName = inputName.encode(core.SYS_ENCODING)
                    except UnicodeError:
                        pass

                    results = processTorrent(dirName, inputName, subsection, inputHash or None, inputID or None,
                                             clientAgent)
                    if results[0] != 0:
                        logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format(
                            section, subsection))
                        result = results

    if result[0] == 0:
        logger.info("The {0} script completed successfully.".format(args[0]))
    else:
        logger.error("A problem was reported in the {0} script.".format(args[0]))

    del core.MYAPP
    return result[0]
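# Illustrative sketch (not part of the original script): main() above expects the raw
# argv handed over by the torrent client and returns a process exit code, so a typical
# entry point would look like the following. This is a hypothetical stanza, assuming
# the module-level sys import already present in the script.
if __name__ == '__main__':
    exit(main(sys.argv))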
            if serie[2].strip() == 'animeid':
                from seriesly.channels import animeid
                itemlist = animeid.episodios(item)

            if serie[2].strip() == 'moviezet':
                from seriesly.channels import moviezet
                itemlist = moviezet.serie(item)

        except:
            import traceback
            from pprint import pprint
            exc_type, exc_value, exc_tb = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_tb)
            for line in lines:
                line_splits = line.split("\n")
                for line_split in line_splits:
                    logger.error(line_split)
            itemlist = []

    else:
        logger.info("[library_service.py] No actualiza " + serie[0] + " (no existe el directorio)")
        itemlist = []

    for item in itemlist:
        # logger.info("item=" + item.tostring())
        try:
            item.show = serie[0].strip()
            library.savelibrary(titulo=item.title, url=item.url, thumbnail=item.thumbnail, server=item.server,
                                plot=item.plot,
def Transcode_directory(dirName):
    if not core.FFMPEG:
        return 1, dirName
    logger.info("Checking for files to be transcoded")
    final_result = 0  # initialize as successful
    if core.OUTPUTVIDEOPATH:
        newDir = core.OUTPUTVIDEOPATH
        makeDir(newDir)
        name = os.path.splitext(os.path.split(dirName)[1])[0]
        newDir = os.path.join(newDir, name)
        makeDir(newDir)
    else:
        newDir = dirName
    if platform.system() == 'Windows':
        bitbucket = open('NUL')
    else:
        bitbucket = open('/dev/null')
    movieName = os.path.splitext(os.path.split(dirName)[1])[0]
    List = core.listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False)
    List, remList, newList, success = processList(List, newDir, bitbucket)
    if not success:
        bitbucket.close()
        return 1, dirName

    for file in List:
        if isinstance(file, str) and os.path.splitext(file)[1] in core.IGNOREEXTENSIONS:
            continue
        command = buildCommands(file, newDir, movieName, bitbucket)
        newfilePath = command[-1]

        # transcoding files may remove the original file, so make sure to extract subtitles first
        if core.SEXTRACT and isinstance(file, str):
            extract_subs(file, newfilePath, bitbucket)

        try:  # Try to remove the file that we're transcoding to just in case. (ffmpeg will return an error if it already exists for some reason)
            os.remove(newfilePath)
        except OSError as e:
            if e.errno != errno.ENOENT:  # Ignore the error if it's just telling us that the file doesn't exist
                logger.debug("Error when removing transcoding target: {0}".format(e))
        except Exception as e:
            logger.debug("Error when removing transcoding target: {0}".format(e))

        logger.info("Transcoding video: {0}".format(newfilePath))
        print_cmd(command)
        result = 1  # set result to failed in case call fails.
        try:
            if isinstance(file, str):
                proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket)
            else:
                img, data = iteritems(file).next()
                proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket, stdin=subprocess.PIPE)
                for vob in data['files']:
                    procin = zip_out(vob, img, bitbucket)
                    if procin:
                        shutil.copyfileobj(procin.stdout, proc.stdin)
                        procin.stdout.close()
            proc.communicate()
            result = proc.returncode
        except:
            logger.error("Transcoding of video {0} has failed".format(newfilePath))

        if core.SUBSDIR and result == 0 and isinstance(file, str):
            for sub in get_subs(file):
                name = os.path.splitext(os.path.split(file)[1])[0]
                subname = os.path.split(sub)[1]
                newname = os.path.splitext(os.path.split(newfilePath)[1])[0]
                newpath = os.path.join(core.SUBSDIR, subname.replace(name, newname))
                if not os.path.isfile(newpath):
                    os.rename(sub, newpath)

        if result == 0:
            try:
                shutil.copymode(file, newfilePath)
            except:
                pass
            logger.info("Transcoding of video to {0} succeeded".format(newfilePath))
            if os.path.isfile(newfilePath) and (file in newList or not core.DUPLICATE):
                try:
                    os.unlink(file)
                except:
                    pass
        else:
            logger.error("Transcoding of video to {0} failed with result {1}".format(newfilePath, result))
        # this will be 0 (successful) if all are successful, else will return a positive integer for failure.
        final_result = final_result + result

    if final_result == 0 and not core.DUPLICATE:
        for file in remList:
            try:
                os.unlink(file)
            except:
                pass
    if not os.listdir(newDir):  # this is an empty directory and we didn't transcode into it.
        os.rmdir(newDir)
        newDir = dirName
    if not core.PROCESSOUTPUT and core.DUPLICATE:  # We postprocess the original files to CP/SB
        newDir = dirName
    bitbucket.close()
    return final_result, newDir
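# Illustrative sketch (not part of the original module): Transcode_directory() above
# deletes the transcode target before running ffmpeg and only reports errors other than
# "file does not exist" (ENOENT). A reusable, hypothetical helper for the same pattern:
import errno
import os

def remove_if_exists(path):
    try:
        os.remove(path)
    except OSError as e:
        if e.errno != errno.ENOENT:  # a missing file is fine; anything else is not
            raise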
                30050)  # "No se puede conectar con el sitio web"
            platformtools.dialog_ok("plugin", texto)

        # Grab server response errors
        elif hasattr(e, 'code'):
            logger.info("streamondemand.platformcode.launcher codigo de error HTTP : %d" % e.code)
            # "El sitio web no funciona correctamente (error http %d)"
            platformtools.dialog_ok("plugin", config.get_localized_string(30051) % e.code)

    except:
        import traceback
        logger.error("streamondemand.platformcode.launcher " + traceback.format_exc())

        patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\", "\\\\") + '([^.]+)\.py"'
        canal = scrapertools.find_single_match(traceback.format_exc(), patron)

        try:
            import xbmc
            xbmc_version = int(xbmc.getInfoLabel("System.BuildVersion").split(".", 1)[0])
            if xbmc_version > 13:
                log_name = "kodi.log"
            else:
                log_name = "xbmc.log"
            log_message = "Ruta: " + xbmc.translatePath(
def main(): logger.info("pelisalacarta.library_service Actualizando series...") p_dialog = None try: if config.get_setting("updatelibrary") == "true": heading = 'Actualizando biblioteca....' p_dialog = platformtools.dialog_progress_bg( 'pelisalacarta', heading) p_dialog.update(0, '') show_list = [] for path, folders, files in filetools.walk(library.TVSHOWS_PATH): show_list.extend([ filetools.join(path, f) for f in files if f == "tvshow.json" ]) # fix float porque la division se hace mal en python 2.x t = float(100) / len(show_list) for i, tvshow_file in enumerate(show_list): serie = Item().fromjson(filetools.read(tvshow_file)) path = filetools.dirname(tvshow_file) logger.info("pelisalacarta.library_service serie=" + serie.contentSerieName) logger.info("pelisalacarta.library_service Actualizando " + path) logger.info("pelisalacarta.library_service url " + serie.url) show_name = serie.contentTitle if show_name == "": show_name = serie.show p_dialog.update(int(math.ceil((i + 1) * t)), heading, show_name) # si la serie esta activa se actualiza if serie.active: try: pathchannels = filetools.join( config.get_runtime_path(), "channels", serie.channel + '.py') logger.info( "pelisalacarta.library_service Cargando canal: " + pathchannels + " " + serie.channel) obj = imp.load_source(serie.channel, pathchannels) itemlist = obj.episodios(serie) try: library.save_library_episodes( path, itemlist, serie, True) except Exception as ex: logger.info( "pelisalacarta.library_service Error al guardar los capitulos de la serie" ) template = "An exception of type {0} occured. Arguments:\n{1!r}" message = template.format( type(ex).__name__, ex.args) logger.info(message) except Exception as ex: logger.error( "Error al obtener los episodios de: {0}".format( serie.show)) template = "An exception of type {0} occured. Arguments:\n{1!r}" message = template.format(type(ex).__name__, ex.args) logger.info(message) p_dialog.close() else: logger.info( "No actualiza la biblioteca, está desactivado en la configuración de pelisalacarta" ) except Exception as ex: logger.info( "pelisalacarta.library_service Se ha producido un error al actualizar las series" ) template = "An exception of type {0} occured. Arguments:\n{1!r}" message = template.format(type(ex).__name__, ex.args) logger.info(message) if p_dialog: p_dialog.close()
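# Illustrative sketch (not part of the original service): main() above converts the loop
# index into a progress percentage with t = float(100) / len(show_list) and
# int(math.ceil((i + 1) * t)), forcing float division under Python 2 so the last show
# always reaches 100%. A standalone version of that math; progress_percent() is a
# hypothetical helper name.
import math

def progress_percent(index, total):
    step = float(100) / total
    return int(math.ceil((index + 1) * step))

print([progress_percent(i, 3) for i in range(3)])  # [34, 67, 100]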
if config.DISABLED_FEEDS:
    filenames = [
        filename for filename in filenames
        if os.path.splitext(os.path.split(filename)[-1])[0] not in re.split(r"[^\w]+", config.DISABLED_FEEDS)
    ]

for i in xrange(len(filenames)):
    filename = filenames[i]
    try:
        module = __import__(os.path.basename(filename).split(".py")[0])
    except (ImportError, SyntaxError), ex:
        logger.error("something went wrong during import of feed file '%s' ('%s')" % (filename, ex))
        continue

    for name, function in inspect.getmembers(module, inspect.isfunction):
        if name == "fetch":
            logger.info("'%s'%s" % (module.__url__, " " * 20 if len(module.__url__) < 20 else ""))
            sys.stdout.write("progress: %d/%d (%d%%)\r" % (i, len(filenames), i * 100 / len(filenames)))
            sys.stdout.flush()

            if config.DISABLED_TRAILS_INFO_REGEX and re.search(
def run(): logger.info("streamondemand.platformcode.launcher run") # Extract item from sys.argv if sys.argv[2]: item = Item().fromurl(sys.argv[2]) # If no item, this is mainlist else: item = Item(channel="channelselector", action="getmainlist", viewmode="movie") logger.info("streamondemand.platformcode.launcher " + item.tostring()) try: # If item has no action, stops here if item.action == "": logger.info("streamondemand.platformcode.launcher Item sin accion") return # Action for main menu in channelselector if item.action == "getmainlist": import channelselector # Check for updates only on first screen if config.get_setting("check_for_plugin_updates") == "true": logger.info( "streamondemand.platformcode.launcher Check for plugin updates enabled" ) from core import updater try: config.set_setting("plugin_updates_available", "0") version = updater.checkforupdates() itemlist = channelselector.getmainlist() if version: config.set_setting("plugin_updates_available", "1") platformtools.dialog_ok( "Versione " + version + " disponible", "E' possibile fare il download della nuova versione\n" "selezionare la relativa voce nel menu principale") itemlist = channelselector.getmainlist() itemlist.insert( 0, Item(title="Download versione " + version, version=version, channel="updater", action="update", thumbnail=channelselector.get_thumb( "squares", "thumb_actualizar.png"))) except: import traceback logger.info(traceback.format_exc()) platformtools.dialog_ok( "Impossibile connettersi", "Non è stato possibile verificare", "aggiornamenti") logger.info( "cpelisalacarta.platformcode.launcher Fallo al verificar la actualización" ) config.set_setting("plugin_updates_available", "0") itemlist = channelselector.getmainlist() else: logger.info( "streamondemand.platformcode.launcher Check for plugin updates disabled" ) config.set_setting("plugin_updates_available", "0") itemlist = channelselector.getmainlist() platformtools.render_items(itemlist, item) # Action for updating plugin elif item.action == "update": from core import updater updater.update(item) config.set_setting("plugin_updates_available", "0") if config.get_system_platform() != "xbox": import xbmc xbmc.executebuiltin("Container.Refresh") # Action for channel types on channelselector: movies, series, etc. 
elif item.action == "getchanneltypes": import channelselector itemlist = channelselector.getchanneltypes() platformtools.render_items(itemlist, item) # Action for channel listing on channelselector elif item.action == "filterchannels": import channelselector itemlist = channelselector.filterchannels(item.channel_type) platformtools.render_items(itemlist, item) # Special action for playing a video from the library elif item.action == "play_from_library": play_from_library(item) return # Action in certain channel specified in "action" and "channel" parameters else: # Entry point for a channel is the "mainlist" action, so here we check parental control if item.action == "mainlist": # Parental control can_open_channel = False # If it is an adult channel, and user has configured pin, asks for it if channeltools.is_adult(item.channel) and config.get_setting( "adult_pin") != "": tecleado = platformtools.dialog_input( "", "PIN per canali per adulti", True) if tecleado is not None: if tecleado == config.get_setting("adult_pin"): can_open_channel = True # All the other cases can open the channel else: can_open_channel = True if not can_open_channel: return # Actualiza el canal individual if item.action == "mainlist" and item.channel != "channelselector" and config.get_setting( "check_for_channel_updates") == "true": from core import updater updater.update_channel(item.channel) # Checks if channel exists channel_file = os.path.join(config.get_runtime_path(), 'channels', item.channel + ".py") logger.info( "streamondemand.platformcode.launcher channel_file=%s" % channel_file) channel = None if item.channel in [ "personal", "personal2", "personal3", "personal4", "personal5" ]: import channels.personal as channel elif os.path.exists(channel_file): try: channel = __import__('channels.%s' % item.channel, None, None, ["channels.%s" % item.channel]) except ImportError: exec "import channels." 
+ item.channel + " as channel" logger.info( "streamondemand.platformcode.launcher running channel " + channel.__name__ + " " + channel.__file__) # Special play action if item.action == "play": logger.info("streamondemand.platformcode.launcher play") # logger.debug("item_toPlay: " + "\n" + item.tostring('\n')) # First checks if channel has a "play" function if hasattr(channel, 'play'): logger.info( "streamondemand.platformcode.launcher executing channel 'play' method" ) itemlist = channel.play(item) b_favourite = item.isFavourite # Play should return a list of playable URLS if len(itemlist) > 0: item = itemlist[0] if b_favourite: item.isFavourite = True platformtools.play_video(item) #Permitir varias calidades desde play en el canal elif len(itemlist) > 0 and isinstance(itemlist[0], list): item.video_urls = itemlist platformtools.play_video(item) # If not, shows user an error message else: platformtools.dialog_ok("plugin", "Niente da riprodurre") # If player don't have a "play" function, not uses the standard play from platformtools else: logger.info( "streamondemand.platformcode.launcher executing core 'play' method" ) platformtools.play_video(item) # Special action for findvideos, where the plugin looks for known urls elif item.action == "findvideos": # First checks if channel has a "findvideos" function if hasattr(channel, 'findvideos'): itemlist = getattr(channel, item.action)(item) # If not, uses the generic findvideos function else: logger.info( "streamondemand.platformcode.launcher no channel 'findvideos' method, " "executing core method") from core import servertools itemlist = servertools.find_video_items(item) if config.get_setting('filter_servers') == 'true': itemlist = filtered_servers(itemlist) from platformcode import subtitletools subtitletools.saveSubtitleName(item) platformtools.render_items(itemlist, item) # Special action for adding a movie to the library elif item.action == "add_pelicula_to_library": library.add_pelicula_to_library(item) # Special action for adding a serie to the library elif item.action == "add_serie_to_library": library.add_serie_to_library(item, channel) # Special action for downloading all episodes from a serie elif item.action == "download_all_episodes": from channels import descargas item.action = item.extra del item.extra descargas.save_download(item) # Special action for searching, first asks for the words then call the "search" function elif item.action == "search": logger.info("streamondemand.platformcode.launcher search") last_search = "" last_search_active = config.get_setting( "last_search", "buscador") if last_search_active: try: current_saved_searches_list = list( config.get_setting("saved_searches_list", "buscador")) last_search = current_saved_searches_list[0] except: pass tecleado = platformtools.dialog_input(last_search) if tecleado is not None: if last_search_active: from channels import buscador buscador.save_search(tecleado) # TODO revisar 'personal.py' porque no tiene función search y daría problemas # DrZ3r0 itemlist = channel.search(item, tecleado.replace(" ", "+")) else: itemlist = [] platformtools.render_items(itemlist, item) # For all other actions else: logger.info( "streamondemand.platformcode.launcher executing channel '" + item.action + "' method") itemlist = getattr(channel, item.action)(item) platformtools.render_items(itemlist, item) except urllib2.URLError, e: import traceback logger.error("streamondemand.platformcode.launcher " + traceback.format_exc()) # Grab inner and third party errors if hasattr(e, 'reason'): 
            logger.info("streamondemand.platformcode.launcher Razon del error, codigo: " +
                        str(e.reason[0]) + ", Razon: " + str(e.reason[1]))
            texto = config.get_localized_string(30050)  # "No se puede conectar con el sitio web"
            platformtools.dialog_ok("plugin", texto)

        # Grab server response errors
        elif hasattr(e, 'code'):
            logger.info("streamondemand.platformcode.launcher codigo de error HTTP : %d" % e.code)
            # "El sitio web no funciona correctamente (error http %d)"
            platformtools.dialog_ok("plugin", config.get_localized_string(30051) % e.code)
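# Illustrative sketch (not part of the original launcher): the handler above separates
# connection-level failures (URLError carrying .reason) from HTTP-level failures
# (HTTPError carrying .code). A minimal, hypothetical helper built on the same Python 2
# urllib2 attributes:
import urllib2

def describe_url_error(e):
    if hasattr(e, 'reason'):
        return "cannot reach the site: %s" % str(e.reason)
    elif hasattr(e, 'code'):
        return "the site returned HTTP error %d" % e.code
    return "unknown URL error"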
def play(item): logger.info("[streamondemand-pureita cineblog01] play") itemlist = [] ### Handling new cb01 wrapper if host[9:] + "/film/" in item.url: iurl = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get( "location", "") logger.info("/film/ wrapper: %s" % iurl) if iurl: item.url = iurl if '/goto/' in item.url: item.url = item.url.split('/goto/')[-1].decode('base64') item.url = item.url.replace('http://cineblog01.uno', 'http://k4pp4.pw') logger.debug( "##############################################################") if "go.php" in item.url: data = httptools.downloadpage(item.url, headers=headers).data try: data = scrapertools.get_match(data, 'window.location.href = "([^"]+)";') except IndexError: try: # data = scrapertools.get_match(data, r'<a href="([^"]+)">clicca qui</a>') # In alternativa, dato che a volte compare "Clicca qui per proseguire": data = scrapertools.get_match( data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>') except IndexError: data = httptools.downloadpage( item.url, only_headers=True, follow_redirects=False).headers.get("location", "") while 'vcrypt' in data: data = httptools.downloadpage(data, only_headers=True, follow_redirects=False).headers.get( "location", "") logger.debug("##### play go.php data ##\n%s\n##" % data) elif "/link/" in item.url: data = httptools.downloadpage(item.url, headers=headers).data from lib import jsunpack try: data = scrapertools.get_match( data, "(eval\(function\(p,a,c,k,e,d.*?)</script>") data = jsunpack.unpack(data) logger.debug("##### play /link/ unpack ##\n%s\n##" % data) except IndexError: logger.debug("##### The content is yet unpacked ##\n%s\n##" % data) data = scrapertools.find_single_match( data, 'var link(?:\s)?=(?:\s)?"([^"]+)";') while 'vcrypt' in data: data = httptools.downloadpage(data, only_headers=True, follow_redirects=False).headers.get( "location", "") if not "http" in data: data = urlparse.urljoin("http://swzz.xyz", data) data = httptools.downloadpage(data, headers=headers).data logger.debug("##### play /link/ data ##\n%s\n##" % data) else: data = item.url logger.debug("##### play else data ##\n%s\n##" % data) logger.debug( "##############################################################") try: itemlist = servertools.find_video_items(data=data) for videoitem in itemlist: videoitem.title = item.show videoitem.fulltitle = item.fulltitle videoitem.show = item.show videoitem.thumbnail = item.thumbnail videoitem.channel = __channel__ except AttributeError: logger.error("vcrypt data doesn't contain expected URL") return itemlist
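# Illustrative sketch (not part of the original channel): play() above resolves '/goto/'
# links by base64-decoding the last path segment (the original uses the Python 2
# str.decode('base64') idiom). A standalone version with a hypothetical sample URL:
import base64

def decode_goto(url):
    if '/goto/' in url:
        return base64.b64decode(url.split('/goto/')[-1]).decode('utf-8')
    return url

print(decode_goto('http://example.com/goto/aHR0cDovL2V4YW1wbGUub3JnL2ZpbG0='))  # http://example.org/film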
def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialogo=False): logger.info("[servertools.py] resolve_video_urls_for_playing, server=" + server + ", url=" + url) video_urls = [] torrent = False server = server.lower() # Si el vídeo es "directo", no hay que buscar más if server == "directo" or server == "local": logger.info("[servertools.py] server=directo, la url es la buena") try: import urlparse parsed_url = urlparse.urlparse(url) logger.info("parsed_url=" + str(parsed_url)) extension = parsed_url.path[-4:] except: extension = url[-4:] video_urls = [["%s [%s]" % (extension, server), url]] return video_urls, True, "" # Averigua las URL de los vídeos else: #if server=="torrent": # server="filenium" # torrent = True # Carga el conector try: # Muestra un diálogo de progreso if muestra_dialogo: import xbmcgui progreso = xbmcgui.DialogProgress() progreso.create("pelisalacarta", "Conectando con " + server) # Sustituye el código por otro "Plex compatible" #exec "from servers import "+server+" as server_connector" servers_module = __import__("servers." + server) server_connector = getattr(servers_module, server) logger.info("[servertools.py] servidor de " + server + " importado") if muestra_dialogo: progreso.update(20, "Conectando con " + server) # Si tiene una función para ver si el vídeo existe, lo comprueba ahora if hasattr(server_connector, 'test_video_exists'): logger.info("[servertools.py] invocando a " + server + ".test_video_exists") puedes, motivo = server_connector.test_video_exists( page_url=url) # Si la funcion dice que no existe, fin if not puedes: logger.info( "[servertools.py] test_video_exists dice que el video no existe" ) if muestra_dialogo: progreso.close() return video_urls, puedes, motivo else: logger.info( "[servertools.py] test_video_exists dice que el video SI existe" ) # Obtiene enlaces free if server in FREE_SERVERS: logger.info("[servertools.py] invocando a " + server + ".get_video_url") video_urls = server_connector.get_video_url( page_url=url, video_password=video_password) # Si no se encuentran vídeos en modo free, es porque el vídeo no existe if len(video_urls) == 0: if muestra_dialogo: progreso.close() return video_urls, False, "No se puede encontrar el vídeo en " + server # Obtiene enlaces premium si tienes cuenta en el server if server in PREMIUM_SERVERS and config.get_setting( server + "premium") == "true": video_urls = server_connector.get_video_url( page_url=url, premium=(config.get_setting(server + "premium") == "true"), user=config.get_setting(server + "user"), password=config.get_setting(server + "password"), video_password=video_password) # Si no se encuentran vídeos en modo premium directo, es porque el vídeo no existe if len(video_urls) == 0: if muestra_dialogo: progreso.close() return video_urls, False, "No se puede encontrar el vídeo en " + server # Obtiene enlaces filenium si tienes cuenta if server in FILENIUM_SERVERS and config.get_setting( "fileniumpremium") == "true": # Muestra un diálogo de progreso if muestra_dialogo: progreso.update(40, "Conectando con Filenium") from servers import filenium as gen_conector video_gen = gen_conector.get_video_url( page_url=url, premium=(config.get_setting("fileniumpremium") == "true"), user=config.get_setting("fileniumuser"), password=config.get_setting("fileniumpassword"), video_password=video_password) extension = gen_conector.get_file_extension(video_gen) logger.info("[xbmctools.py] filenium url=" + video_gen) video_urls.append( [extension + " [" + server + "][filenium]", 
video_gen]) # Obtiene enlaces realdebrid si tienes cuenta if server in REALDEBRID_SERVERS and config.get_setting( "realdebridpremium") == "true": # Muestra un diálogo de progreso if muestra_dialogo: progreso.update(60, "Conectando con Real-Debrid") from servers import realdebrid as gen_conector video_gen = gen_conector.get_video_url( page_url=url, premium=( config.get_setting("realdebridpremium") == "true"), user=config.get_setting("realdebriduser"), password=config.get_setting("realdebridpassword"), video_password=video_password) logger.info("[xbmctools.py] realdebrid url=" + video_gen) if not "REAL-DEBRID" in video_gen: video_urls.append([ "." + video_gen.rsplit('.', 1)[1] + " [realdebrid]", video_gen ]) else: if muestra_dialogo: progreso.close() # Si RealDebrid da error pero tienes un enlace válido, no te dice nada if len(video_urls) == 0: return video_urls, False, video_gen # Obtiene enlaces alldebrid si tienes cuenta if server in ALLDEBRID_SERVERS and config.get_setting( "alldebridpremium") == "true": # Muestra un diálogo de progreso if muestra_dialogo: progreso.update(80, "Conectando con All-Debrid") from servers import alldebrid as gen_conector video_gen = gen_conector.get_video_url( page_url=url, premium=(config.get_setting("alldebridpremium") == "true"), user=config.get_setting("alldebriduser"), password=config.get_setting("alldebridpassword"), video_password=video_password) logger.info("[xbmctools.py] alldebrid url=" + video_gen) if video_gen.startswith("http"): video_urls.append([ "." + video_gen.rsplit('.', 1)[1] + " [alldebrid]", video_gen ]) else: # Si Alldebrid da error pero tienes un enlace válido, no te dice nada if len(video_urls) == 0: return [], False, video_gen.strip() if muestra_dialogo: progreso.update(100, "Proceso finalizado") # Cierra el diálogo de progreso if muestra_dialogo: progreso.close() # Llegas hasta aquí y no tienes ningún enlace para ver, así que no vas a poder ver el vídeo if len(video_urls) == 0: # ¿Cual es el motivo? # 1) No existe -> Ya está controlado # 2) No tienes alguna de las cuentas premium compatibles # Lista de las cuentas que soportan este servidor listapremium = "" if server in ALLDEBRID_SERVERS: listapremium += "All-Debrid o " if server in FILENIUM_SERVERS: listapremium += "Filenium o " if server in REALDEBRID_SERVERS: listapremium += "Real-Debrid o " if server in PREMIUM_SERVERS: listapremium += server + " o " listapremium = listapremium[:-3] return video_urls, False, "Para ver un vídeo en " + server + " necesitas<br/>una cuenta en " + listapremium except: if muestra_dialogo: progreso.close() import traceback from pprint import pprint exc_type, exc_value, exc_tb = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_tb) for line in lines: line_splits = line.split("\n") for line_split in line_splits: logger.error(line_split) return video_urls, False, "Se ha producido un error en<br/>el conector con " + server return video_urls, True, ""
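The function above walks a priority list of "connectors" (free, premium, Filenium, Real-Debrid, All-Debrid) and accumulates whatever playable URLs each one yields. A generic sketch of that accumulation pattern; the connector names and labels are illustrative, not the plugin's real server modules.

# Hedged sketch: try a list of (label, get_video_url) connectors in order
# and collect their results in the same [label, url] shape used above.
def collect_video_urls(url, connectors):
    video_urls = []
    for label, get_video_url in connectors:
        try:
            found = get_video_url(page_url=url)
        except Exception:
            found = []  # a failing connector should not abort the whole lookup
        for entry in found:
            video_urls.append(["%s [%s]" % (entry[0], label), entry[1]])
    return video_urls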
def find_and_set_infoLabels(item): """ función que se llama para buscar y setear los infolabels :param item: :return: boleano que indica si se ha podido encontrar el 'code' """ global scraper scraper = None # logger.debug("item:\n" + item.tostring('\n')) list_opciones_cuadro = ["Introducir otro nombre", "Completar información"] # Si se añaden más scrapers hay q declararlos aqui-> "modulo_scraper": "Texto_en_cuadro" scrapers_disponibles = {'tmdb': "Buscar en TheMovieDB.org", 'tvdb': "Buscar en TheTvDB.com"} # Obtener el Scraper por defecto de la configuracion segun el tipo de contenido if item.contentType == "movie": scraper_actual = ['tmdb'][config.get_setting("scraper_movies", "biblioteca")] tipo_contenido = "película" title = item.contentTitle # Completar lista de opciones para este tipo de contenido list_opciones_cuadro.append(scrapers_disponibles['tmdb']) else: scraper_actual = ['tmdb', 'tvdb'][config.get_setting("scraper_tvshows", "biblioteca")] tipo_contenido = "serie" title = item.contentSerieName # Completar lista de opciones para este tipo de contenido list_opciones_cuadro.append(scrapers_disponibles['tmdb']) list_opciones_cuadro.append(scrapers_disponibles['tvdb']) # Importamos el scraper try: scraper = __import__('core.%s' % scraper_actual, fromlist=["core.%s" % scraper_actual]) except ImportError: exec "import core." + scraper_actual + " as scraper" except: import traceback logger.error(traceback.format_exc()) while scraper: # Llamamos a la funcion find_and_set_infoLabels del scraper seleccionado scraper_result = scraper.find_and_set_infoLabels(item) # Verificar si existe 'code' if scraper_result and item.infoLabels['code']: # code correcto logger.info("Identificador encontrado: %s" % item.infoLabels['code']) scraper.completar_codigos(item) return True elif scraper_result: # Contenido encontrado pero no hay 'code' msg = "Identificador no encontrado para: %s" % title else: # Contenido no encontrado msg = "No se ha encontrado informacion para: %s" % title logger.info(msg) # Mostrar cuadro con otras opciones: if scrapers_disponibles[scraper_actual] in list_opciones_cuadro: list_opciones_cuadro.remove(scrapers_disponibles[scraper_actual]) index = platformtools.dialog_select(msg, list_opciones_cuadro) if index < 0: logger.debug("Se ha pulsado 'cancelar' en la ventana '%s'" % msg) return False elif index == 0: # Pregunta el titulo title = platformtools.dialog_input(title, "Introduzca el nombre de la %s a buscar" % tipo_contenido) if title: if item.contentType == "movie": item.contentTitle = title else: item.contentSerieName = title else: logger.debug("he pulsado 'cancelar' en la ventana 'Introduzca el nombre correcto'") return False elif index == 1: # Hay q crear un cuadro de dialogo para introducir los datos logger.info("Completar información") if cuadro_completar(item): # code correcto logger.info("Identificador encontrado: %s" % str(item.infoLabels['code'])) return True # raise elif list_opciones_cuadro[index] in scrapers_disponibles.values(): # Obtener el nombre del modulo del scraper for k, v in scrapers_disponibles.items(): if list_opciones_cuadro[index] == v: if scrapers_disponibles[scraper_actual] not in list_opciones_cuadro: list_opciones_cuadro.append(scrapers_disponibles[scraper_actual]) # Importamos el scraper k scraper_actual = k try: scraper = None scraper = __import__('core.%s' % scraper_actual, fromlist=["core.%s" % scraper_actual]) except ImportError: exec "import core." 
+ scraper_actual + " as scraper" break logger.error("Error al importar el modulo scraper %s" % scraper_actual)
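The scraper switch above loads `core.tmdb` / `core.tvdb` dynamically with `__import__` plus an `exec` fallback. A sketch of the same idea with `importlib`; the module names follow the document, anything else is an assumption.

# Hedged sketch: load a scraper module by name with importlib instead of
# the exec/__import__ pair used above.
import importlib

def load_scraper(scraper_name):
    try:
        return importlib.import_module("core.%s" % scraper_name)
    except ImportError:
        return None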
def resolve_video_urls_for_playing(server,url,video_password="",muestra_dialogo=False): video_urls = [] # Si el vídeo es "directo", no hay que buscar más if server=="directo" or server=="local": if url.startswith("rtmp"): video_urls = [[ "%s [%s]" % (url[:4],server) , url ]] else: video_urls = [[ "%s [%s]" % (url[-4:],server) , url ]] return video_urls,True,"" # Averigua las URL de los vídeos else: # Carga el conector try: # Muestra un diálogo de progreso if muestra_dialogo: import xbmcgui progreso = xbmcgui.DialogProgress() progreso.create( "pelisalacarta" , "Conectando con "+server) exec "from servers import "+server+" as server_connector" if muestra_dialogo: progreso.update( 25 , "Conectando con "+server) # Si tiene una función para ver si el vídeo existe, lo comprueba ahora if hasattr(server_connector, 'test_video_exists'): puedes,motivo = server_connector.test_video_exists( page_url=url ) # Si la funcion dice que no existe, fin if not puedes: if muestra_dialogo: progreso.close() return video_urls,puedes,motivo # Obtiene enlaces free if server in FREE_SERVERS: video_urls = server_connector.get_video_url( page_url=url , video_password=video_password ) # Si no se encuentran vídeos en modo free, es porque el vídeo no existe if len(video_urls)==0: if muestra_dialogo: progreso.close() return video_urls,False,"No se puede encontrar el vídeo en "+server # Obtiene enlaces premium si tienes cuenta en el server if server in PREMIUM_SERVERS and config.get_setting(server+"premium")=="true": video_urls = server_connector.get_video_url( page_url=url , premium=(config.get_setting(server+"premium")=="true") , user=config.get_setting(server+"user") , password=config.get_setting(server+"password"), video_password=video_password ) # Si no se encuentran vídeos en modo premium directo, es porque el vídeo no existe if len(video_urls)==0: if muestra_dialogo: progreso.close() return video_urls,False,"No se puede encontrar el vídeo en "+server # Obtiene enlaces filenium si tienes cuenta if server in FILENIUM_SERVERS and config.get_setting("fileniumpremium")=="true": # Muestra un diálogo de progreso if muestra_dialogo: progreso.update( 50 , "Conectando con Filenium") exec "from servers import filenium as gen_conector" video_gen = gen_conector.get_video_url( page_url=url , premium=(config.get_setting("fileniumpremium")=="true") , user=config.get_setting("fileniumuser") , password=config.get_setting("fileniumpassword"), video_password=video_password ) logger.info("[xbmctools.py] filenium url="+video_gen) video_urls.append( [ "[filenium]", video_gen ] ) # Obtiene enlaces realdebrid si tienes cuenta if server in REALDEBRID_SERVERS and config.get_setting("realdebridpremium")=="true": # Muestra un diálogo de progreso if muestra_dialogo: progreso.update( 75 , "Conectando con Real-Debrid") exec "from servers import realdebrid as gen_conector" video_gen = gen_conector.get_video_url( page_url=url , premium=(config.get_setting("realdebridpremium")=="true") , user=config.get_setting("realdebriduser") , password=config.get_setting("realdebridpassword"), video_password=video_password ) logger.info("[xbmctools.py] realdebrid url="+video_gen) if not "REAL-DEBRID" in video_gen: video_urls.append( [ "."+video_gen.rsplit('.',1)[1]+" [realdebrid]", video_gen ] ) else: if muestra_dialogo: progreso.close() # Si RealDebrid da error pero tienes un enlace válido, no te dice nada if len(video_urls)==0: return video_urls,False,video_gen if muestra_dialogo: progreso.update( 100 , "Proceso finalizado") # Cierra el diálogo de progreso if 
muestra_dialogo: progreso.close() # Llegas hasta aquí y no tienes ningún enlace para ver, así que no vas a poder ver el vídeo if len(video_urls)==0: # ¿Cual es el motivo? # 1) No existe -> Ya está controlado # 2) No tienes alguna de las cuentas premium compatibles # Lista de las cuentas que soportan este servidor listapremium = "" if server in FILENIUM_SERVERS: listapremium+="Filenium o " if server in REALDEBRID_SERVERS: listapremium+="Real-Debrid o " if server in PREMIUM_SERVERS: listapremium+=server+" o " listapremium = listapremium[:-3] return video_urls,False,"Para ver un vídeo en "+server+" necesitas<br/>una cuenta en "+listapremium except: if muestra_dialogo: progreso.close() import traceback from pprint import pprint exc_type, exc_value, exc_tb = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_tb) for line in lines: line_splits = line.split("\n") for line_split in line_splits: logger.error(line_split) return video_urls,False,"Se ha producido un error en<br/>el conector con "+server try: progreso.close() except: pass return video_urls,True,""
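A short usage sketch of the `(video_urls, ok, message)` contract returned by `resolve_video_urls_for_playing` above; the server and URL values are placeholders.

# Hedged usage sketch of the return contract: a list of [label, url] pairs,
# a success flag, and an error message when the flag is False.
video_urls, ok, message = resolve_video_urls_for_playing("directo", "http://example.com/video.mp4")
if ok and video_urls:
    label, playable_url = video_urls[0]
    print("playing %s -> %s" % (label, playable_url))
else:
    print("cannot play: %s" % message)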
30050) # "No se puede conectar con el sitio web" platformtools.dialog_ok("plugin", texto) # Grab server response errors elif hasattr(e, 'code'): logger.info( "pelisalacarta.platformcode.launcher codigo de error HTTP : %d" % e.code) # "El sitio web no funciona correctamente (error http %d)" platformtools.dialog_ok( "plugin", config.get_localized_string(30051) % e.code) except: import traceback logger.error("pelisalacarta.platformcode.launcher " + traceback.format_exc()) patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\", "\\\\") + '([^.]+)\.py"' canal = scrapertools.find_single_match(traceback.format_exc(), patron) try: import xbmc xbmc_version = int( xbmc.getInfoLabel("System.BuildVersion").split(".", 1)[0]) if xbmc_version > 13: log_name = "kodi.log" else: log_name = "xbmc.log" log_message = "Ruta: " + xbmc.translatePath(
def get_episodios(item): logger.info("url=" + item.url) itemlist = [] data = re.sub(r'\n|\r|\t|\s{2}|<!--.*?-->|<i class="icon[^>]+"></i>', "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") logger.debug("data=" + data) patron = '<ul class="buscar-list">(.*?)</ul>' #logger.info("[newpct1.py] patron=" + patron) fichas = scrapertools.get_match(data, patron) #logger.info("[newpct1.py] matches=" + str(len(fichas))) #<li><a href="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"><img src="http://www.newpct1.com/pictures/c/minis/1880_forever.jpg" alt="Serie Forever 1x01"></a> <div class="info"> <a href="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"><h2 style="padding:0;">Serie <strong style="color:red;background:none;">Forever - Temporada 1 </strong> - Temporada<span style="color:red;background:none;">[ 1 ]</span>Capitulo<span style="color:red;background:none;">[ 01 ]</span><span style="color:red;background:none;padding:0px;">Espa�ol Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2></a> <span>27-10-2014</span> <span>450 MB</span> <span class="color"><ahref="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"> Descargar</a> </div></li> #logger.info("[newpct1.py] get_episodios: " + fichas) patron = '<li[^>]*><a href="([^"]+).*?' #url patron += '<img src="([^"]+)".*?' #thumbnail patron += '<h2 style="padding(.*?)/h2>' #titulo, idioma y calidad matches = re.compile(patron, re.DOTALL).findall(fichas) #logger.info("[newpct1.py] get_episodios matches: " + str(len(matches))) for scrapedurl, scrapedthumbnail, scrapedinfo in matches: try: url = scrapedurl if '</span>' in scrapedinfo: #logger.info("[newpct1.py] get_episodios: scrapedinfo="+scrapedinfo) try: #<h2 style="padding:0;">Serie <strong style="color:red;background:none;">The Big Bang Theory - Temporada 6 </strong> - Temporada<span style="color:red;background:none;">[ 6 ]</span>Capitulo<span style="color:red;background:none;">[ 03 ]</span><span style="color:red;background:none;padding:0px;">Español Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2> patron = '<span style=".*?">\[\s*(.*?)\]</span>.*?' #temporada patron += '<span style=".*?">\[\s*(.*?)\].*?' #capitulo patron += ';([^/]+)' #idioma info_extra = re.compile(patron, re.DOTALL).findall(scrapedinfo) (temporada, capitulo, idioma) = info_extra[0] except: # <h2 style="padding:0;">Serie <strong style="color:red;background:none;">The Affair Temporada 3 Capitulo 5</strong> - <span style="color:red;background:none;padding:0px;">Español Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2> patron = '<strong style=".*?">([^<]+).*?' 
# temporada y capitulo patron += '<span style=".*?">([^<]+)' info_extra = re.compile(patron, re.DOTALL).findall(scrapedinfo) (temporada_capitulo, idioma) = info_extra[0] if re.search(r'(?i)Capitulos', temporada_capitulo): temporada = scrapertools.find_single_match( temporada_capitulo, 'Temp.*?\s*([\d]+)') cap1, cap2 = scrapertools.find_single_match( temporada_capitulo, 'Cap.*?\s*(\d+).*?(\d+)') capitulo = "" else: temporada, capitulo = scrapertools.get_season_and_episode( temporada_capitulo).split('x') #logger.info("[newpct1.py] get_episodios: temporada=" + temporada) #logger.info("[newpct1.py] get_episodios: capitulo=" + capitulo) logger.debug("idioma=" + idioma) if '">' in idioma: idioma = " [" + scrapertools.find_single_match( idioma, '">([^<]+)').strip() + "]" elif ' ' in idioma: idioma = " [" + scrapertools.find_single_match( idioma, ' ([^<]+)').strip() + "]" '''else: idioma=""''' if capitulo: title = item.title + " (" + temporada.strip( ) + "x" + capitulo.strip() + ") " + idioma else: title = item.title + " (Del %sx%s al %sx%s) %s" % ( temporada, cap1, temporada, cap2, idioma) else: #<h2 style="padding:0;">The Big Bang Theory - Temporada 6 [HDTV][Cap.602][Español Castellano]</h2> #<h2 style="padding:0;">The Beast - Temporada 1 [HDTV] [Capítulo 13] [Español]</h2 #<h2 style="padding:0;">The Beast - Temp.1 [DVD-DVB][Cap.103][Spanish]</h2> try: temp, cap = scrapertools.get_season_and_episode( scrapedinfo).split('x') except: #Formatear temporadaXepisodio patron = re.compile('Cap.*?\s*([\d]+)', re.IGNORECASE) info_extra = patron.search(scrapedinfo) if len(str(info_extra.group(1))) >= 3: cap = info_extra.group(1)[-2:] temp = info_extra.group(1)[:-2] else: cap = info_extra.group(1) patron = 'Temp.*?\s*([\d]+)' temp = re.compile( patron, re.IGNORECASE).search(scrapedinfo).group(1) title = item.title + " (" + temp + 'x' + cap + ")" #logger.info("[newpct1.py] get_episodios: fanart= " +item.fanart) itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail, show=item.show, fanart=item.fanart)) except: logger.error("ERROR al añadir un episodio") if "pagination" in data: patron = '<ul class="pagination">(.*?)</ul>' paginacion = scrapertools.get_match(data, patron) #logger.info("[newpct1.py] get_episodios: paginacion= " + paginacion) if "Next" in paginacion: url_next_page = scrapertools.get_match( paginacion, '<a href="([^>]+)>Next</a>')[:-1] url_next_page = url_next_page.replace(" ", "%20") #logger.info("[newpct1.py] get_episodios: url_next_page= " + url_next_page) itemlist.append( Item(channel=item.channel, action="get_episodios", title=">> Página siguiente", url=url_next_page)) return itemlist
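`get_episodios` above juggles several title layouts ("Temporada 6 ... Capitulo 03", "Cap.602", "6x03"). A sketch of normalising those variants into a `(season, episode)` pair; the patterns are illustrative, not the exact expressions the newpct1 pages require.

# Hedged sketch: extract season/episode from the title formats handled above.
import re

def parse_season_episode(text):
    m = re.search(r'(\d+)\s*x\s*(\d+)', text, re.IGNORECASE)
    if m:
        return int(m.group(1)), int(m.group(2))
    season = re.search(r'Temp\w*\.?\s*(\d+)', text, re.IGNORECASE)
    episode = re.search(r'Cap\w*\.?\s*(\d+)', text, re.IGNORECASE)
    if season and episode:
        return int(season.group(1)), int(episode.group(1))
    return None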
def check_for_update(overwrite=True): logger.info("Aggiornamento series...") p_dialog = None serie_actualizada = False update_when_finished = False hoy = datetime.date.today() try: if config.get_setting("updatelibrary", "biblioteca") != 0 or overwrite: config.set_setting("updatelibrary_last_check", hoy.strftime('%Y-%m-%d'), "biblioteca") heading = 'Aggiornamento della libreria...' p_dialog = platformtools.dialog_progress_bg('Stefano', heading) p_dialog.update(0, '') show_list = [] for path, folders, files in filetools.walk(library.TVSHOWS_PATH): show_list.extend([filetools.join(path, f) for f in files if f == "tvshow.sod"]) if show_list: t = float(100) / len(show_list) for i, tvshow_file in enumerate(show_list): head_nfo, serie = library.read_nfo(tvshow_file) path = filetools.dirname(tvshow_file) logger.info("serie=" + serie.contentSerieName) p_dialog.update(int(math.ceil((i + 1) * t)), heading, serie.contentSerieName) interval = int(serie.active) # Can be bool type if not serie.active: # Unload if the Serie is not active continue # Update next update_next = serie.update_next if update_next: y, m, d = update_next.split('-') update_next = datetime.date(int(y), int(m), int(d)) else: update_next = hoy update_last = serie.update_last if update_last: y, m, d = update_last.split('-') update_last = datetime.date(int(y), int(m), int(d)) else: update_last = hoy # if the Serie is active ... if overwrite or config.get_setting("updatetvshows_interval", "biblioteca") == 0: # ... force autonomus update serie_actualizada = update(path, p_dialog, i, t, serie, overwrite) elif interval == 1 and update_next <= hoy: # ...weekly update serie_actualizada = update(path, p_dialog, i, t, serie, overwrite) if not serie_actualizada and update_last <= hoy - datetime.timedelta(days=7): # raise the interval interval = 7 update_next = hoy + datetime.timedelta(days=interval) elif interval == 7 and update_next <= hoy: # ...14days update serie_actualizada = update(path, p_dialog, i, t, serie, overwrite) if not serie_actualizada: if update_last <= hoy - datetime.timedelta(days=14): # raise the interval interval = 30 update_next += datetime.timedelta(days=interval) elif interval == 30 and update_next <= hoy: # ...monthly update serie_actualizada = update(path, p_dialog, i, t, serie, overwrite) if not serie_actualizada: update_next += datetime.timedelta(days=interval) if interval != int(serie.active) or update_next.strftime('%Y-%m-%d') != serie.update_next: serie.active = interval serie.update_next = update_next.strftime('%Y-%m-%d') serie.channel = "biblioteca" serie.action = "get_temporadas" filetools.write(tvshow_file, head_nfo + serie.tojson()) if serie_actualizada: if config.get_setting("search_new_content", "biblioteca") == 0: # Update Kodi library: Search contents in the Serie directory if config.is_xbmc(): from platformcode import xbmc_library xbmc_library.update(folder=filetools.basename(path)) else: update_when_finished = True if config.get_setting("search_new_content", "biblioteca") == 1 and update_when_finished: # Update Kodi library: Search contents for every Serie if config.is_xbmc(): from platformcode import xbmc_library xbmc_library.update() p_dialog.close() else: logger.info("Libreria non aggiornata, opzione disattiva nella configurazione di Stefano") except Exception as ex: logger.error("Si è verificato un errore nell'aggiornamento") template = "An exception of type %s occured. Arguments:\n%r" message = template % (type(ex).__name__, ex.args) logger.error(message) if p_dialog: p_dialog.close()
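`check_for_update` escalates the per-series check interval (1, then 7, then 30 days) when no new episodes turn up. A simplified sketch of that rule, standing apart from the dialog and nfo handling; the thresholds follow the document, the function itself is an assumption.

# Hedged, simplified sketch of the interval escalation used above.
import datetime

def next_check(interval, updated, last_update, today=None):
    today = today or datetime.date.today()
    if not updated:
        if interval == 1 and last_update <= today - datetime.timedelta(days=7):
            interval = 7
        elif interval == 7 and last_update <= today - datetime.timedelta(days=14):
            interval = 30
    return interval, today + datetime.timedelta(days=interval)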
def auto_fork(section, input_category): # auto-detect correct section # config settings cfg = dict(core.CFG[section][input_category]) host = cfg.get('host') port = cfg.get('port') username = cfg.get('username') password = cfg.get('password') apikey = cfg.get('apikey') ssl = int(cfg.get('ssl', 0)) web_root = cfg.get('web_root', '') replace = { 'medusa': 'Medusa', 'medusa-api': 'Medusa-api', 'sickbeard-api': 'SickBeard-api', 'sickgear': 'SickGear', 'sickchill': 'SickChill', 'sickrage': 'SickRage', 'stheno': 'Stheno', } _val = cfg.get('fork', 'auto') f1 = replace.get(_val, _val) try: fork = f1, core.FORKS[f1] except KeyError: fork = 'auto' protocol = 'https://' if ssl else 'http://' detected = False if section == 'NzbDrone': logger.info('Attempting to verify {category} fork'.format( category=input_category)) url = '{protocol}{host}:{port}{root}/api/rootfolder'.format( protocol=protocol, host=host, port=port, root=web_root) headers = {'X-Api-Key': apikey} try: r = requests.get(url, headers=headers, stream=True, verify=False) except requests.ConnectionError: logger.warning( 'Could not connect to {0}:{1} to verify fork!'.format( section, input_category)) if not r.ok: logger.warning('Connection to {section}:{category} failed! ' 'Check your configuration'.format( section=section, category=input_category)) fork = ['default', {}] elif fork == 'auto': params = core.ALL_FORKS rem_params = [] logger.info('Attempting to auto-detect {category} fork'.format( category=input_category)) # define the order to test. Default must be first since the default fork doesn't reject parameters. # then in order of most unique parameters. if apikey: url = '{protocol}{host}:{port}{root}/api/{apikey}/?cmd=help&subject=postprocess'.format( protocol=protocol, host=host, port=port, root=web_root, apikey=apikey) else: url = '{protocol}{host}:{port}{root}/home/postprocess/'.format( protocol=protocol, host=host, port=port, root=web_root) # attempting to auto-detect fork try: s = requests.Session() if not apikey and username and password: login = '******'.format( protocol=protocol, host=host, port=port, root=web_root) login_params = {'username': username, 'password': password} r = s.get(login, verify=False, timeout=(30, 60)) if r.status_code == 401 and r.cookies.get('_xsrf'): login_params['_xsrf'] = r.cookies.get('_xsrf') s.post(login, data=login_params, stream=True, verify=False) r = s.get(url, auth=(username, password), verify=False) except requests.ConnectionError: logger.info( 'Could not connect to {section}:{category} to perform auto-fork detection!' 
.format(section=section, category=input_category)) r = [] if r and r.ok: if apikey: try: json_data = r.json() except ValueError: logger.error('Failed to get JSON data from response') logger.debug('Response received') raise try: json_data = json_data['data'] except KeyError: logger.error('Failed to get data from JSON') logger.debug('Response received: {}'.format(json_data)) raise else: json_data = json_data.get('data', json_data) optional_parameters = json_data['optionalParameters'].keys() # Find excess parameters excess_parameters = set(params).difference(optional_parameters) logger.debug('Removing excess parameters: {}'.format( sorted(excess_parameters))) rem_params.extend(excess_parameters) else: # Find excess parameters rem_params.extend( param for param in params if 'name="{param}"'.format(param=param) not in r.text) # Remove excess params for param in rem_params: params.pop(param) for fork in sorted(iteritems(core.FORKS), reverse=False): if params == fork[1]: detected = True break if detected: logger.info( '{section}:{category} fork auto-detection successful ...'. format(section=section, category=input_category)) elif rem_params: logger.info( '{section}:{category} fork auto-detection found custom params {params}' .format(section=section, category=input_category, params=params)) fork = ['custom', params] else: logger.info( '{section}:{category} fork auto-detection failed'.format( section=section, category=input_category)) fork = core.FORKS.items()[core.FORKS.keys().index( core.FORK_DEFAULT)] logger.info('{section}:{category} fork set to {fork}'.format( section=section, category=input_category, fork=fork[0])) return fork[0], fork[1]
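The auto-detection above strips the parameters the running server rejected and then looks for a known fork whose parameter set matches exactly, falling back to a "custom" fork otherwise. A sketch of that comparison; `FORKS` here is a made-up stand-in for `core.FORKS`.

# Hedged sketch: pick the first known fork whose parameter dict matches the
# parameters the server actually accepted.
def detect_fork(supported_params, forks):
    for name, fork_params in sorted(forks.items()):
        if supported_params == fork_params:
            return name, fork_params
    return 'custom', supported_params

# Illustrative use with made-up fork definitions:
FORKS = {'default': {'dir': None}, 'medusa': {'proc_dir': None, 'failed': None}}
print(detect_fork({'proc_dir': None, 'failed': None}, FORKS))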
def conf_tools(item): logger.info() # Activar o desactivar canales if item.extra == "channels_onoff": import channelselector from core import channeltools channel_list = channelselector.filterchannels("allchannelstatus") excluded_channels = [ 'tengourl', 'buscador', 'biblioteca', 'configuracion', 'novedades', 'personal', 'ayuda', 'descargas' ] list_controls = [] try: list_controls.append({ 'id': "all_channels", 'type': "list", 'label': "Todos los canales", 'default': 0, 'enabled': True, 'visible': True, 'lvalues': [ '', 'Activar todos', 'Desactivar todos', 'Establecer estado por defecto' ] }) for channel in channel_list: # Si el canal esta en la lista de exclusiones lo saltamos if channel.channel not in excluded_channels: channel_parameters = channeltools.get_channel_parameters( channel.channel) status_control = "" status = config.get_setting("enabled", channel.channel) # si status no existe es que NO HAY valor en _data.json if status is None: status = channel_parameters["active"] logger.debug("%s | Status (XML): %s" % (channel.channel, status)) if not status: status_control = " [COLOR grey](Desactivado por defecto)[/COLOR]" else: logger.debug("%s | Status: %s" % (channel.channel, status)) control = { 'id': channel.channel, 'type': "bool", 'label': channel_parameters["title"] + status_control, 'default': status, 'enabled': True, 'visible': True } list_controls.append(control) else: continue except: import traceback logger.error("Error: %s" % traceback.format_exc()) else: return platformtools.show_channel_settings( list_controls=list_controls, item=item.clone(channel_list=channel_list), caption="Canales", callback="channel_status", custom_button={"visible": False}) # Comprobacion de archivos channel_data.json elif item.extra == "lib_check_datajson": itemlist = [] import channelselector from core import channeltools channel_list = channelselector.filterchannels("allchannelstatus") # Tener una lista de exclusion no tiene mucho sentido por que se comprueba si # el xml tiene "settings", pero por si acaso se deja excluded_channels = ['tengourl', 'configuracion', 'personal', 'ayuda'] try: import os from core import jsontools for channel in channel_list: list_status = None default_settings = None # Se comprueba si el canal esta en la lista de exclusiones if channel.channel not in excluded_channels: # Se comprueba que tenga "settings", sino se salta jsonchannel = channeltools.get_channel_json( channel.channel) if not jsonchannel.get("settings"): itemlist.append( Item(channel=CHANNELNAME, title=channel.title + " - No tiene ajustes por defecto", action="", folder=False, thumbnail=channel.thumbnail)) continue # logger.info(channel.channel + " SALTADO!") # Se cargan los ajustes del archivo json del canal file_settings = os.path.join( config.get_data_path(), "settings_channels", channel.channel + "_data.json") dict_settings = {} dict_file = {} if filetools.exists(file_settings): # logger.info(channel.channel + " Tiene archivo _data.json") channeljson_exists = True # Obtenemos configuracion guardada de ../settings/channel_data.json try: dict_file = jsontools.load_json( open(file_settings, "rb").read()) if isinstance(dict_file, dict) and 'settings' in dict_file: dict_settings = dict_file['settings'] except EnvironmentError: logger.error("ERROR al leer el archivo: %s" % file_settings) else: # logger.info(channel.channel + " No tiene archivo _data.json") channeljson_exists = False if channeljson_exists == True: try: datajson_size = filetools.getsize(file_settings) except: import traceback 
logger.error(channel.title + " | Detalle del error: %s" % traceback.format_exc()) else: datajson_size = None # Si el _data.json esta vacio o no existe... if (len(dict_settings) and datajson_size) == 0 or channeljson_exists == False: # Obtenemos controles del archivo ../channels/channel.xml needsfix = True try: # Se cargan los ajustes por defecto list_controls, default_settings = channeltools.get_channel_controls_settings( channel.channel) # logger.info(channel.title + " | Default: %s" % default_settings) except: import traceback logger.error(channel.title + " | Detalle del error: %s" % traceback.format_exc()) # default_settings = {} # Si _data.json necesita ser reparado o no existe... if needsfix == True or channeljson_exists == False: if default_settings is not None: # Creamos el channel_data.json default_settings.update(dict_settings) dict_settings = default_settings dict_file['settings'] = dict_settings # Creamos el archivo ../settings/channel_data.json json_data = jsontools.dump_json(dict_file) try: open(file_settings, "wb").write(json_data) # logger.info(channel.channel + " - Archivo _data.json GUARDADO!") # El channel_data.json se ha creado/modificado list_status = " - [COLOR red] CORREGIDO!![/COLOR]" except EnvironmentError: logger.error( "ERROR al salvar el archivo: %s" % file_settings) else: if default_settings is None: list_status = " - [COLOR red] Imposible cargar los ajustes por defecto![/COLOR]" else: # logger.info(channel.channel + " - NO necesita correccion!") needsfix = False # Si se ha establecido el estado del canal se añade a la lista if needsfix is not None: if needsfix == True: if channeljson_exists == False: list_status = " - Ajustes creados" list_colour = "red" else: list_status = " - No necesita corrección" list_colour = "green" else: # Si "needsfix" es "false" y "datjson_size" es None habra # ocurrido algun error if datajson_size is None: list_status = " - Ha ocurrido algun error" list_colour = "red" else: list_status = " - No necesita corrección" list_colour = "green" if list_status is not None: itemlist.append( Item(channel=CHANNELNAME, title=channel.title + list_status, action="", folder=False, thumbnail=channel.thumbnail, text_color=list_colour)) else: logger.error("Algo va mal con el canal %s" % channel.channel) # Si el canal esta en la lista de exclusiones lo saltamos else: continue except: import traceback logger.error("Error: %s" % traceback.format_exc()) return itemlist
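The repair path above rebuilds a channel's `_data.json` by merging the XML defaults under whatever the user already saved. A sketch of that merge-and-write step with the standard `json` module; the file layout follows the document, the helper name is hypothetical.

# Hedged sketch: merge default settings under saved ones and rewrite the file.
import json

def repair_channel_settings(file_settings, default_settings, saved_settings):
    merged = dict(default_settings)
    merged.update(saved_settings or {})  # user values win over defaults
    with open(file_settings, "wb") as f:
        f.write(json.dumps({"settings": merged}).encode("utf-8"))
    return merged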
def convert_old_to_v4(): logger.info() path_series_xml = filetools.join(config.get_data_path(), "series.xml") path_series_json = filetools.join(config.get_data_path(), "series.json") series_insertadas = 0 series_fallidas = 0 version = 'v?' # Rename and create Series directory import time new_name = str(time.time()) path_series_old = filetools.join(library.LIBRARY_PATH, "SERIES_OLD_" + new_name) if filetools.rename(library.TVSHOWS_PATH, "SERIES_OLD_" + new_name): if not filetools.mkdir(library.TVSHOWS_PATH): logger.error("ERROR, impossibile creare la directory SERIES") return False else: logger.error("ERROR,impossibile rinominare la directory SERIES") return False path_cine_old = filetools.join(library.LIBRARY_PATH, "CINE_OLD_" + new_name) if filetools.rename(library.MOVIES_PATH, "CINE_OLD_" + new_name): if not filetools.mkdir(library.MOVIES_PATH): logger.error("ERROR, impossibile creare la directory CINE") return False else: logger.error("ERROR, impossibile rinominare la directory CINE") return False # Convert library from v1 to v4 (xml) if filetools.exists(path_series_xml): try: data = filetools.read(path_series_xml) for line in data.splitlines(): try: aux = line.rstrip('\n').split(",") tvshow = aux[0].strip() url = aux[1].strip() channel = aux[2].strip() serie = Item(contentSerieName=tvshow, url=url, channel=channel, action="episodios", title=tvshow, active=True) patron = "^(.+)[\s]\((\d{4})\)$" matches = re.compile(patron, re.DOTALL).findall(serie.contentSerieName) if matches: serie.infoLabels['title'] = matches[0][0] serie.infoLabels['year'] = matches[0][1] else: serie.infoLabels['title'] = tvshow insertados, sobreescritos, fallidos = library.save_library_tvshow(serie, list()) if fallidos == 0: series_insertadas += 1 platformtools.dialog_notification("Serie aggiornata", serie.infoLabels['title']) else: series_fallidas += 1 except: series_fallidas += 1 filetools.rename(path_series_xml, "series.xml.old") version = 'v4' except EnvironmentError: logger.error("ERROR al leer el archivo: %s" % path_series_xml) return False # Convert library from v2 to v4 (json) if filetools.exists(path_series_json): try: data = jsontools.load_json(filetools.read(path_series_json)) for tvshow in data: for channel in data[tvshow]["channels"]: try: serie = Item(contentSerieName=data[tvshow]["channels"][channel]["tvshow"], url=data[tvshow]["channels"][channel]["url"], channel=channel, action="episodios", title=data[tvshow]["name"], active=True) if not tvshow.startswith("t_"): serie.infoLabels["tmdb_id"] = tvshow insertados, sobreescritos, fallidos = library.save_library_tvshow(serie, list()) if fallidos == 0: series_insertadas += 1 platformtools.dialog_notification("Serie aggiornata", serie.infoLabels['title']) else: series_fallidas += 1 except: series_fallidas += 1 filetools.rename(path_series_json, "series.json.old") version = 'v4' except EnvironmentError: logger.error("ERROR al leer el archivo: %s" % path_series_json) return False # Convert library from v2 to v4 if version != 'v4': # Get old Series recursively for raiz, subcarpetas, ficheros in filetools.walk(path_series_old): for f in ficheros: if f == "tvshow.json": try: serie = Item().fromjson(filetools.read(filetools.join(raiz, f))) insertados, sobreescritos, fallidos = library.save_library_tvshow(serie, list()) if fallidos == 0: series_insertadas += 1 platformtools.dialog_notification("Serie aggiornata", serie.infoLabels['title']) else: series_fallidas += 1 except: series_fallidas += 1 movies_insertadas = 0 movies_fallidas = 0 for raiz, subcarpetas, 
ficheros in filetools.walk(path_cine_old): for f in ficheros: if f.endswith(".strm.json"): try: movie = Item().fromjson(filetools.read(filetools.join(raiz, f))) insertados, sobreescritos, fallidos = library.save_library_movie(movie) if fallidos == 0: movies_insertadas += 1 platformtools.dialog_notification("Film aggiornato", movie.infoLabels['title']) else: movies_fallidas += 1 except: movies_fallidas += 1 config.set_setting("library_version", 'v4') platformtools.dialog_notification("Libreria aggiornata con il nuovo formato", "%s serie convertite e %s serie non convertite. Continuare per " "ottenere le info sugli episodi" % (series_insertadas, series_fallidas), time=12000) # Cleanup library of empty records if config.is_xbmc(): from platformcode import xbmc_library xbmc_library.clean() return True
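The v1 branch above reads `series.xml` as comma-separated lines of "title, url, channel" and splits an optional "(year)" suffix off the title. A sketch of parsing one such line; the dict keys are illustrative.

# Hedged sketch: parse one line of the legacy v1 series.xml format read above.
import re

def parse_v1_line(line):
    tvshow, url, channel = [part.strip() for part in line.rstrip("\n").split(",")[:3]]
    match = re.match(r"^(.+)\s\((\d{4})\)$", tvshow)
    if match:
        return {"title": match.group(1), "year": match.group(2), "url": url, "channel": channel}
    return {"title": tvshow, "year": None, "url": url, "channel": channel}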
def addchannel(item): import os import time logger.info() tecleado = platformtools.dialog_input("", "Introduzca la URL") if not tecleado: return logger.info("url=%s" % tecleado) local_folder = config.get_runtime_path() if "canal" in item.title: local_folder = filetools.join(local_folder, 'channels') folder_to_extract = "channels" info_accion = "canal" else: local_folder = filetools.join(local_folder, 'servers') folder_to_extract = "servers" info_accion = "conector" # Detecta si es un enlace a un .py o .xml (pensado sobre todo para enlaces de github) try: extension = tecleado.rsplit(".", 1)[1] except: extension = "" files = [] zip = False if extension == "py" or extension == "xml": filename = tecleado.rsplit("/", 1)[1] localfilename = filetools.join(local_folder, filename) files.append([tecleado, localfilename, filename]) else: import re from core import scrapertools # Comprueba si la url apunta a una carpeta completa (channels o servers) de github if re.search(r'https://github.com/[^\s]+/' + folder_to_extract, tecleado): try: data = scrapertools.downloadpage(tecleado) matches = scrapertools.find_multiple_matches( data, '<td class="content">.*?href="([^"]+)".*?title="([^"]+)"') for url, filename in matches: url = "https://raw.githubusercontent.com" + url.replace( "/blob/", "/") localfilename = filetools.join(local_folder, filename) files.append([url, localfilename, filename]) except: import traceback logger.error("Detalle del error: %s" % traceback.format_exc()) platformtools.dialog_ok( "Error", "La url no es correcta o no está disponible") return else: filename = 'new%s.zip' % info_accion localfilename = filetools.join(config.get_data_path(), filename) files.append([tecleado, localfilename, filename]) zip = True logger.info("localfilename=%s" % localfilename) logger.info("descarga fichero...") try: if len(files) > 1: lista_opciones = ["No", "Sí", "Sí (Sobrescribir todos)"] overwrite_all = False from core import downloadtools for url, localfilename, filename in files: result = downloadtools.downloadfile(url, localfilename, continuar=False, resumir=False) if result == -3: if len(files) == 1: dyesno = platformtools.dialog_yesno( "El archivo ya existe", "Ya existe el %s %s. " "¿Desea sobrescribirlo?" % (info_accion, filename)) else: if not overwrite_all: dyesno = platformtools.dialog_select( "El archivo %s ya existe, ¿desea sobrescribirlo?" 
% filename, lista_opciones) else: dyesno = 1 # Diálogo cancelado if dyesno == -1: return # Caso de carpeta github, opción sobrescribir todos elif dyesno == 2: overwrite_all = True elif dyesno: hora_folder = "Copia seguridad [%s]" % time.strftime( "%d-%m_%H-%M", time.localtime()) backup = filetools.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract) if not filetools.exists(backup): os.makedirs(backup) import shutil shutil.copy2(localfilename, filetools.join(backup, filename)) downloadtools.downloadfile(url, localfilename, continuar=True, resumir=False) else: if len(files) == 1: return else: continue except: import traceback logger.error("Detalle del error: %s" % traceback.format_exc()) return if zip: try: # Lo descomprime logger.info("descomprime fichero...") from core import ziptools unzipper = ziptools.ziptools() logger.info("destpathname=%s" % local_folder) unzipper.extract(localfilename, local_folder, folder_to_extract, True, True) except: import traceback logger.error("Detalle del error: %s" % traceback.format_exc()) # Borra el zip descargado filetools.remove(localfilename) platformtools.dialog_ok( "Error", "Se ha producido un error extrayendo el archivo") return # Borra el zip descargado logger.info("borra fichero...") filetools.remove(localfilename) logger.info("...fichero borrado") platformtools.dialog_ok( "Éxito", "Actualización/Instalación realizada correctamente")
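Before overwriting an existing channel or connector file, `addchannel` copies it into a time-stamped "Copia seguridad" folder. A self-contained sketch of that backup step with the standard library; the paths stand in for `config.get_data_path()` and the plugin's folder layout.

# Hedged sketch: back up a file into a time-stamped folder before overwriting it.
import os, shutil, time

def backup_before_overwrite(localfilename, data_path, folder_to_extract):
    hora_folder = "Copia seguridad [%s]" % time.strftime("%d-%m_%H-%M", time.localtime())
    backup = os.path.join(data_path, "backups", hora_folder, folder_to_extract)
    if not os.path.exists(backup):
        os.makedirs(backup)
    shutil.copy2(localfilename, os.path.join(backup, os.path.basename(localfilename)))
    return backup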
def add_node(self, _type=NodeTypes.DEFAULT, _id=None, node_options=NodeOptions()): """ Add a node to the session, based on the provided node data. :param core.enumerations.NodeTypes _type: type of node to create :param int _id: id for node, defaults to None for generated id :param core.emulator.emudata.NodeOptions node_options: data to create node with :return: created node """ # retrieve node class for given node type try: node_class = nodeutils.get_node_class(_type) except KeyError: logger.error("invalid node type to create: %s", _type) return None # set node start based on current session state, override and check when rj45 start = self.state > EventTypes.DEFINITION_STATE.value enable_rj45 = getattr(self.options, "enablerj45", "0") == "1" if _type == NodeTypes.RJ45 and not enable_rj45: start = False # determine node id if not _id: while True: _id = self.node_id_gen.next() if _id not in self.objects: break # generate name if not provided name = node_options.name if not name: name = "%s%s" % (node_class.__name__, _id) # create node logger.info("creating node(%s) id(%s) name(%s) start(%s)", node_class.__name__, _id, name, start) node = self.add_object(cls=node_class, objid=_id, name=name, start=start) # set node attributes node.icon = node_options.icon node.canvas = node_options.canvas node.opaque = node_options.opaque # set node position and broadcast it self.set_node_position(node, node_options) # add services to default and physical nodes only if _type in [NodeTypes.DEFAULT, NodeTypes.PHYSICAL]: node.type = node_options.model logger.debug("set node type: %s", node.type) services = "|".join(node_options.services) or None self.services.addservicestonode(node, node.type, services) # boot nodes if created after runtime, LcxNodes, Physical, and RJ45 are all PyCoreNodes is_boot_node = isinstance( node, PyCoreNode) and not nodeutils.is_node(node, NodeTypes.RJ45) if self.state == EventTypes.RUNTIME_STATE.value and is_boot_node: self.write_objects() self.add_remove_control_interface(node=node, remove=False) # TODO: common method to both Physical and LxcNodes, but not the common PyCoreNode node.boot() return node
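The id-allocation loop in `add_node` keeps drawing ids from `self.node_id_gen` until it finds one not already present in `self.objects`. A sketch of that loop in isolation; the simple counter below stands in for the session's id generator.

# Hedged sketch: allocate the first node id not already in use.
import itertools

def allocate_node_id(existing_ids, id_gen=None):
    id_gen = id_gen or itertools.count(1)
    for candidate in id_gen:
        if candidate not in existing_ids:
            return candidate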