def temporadas(item):
    """Build the list of seasons for a mundoflv.com show page.

    Scrapes the season buttons from ``item.url``, builds one Item per
    season pointing at the AJAX episode endpoint, and optionally
    recurses into episodios() when called from the library
    (``item.extra == 'temporadas'``).
    """
    logger.debug("pelisalacarta.channels.mundoflv temporadas")
    itemlist = []
    templist = []
    data = scrapertools.cache_page(item.url)
    realplot = ''
    # One <button> per season; the captured text is the season number.
    patron = "<button class='classnamer' onclick='javascript: mostrarcapitulos.*?blank'>([^<]+)</button>"
    matches = re.compile(patron,re.DOTALL).findall(data)
    # WordPress post id of the show, needed by the capitulos.php endpoint.
    serieid = scrapertools.find_single_match(data,"<link rel='shortlink' href='http:\/\/mundoflv.com\/\?p=([^']+)' \/>")
    item.thumbnail = item.thumbvid
    for scrapedtitle in matches:
        # AJAX endpoint that returns the episodes of this season.
        url = 'http://mundoflv.com/wp-content/themes/wpRafael/includes/capitulos.php?serie='+serieid+'&sr=&temporada=' + scrapedtitle
        title = 'Temporada '+ scrapertools.decodeHtmlentities(scrapedtitle)
        thumbnail = item.thumbnail
        # Plot is shared by all seasons: it is scraped once from the show page.
        realplot = scrapertools.find_single_match(data, '\/><\/a>([^*]+)<p><\/p>.*')
        plot = scrapertools.remove_htmltags(realplot)
        fanart = ''  # scrapertools.find_single_match(data,'<img src="([^"]+)"/>.*?</a>')
        if (DEBUG):
            logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"])")
        itemlist.append(Item(channel=item.channel, action="episodios", title=title,
                             fulltitle=item.title, url=url, thumbnail=thumbnail, plot=plot,
                             fanart=fanart, extra1=item.extra1,
                             contentSerieName=item.contentSerieName))
    # Library mode: flatten every season into a single episode list.
    if item.extra=='temporadas':
        for tempitem in itemlist:
            templist += episodios(tempitem)
    if config.get_library_support() and len(itemlist) > 0:
        # Extra entry that lets the user add the whole show to the library.
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta serie a la biblioteca[/COLOR]',
                             url=item.url, action="add_serie_to_library", extra="temporadas",
                             contentSerieName=item.contentSerieName, extra1=item.extra1))
    if item.extra=='temporadas':
        return templist
    else:
        return itemlist
def extractFinalRtmpUrl(url,referer):
    """Resolve the playable stream URL from a hdcast/mamahd player page.

    Two strategies: (1) a ``file:'...'`` rtmp link on the page itself,
    from which a full rtmp command line (playPath/swfUrl/pageUrl/token)
    is assembled; (2) a nested iframe whose page carries a ``file:"..."``
    m3u8 link.  Returns "" when neither matches.
    """
    rtmpUrl = ""
    html = Mamahdcom.getContentFromUrl(url,"",Mamahdcom.cookie,referer)
    if 'file:\'' in html:
        # Strategy 1: rtmp link embedded directly in the player page.
        file = Decoder.extract("file:'",'\',',html)
        rtmp = file[0:file.rfind("/") + 1]          # rtmp application URL
        playpath = file[file.rfind("/") + 1:]       # stream name
        swfUrl = ""
        # Hard-coded token expected by the server-side player.
        secureToken = "SECURET0KEN#yw%.?()@W!"
        if url.find("hdcast.org") > -1:
            swfUrl = "http://player.hdcast.org/jws/jwplayer.flash.swf"
        rtmpUrl = rtmp + " playPath=" + playpath + " swfUrl=" + swfUrl + " pageUrl=" + url + " flashver=WIN/2019,0,0,226 live=true timeout=14 token=" + secureToken
        logger.debug("built final rtmp link: " + rtmpUrl)
    elif 'allowtransparency="true" src=' in html:
        # Strategy 2: follow the nested iframe.
        logger.debug("using second way...")
        # NOTE(review): this replace is a no-op as written; it presumably
        # was '"&amp;" -> "&"' before an HTML-entity mangling — confirm.
        secondIframe = Decoder.extract('allowtransparency="true" src=', ' ', html).replace("&","&")
        logger.debug("found second way url: " + secondIframe+", referer: "+url)
        # Browser-like headers; Referer is required by the iframe host.
        headers = {
            "User-Agent": Downloader.USER_AGENT,
            "Accept-Language": "en-US,en;q=0.8,es-ES;q=0.5,es;q=0.3",
            "Upgrade-Insecure-Requests" : "1",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Referer": url
        }
        html2 = Mamahdcom.getContentFromUrl(url=secondIframe,headers=headers)
        logger.debug("html2 is: "+html2)
        if 'file:"' in html2:
            # Here the link is an m3u8, not rtmp, despite the variable name.
            rtmpUrl = Decoder.extract('file:"', '",', html2)
            logger.debug("using m3u8 for: "+rtmpUrl)
    return rtmpUrl
def series(item):
    """List every show on the current page, plus a pagination entry."""
    logger.info()

    # Fetch the page and isolate each show card.
    page = httptools.downloadpage(item.url).data
    cards = re.findall('<article.*?</article>', page, re.DOTALL)

    results = []
    for card in cards:
        name = scrapertools.find_single_match(card, '<span>([^<]+)</span>')
        if name == "":
            # Fallback markup used by some cards.
            name = scrapertools.find_single_match(card, '<a href="[^"]+" class="title link">([^<]+)</a>')
        link = urlparse.urljoin(item.url, scrapertools.find_single_match(card, '<a href="([^"]+)"'))
        poster = scrapertools.find_single_match(card, '<div data-src="([^"]+)"')
        if poster == "":
            poster = scrapertools.find_single_match(card, '<img src="([^"]+)"')
        logger.debug("title=[" + name + "], url=[" + link + "]")
        results.append(Item(channel=item.channel, action="episodios", title=name,
                            url=link, thumbnail=poster))

    # Pagination: add a "next page" entry when the site exposes one.
    following = scrapertools.find_single_match(page, '<a href="([^"]+)" class="next">')
    if following != "":
        results.append(Item(channel=item.channel, action="series",
                            title=">> Pagina siguiente",
                            url=urlparse.urljoin(item.url, following),
                            viewmode="movie", thumbnail="", plot="", folder=True))
    return results
def play(item):
    """Resolve the redirect on the item's page and return playable items."""
    logger.info("play: {0}".format(item.url))
    page = httptools.downloadpage(item.url).data
    # The page redirects via javascript; capture the target URL.
    target = scrapertools.find_single_match(page, "location.href='([^']+)")
    logger.debug("Video URL = {0}".format(target))
    return servertools.find_video_items(data=target)
def __init__(self, response):
    """Parse a Cloudflare anti-bot challenge page.

    Extracts, when present, the data needed to answer the challenge by
    either of two methods: the javascript challenge form (#1, stored in
    ``self.js_data``) or the ``Refresh`` response header (#2, stored in
    ``self.header_data``).  On any parse failure the corresponding dict
    is left empty so callers can simply test its truthiness.

    :param response: dict with keys "url", "data" (the HTML body) and
        "headers" (response headers).
    """
    self.timeout = 5
    # urlparse tuple: [0] scheme, [1] netloc.
    self.domain = urlparse.urlparse(response["url"])[1]
    self.protocol = urlparse.urlparse(response["url"])[0]
    self.js_data = {}
    self.header_data = {}
    # Not a challenge page, or we are already submitting the answer.
    if "var s,t,o,p,b,r,e,a,k,i,n,g,f" not in response["data"] or "chk_jschl" in response["url"]:
        return
    try:
        self.js_data["auth_url"] = \
            re.compile('<form id="challenge-form" action="([^"]+)" method="get">').findall(response["data"])[0]
        self.js_data["params"] = {}
        self.js_data["params"]["jschl_vc"] = \
            re.compile('<input type="hidden" name="jschl_vc" value="([^"]+)"/>').findall(response["data"])[0]
        self.js_data["params"]["pass"] = \
            re.compile('<input type="hidden" name="pass" value="([^"]+)"/>').findall(response["data"])[0]
        # Initial value of the challenge variable, plus its name.
        var, self.js_data["value"] = \
            re.compile('var s,t,o,p,b,r,e,a,k,i,n,g,f[^:]+"([^"]+)":([^\n]+)};', re.DOTALL).findall(response["data"])[0]
        # Sequence of arithmetic operations applied to that variable.
        self.js_data["op"] = re.compile(var + "([\+|\-|\*|\/])=([^;]+)", re.MULTILINE).findall(response["data"])
        # Mandatory wait, given in ms on the page.
        self.js_data["wait"] = int(re.compile("\}, ([\d]+)\);", re.MULTILINE).findall(response["data"])[0]) / 1000
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any parse error disables method #1.
        logger.debug("Metodo #1 (javascript): NO disponible")
        self.js_data = {}
    if "refresh" in response["headers"]:
        try:
            self.header_data["wait"] = int(response["headers"]["refresh"].split(";")[0])
            self.header_data["auth_url"] = response["headers"]["refresh"].split("=")[1].split("?")[0]
            self.header_data["params"] = {}
            self.header_data["params"]["pass"] = response["headers"]["refresh"].split("=")[2]
        except Exception:
            logger.debug("Metodo #2 (headers): NO disponible")
            self.header_data = {}
def play(item):
    """Resolve playable links for a seriesblanco-style item.

    When the URL belongs to this site (HOST), the hidden links are
    fetched through the site's load_enlace.php AJAX endpoint; otherwise
    ``item.url`` is handed to the server resolvers directly.
    """
    logger.info("{0} - {1} = {2}".format(item.show, item.title, item.url))
    if item.url.startswith(HOST):
        data = httptools.downloadpage(item.url).data
        # Each link is lazily loaded via loadEnlace(serie, temp, cap, id).
        ajaxLink = re.findall("loadEnlace\((\d+),(\d+),(\d+),(\d+)\)", data)
        ajaxData = ""
        for serie, temp, cap, linkID in ajaxLink:
            logger.debug("Ajax link request: Sherie = {0} - Temp = {1} - Cap = {2} - Link = {3}".format(serie, temp, cap, linkID))
            ajaxData += httptools.downloadpage(HOST + '/ajax/load_enlace.php?serie=' + serie + '&temp=' + temp + '&cap=' + cap + '&id=' + linkID).data
        if ajaxData:
            # Prefer the AJAX responses over the original page.
            data = ajaxData
        patron = "onclick='window.open\(\"([^\"]+)\"\);'/>"
        url = scrapertools.find_single_match(data, patron)
    else:
        url = item.url
    itemlist = servertools.find_video_items(data=url)
    # Strip the trailing "[...]" block from the full title, then append
    # the language tag instead.
    titulo = scrapertools.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$")
    if titulo:
        titulo += " [{language}]".format(language=item.language)
    for videoitem in itemlist:
        if titulo:
            videoitem.title = titulo
        else:
            videoitem.title = item.title
        videoitem.channel = item.channel
    return itemlist
def search(item, texto):
    """POST the site search form and return the matching shows."""
    logger.info("[laserietv.py] " + item.url + " search " + texto)
    results = []

    # The site search is a POST to its DLE search endpoint.
    search_url = "%s/index.php?do=search" % host
    post_data = ("do=search&subaction=search&search_start=0&full_search=0"
                 "&result_from=1&story=") + texto
    logger.debug(post_data)
    page = scrapertools.cachePagePost(search_url, post=post_data)

    patron = '<div class="video-item-cover"[^<]+<a href="(.*?)">[^<]+<img src="(.*?)" alt="(.*?)">'
    matches = re.compile(patron, re.DOTALL).findall(page)
    scrapertools.printMatches(matches)

    for found_url, thumb, name in matches:
        thumb = host + thumb
        logger.info(found_url + " " + name + thumb)
        results.append(infoSod(
            Item(channel=__channel__,
                 action="episodi",
                 title="[COLOR azure]" + name + "[/COLOR]",
                 url=found_url,
                 thumbnail=thumb,
                 fulltitle=name,
                 show=name),
            tipo='tv'))
    return results
def update():
    """Self-update the addon: download the remote ZIP and unpack it over
    the installed addon directory, then delete the ZIP."""
    # download ZIP file
    start = time.clock()
    localfile = ROOT_DIR+"/update.zip"
    # The remote XML lists the latest package under a <file> tag.
    response = urllib2.urlopen(REMOTE_FILE_XML)
    html = response.read()
    remote_file = common.parseDOM(html,"file")[0].encode("utf-8")  # remote version
    downloadtools.downloadfile(remote_file, localfile, notStop=False)
    end = time.clock()
    logger.info("org.harddevelop.kodi.tv Downloaded in %d seconds " % (end-start+1))
    separatorChar = XBMCUtils.getSeparatorChar()
    # unzip over the addons dir, replacing this plugin's folder.
    unzipper = ziptools.ziptools()
    logger.info("org.harddevelop.kodi.tv destpathname=%s" % ROOT_DIR)
    addons_dir = XBMCUtils.getAddonsDir()
    current_plugin_dir = XBMCUtils.getPathFixedFrom(XBMCUtils.getAddonInfo('path'))
    logger.debug("using dir: "+addons_dir+" to extract content")
    # extractReplacingMainFolder works around zips whose top folder name
    # differs from the installed plugin dir (github issues).
    unzipper.extractReplacingMainFolder(localfile,addons_dir,current_plugin_dir)
    # unzipper.extract(localfile,ROOT_DIR)
    # clean downloaded zip file
    logger.info("org.harddevelop.kodi.tv clean zip file...")
    os.remove(localfile)
    logger.info("org.harddevelop.kodi.tv clean done!")
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Extract the direct download URL from a nowdownload.co page.

    First tries the immediate "Download Now" button; when absent, fires
    the site's token API call and follows the delayed (expiryText) link.
    Returns a single-element list with the final URL.
    """
    logger.info("[nowdownload.py] get_video_url (page_url='%s')" % page_url)
    '''
    <a href="http://f02.nowdownload.co/dl/91efaa9ec507ef4de023cd62bb9a0fe2/50ab76ac/6711c9c90ebf3_family.guy.s11e02.italian.subbed.hdtv.xvid_gannico.avi" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>
    '''
    data = scrapertools.cache_page( page_url )
    logger.debug("[nowdownload.py] data:" + data)
    try:
        # Fast path: the direct download button is already on the page.
        url = scrapertools.get_match(data,'<a href="([^"]*)" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>')
    except:
        # Slow path: request the token first, as the site's own JS does:
        # $.get("/api/token.php?token=7e1ab09df2775dbea02506e1a2651883");
        token = scrapertools.get_match(data,'(/api/token.php\?token=[^"]*)')
        logger.debug("[nowdownload.py] token:" + token)
        # NOTE(review): `d` is never read — the call seems to be made only
        # for its server-side side effect; but the next get_match searches
        # `data` (the original page), possibly it should search `d` — verify.
        d= scrapertools.cache_page( "http://www.nowdownload.co"+ token )
        url = scrapertools.get_match(data,'expiryText: \'<a class="btn btn-danger" href="([^"]*)')
        logger.debug("[nowdownload.py] url_1:" + url)
        data = scrapertools.cache_page("http://www.nowdownload.co" + url )
        logger.debug("[nowdownload.py] data:" + data)
        # <a href="http://f03.nowdownload.co/dl/..." class="btn btn-success">Click here to download !</a>
        url = scrapertools.get_match(data,'<a href="([^"]*)" class="btn btn-success">Click here to download !</a>')
        logger.debug("[nowdownload.py] url_final:" + url)
    video_urls = [url]
    return video_urls
def series(item):
    """Scrape the shows listed on the page and add a next-page entry."""
    logger.info()
    page = scrapertools.cache_page(item.url)

    show_pattern = ('<div class="post" id="post"[^<]+<center><h1 class="post-title entry-title"[^<]+<a href="([^"]+)">'
                    '(.*?)</a>[^<]+</h1></center>[^<]+<div[^<]+</div>[^<]+<div[^<]+<div.+?<img src="([^"]+)"')
    results = []
    for link, raw_title, poster in re.findall(show_pattern, page, re.DOTALL):
        name = scrapertools.unescape(raw_title)
        absolute_url = urlparse.urljoin(item.url, link)
        absolute_thumb = urlparse.urljoin(item.url, poster)
        logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(name, absolute_url, absolute_thumb))
        results.append(Item(channel=item.channel, action="episodios", title=name,
                            url=absolute_url, thumbnail=absolute_thumb, show=name,
                            fulltitle=name, fanart=absolute_thumb, folder=True))

    # Pagination link(s), if any.
    for next_link in re.findall('</span><a class="page larger" href="([^"]+)"', page, re.DOTALL):
        results.append(Item(channel=item.channel, action="series",
                            title=">> Pagina Siguiente", url=next_link,
                            folder=True, viewmode="movies_with_plot"))
    return results
def episodiosxtemp(item):
    """Build the episode list for one season of a show.

    Scrapes every episode from the show page; when ``item.extra1`` is
    ``'library'`` all episodes are kept, otherwise only those whose
    season number equals ``item.contentSeasonNumber``.

    :param item: season Item carrying url, contentSerieName,
        contentSeasonNumber and infoLabels.
    :return: list of "findvideos" Items, enriched via TMDB.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Strip quotes/whitespace/entities so the class-based patron matches.
    data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
    patron = 'class=numerando>(.*?)x(.*?)<\/div><div class=episodiotitle><a href=(.*?)>(.*?)<\/a><span class=date>.*?'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedtemp, scrapedep, scrapedurl, scrapedtitle in matches:
        url = host + scrapedurl
        contentEpisodeNumber = scrapedep.strip(' ')
        temp = scrapedtemp.strip(' ')
        title = item.contentSerieName + ' ' + temp + 'x' + contentEpisodeNumber + ' ' + scrapedtitle
        infoLabels['episode'] = contentEpisodeNumber
        logger.debug('Nombre: ' + item.contentSerieName)
        # The original code had two verbatim-identical appends, one per
        # branch; they are merged into a single condition here.
        if item.extra1 == 'library' or temp == item.contentSeasonNumber:
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title,
                                 fulltitle=item.fulltitle, url=url,
                                 thumbnail=item.thumbnail, plot=item.plot,
                                 contentSerieName=item.contentSerieName,
                                 contentSeasonNumber=item.contentSeasonNumber,
                                 infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def getAddonsDir():
    """Return the absolute path of Kodi's addons directory, with the
    platform-appropriate path separator appended."""
    sep = '/'
    if XBMCUtils.isWindowsPlatform():
        logger.debug("Detected Windows system...")
        sep = "\\"
    return xbmc.translatePath("special://home" + sep + "addons" + sep)
def search_library_path():
    """Look up the streamondemand library folder registered in Kodi's
    video DB; return its path, or None when not registered."""
    query = 'SELECT strPath FROM path WHERE strPath LIKE "special://%/plugin.video.streamondemand/library/" AND idParentPath ISNULL'
    row_count, rows = execute_sql_kodi(query)
    if row_count < 1:
        return None
    logger.debug(rows[0][0])
    return rows[0][0]
def get_tvshow_cast(self, _id, lang=DEFAULT_LANG):
    """Fetch the cast of a series from TheTVDB and merge it into
    ``self.result`` under the key "cast".

    @param _id: series id
    @type _id: str
    @param lang: language code for the lookup
    @type lang: str
    """
    logger.info()
    url = HOST + "/series/%s/actors" % _id
    DEFAULT_HEADERS["Accept-Language"] = lang
    logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))

    request = urllib2.Request(url, headers=DEFAULT_HEADERS)
    handle = urllib2.urlopen(request)
    payload = handle.read()
    handle.close()

    parsed = jsontools.load_json(payload)
    # The API returns the actors under "data"; expose them as "cast".
    parsed["cast"] = parsed.pop("data")
    self.result.update(parsed)
def processEpisode(self, section, dirName, inputName=None, status=0, clientAgent="manual", inputCategory=None):
    """Forward a completed download to the configured app's post_process
    endpoint and report success/failure.

    :param section: config section name of the target application.
    :param dirName: download folder to post-process.
    :param inputName: downloaded file name (may be None).
    :param status: non-zero means the download itself failed.
    :return: [0, msg] on success, [1, msg] on any failure.
    """
    if int(status) != 0:
        logger.warning("FAILED DOWNLOAD DETECTED, nothing to process.", section)
        return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)]

    cfg = dict(core.CFG[section][inputCategory])
    host = cfg["host"]
    port = cfg["port"]
    username = cfg["username"]
    password = cfg["password"]
    ssl = int(cfg.get("ssl", 0))
    web_root = cfg.get("web_root", "")
    # FIX: was int(cfg.get("remote_path"), 0) — misplaced parenthesis made
    # 0 the *base* argument of int() and raised TypeError when the key was
    # missing (cfg.get returned None).
    remote_path = int(cfg.get("remote_path", 0))
    protocol = "https://" if ssl else "http://"

    url = "{0}{1}:{2}{3}/post_process".format(protocol, host, port, web_root)
    if not server_responding(url):
        logger.error("Server did not respond. Exiting", section)
        return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)]

    inputName, dirName = convert_to_ascii(inputName, dirName)
    clean_name, ext = os.path.splitext(inputName)
    if len(ext) == 4:  # we assume this was a standard extension.
        inputName = clean_name

    params = {"nzb_folder": remoteDir(dirName) if remote_path else dirName}
    if inputName is not None:
        params["nzb_name"] = inputName

    success = False
    logger.debug("Opening URL: {0}".format(url), section)
    try:
        r = requests.get(
            url,
            auth=(username, password),
            params=params,
            stream=True,
            verify=False,
            timeout=(30, 300)
        )
    except requests.ConnectionError:
        logger.error("Unable to open URL", section)
        return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]

    # Stream the remote log; a SUCCESSFUL marker means it worked.
    for line in r.iter_lines():
        if line:
            logger.postprocess("{0}".format(line), section)
            if "Post Processing SUCCESSFUL" in line:
                success = True

    if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
        logger.error("Server returned status {0}".format(r.status_code), section)
        return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]

    if success:
        logger.postprocess("SUCCESS: This issue has been processed successfully", section)
        return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
    else:
        logger.warning("The issue does not appear to have successfully processed. Please check your Logs", section)
        return [
            1,
            "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section),
        ]
def update():
    """Self-update variant: download the remote ZIP and unpack it over
    the installed addon directory, deriving the addons dir by splitting
    ROOT_DIR at the last path separator."""
    # download ZIP file
    start = time.clock()
    localfile = ROOT_DIR+"/update.zip"
    # The remote XML lists the latest package under a <file> tag.
    response = urllib2.urlopen(REMOTE_FILE_XML)
    html = response.read()
    remote_file = common.parseDOM(html,"file")[0].encode("utf-8")  # remote version
    downloadtools.downloadfile(remote_file, localfile, notStop=False)
    end = time.clock()
    logger.info("org.harddevelop.kodi.tv Downloaded in %d seconds " % (end-start+1))
    separatorChar = "/"
    if xbmc.getCondVisibility( "system.platform.windows" ):
        logger.debug("Detected Windows system...")
        separatorChar = "\\"
    # unzip
    unzipper = ziptools.ziptools()
    logger.info("org.harddevelop.kodi.tv destpathname=%s" % ROOT_DIR)
    # Parent of ROOT_DIR is the addons dir; the last path component is
    # this plugin's own folder name.
    addons_dir = xbmc.translatePath(ROOT_DIR[:ROOT_DIR.rfind(separatorChar)+1])
    current_plugin_dir = xbmc.translatePath(ROOT_DIR[ROOT_DIR.rfind(separatorChar)+1:])
    logger.debug("using dir: "+addons_dir+" to extract content")
    # extractReplacingMainFolder works around zips whose top folder name
    # differs from the installed plugin dir (github issues).
    unzipper.extractReplacingMainFolder(localfile,addons_dir,current_plugin_dir)
    # unzipper.extract(localfile,ROOT_DIR)
    # clean downloaded zip file
    logger.info("org.harddevelop.kodi.tv clean zip file...")
    os.remove(localfile)
    logger.info("org.harddevelop.kodi.tv clean done!")
def get_nzoid(inputName):
    """Query the SABnzbd queue API and return the nzo_id of the slot
    whose filename matches ``inputName`` (with or without extension).

    :param inputName: downloaded item name to look for.
    :return: the matching nzo_id, or None when not found / on error.
    """
    nzoid = None
    logger.debug("Searching for nzoid from SAbnzbd ...")
    # SABNZBDHOST may or may not already include the scheme.
    if "http" in core.SABNZBDHOST:
        baseURL = "%s:%s/api" % (core.SABNZBDHOST, core.SABNZBDPORT)
    else:
        baseURL = "http://%s:%s/api" % (core.SABNZBDHOST, core.SABNZBDPORT)
    url = baseURL
    params = {}
    params['apikey'] = core.SABNZBDAPIKEY
    params['mode'] = "queue"
    params['output'] = 'json'
    try:
        r = requests.get(url, params=params, verify=False, timeout=(30, 120))
    except requests.ConnectionError:
        logger.error("Unable to open URL")
        return nzoid  # failure
    try:
        result = r.json()
        cleanName = os.path.splitext(os.path.split(inputName)[1])[0]
        for slot in result['queue']['slots']:
            if slot['filename'] in [inputName, cleanName]:
                nzoid = slot['nzo_id']
                logger.debug("Found nzoid: %s" % nzoid)
                break
    except Exception:
        # Narrowed from a bare `except:`; any malformed/partial JSON
        # response lands here and we fall through returning None.
        logger.warning("Data from SABnzbd could not be parsed")
    return nzoid
def create_service_files(self, node, service):
    """
    Creates node service files.

    :param PyCoreNode node: node to reconfigure service for
    :param CoreService service: service to reconfigure
    :return: nothing
    """
    logger.info("node(%s) service(%s) creating config files", node.name, service.name)
    # get values depending on if custom or not
    config_files = service.configs
    if not service.custom:
        config_files = service.get_configs(node)
    for file_name in config_files:
        logger.debug("generating service config: %s", file_name)
        if service.custom:
            cfg = service.config_data.get(file_name)
            # Custom services may still lack this file; fall back to
            # generating it.
            if cfg is None:
                cfg = service.generate_config(node, file_name)
            # cfg may have a file:/// url for copying from a file
            try:
                if self.copy_service_file(node, file_name, cfg):
                    # File was copied verbatim; nothing to write below.
                    continue
            except IOError:
                logger.exception("error copying service file: %s", file_name)
                continue
        else:
            cfg = service.generate_config(node, file_name)
        node.nodefile(file_name, cfg)
def _visit(self, current_service):
    """Depth-first visit of the service dependency graph.

    Appends services to ``self.path`` in boot order (dependencies
    first), then walks dependents.  Raises ValueError on a missing or
    cyclic dependency.  Returns the accumulated boot path.
    """
    logger.debug("visiting service(%s): %s", current_service.name, self.path)
    self.visited.add(current_service.name)
    # `visiting` tracks the current DFS stack for cycle detection.
    self.visiting.add(current_service.name)
    # dive down
    for service_name in current_service.dependencies:
        if service_name not in self.node_services:
            raise ValueError("required dependency was not included in node services: %s" % service_name)
        if service_name in self.visiting:
            raise ValueError("cyclic dependency at service(%s): %s" % (current_service.name, service_name))
        if service_name not in self.visited:
            service = self.node_services[service_name]
            self._visit(service)
    # add service when bottom is found
    logger.debug("adding service to boot path: %s", current_service.name)
    self.booted.add(current_service.name)
    self.path.append(current_service)
    self.visiting.remove(current_service.name)
    # rise back up: make sure services that depend on this one are
    # placed after it.
    for service_name in self.dependents.get(current_service.name, []):
        if service_name not in self.visited:
            service = self.node_services[service_name]
            self._visit(service)
    return self.path
def rename_script(dirname):
    """Locate a rename*.sh / rename*.bat script below *dirname* and apply
    its mv/Move commands, renaming files in place."""
    script_path = ""
    # Walk the tree; remember the last matching script and its folder.
    for folder, _, names in os.walk(dirname):
        for name in names:
            if re.search('(rename\S*\.(sh|bat)$)', name, re.IGNORECASE):
                script_path = os.path.join(folder, name)
                dirname = folder
                break
    if not script_path:
        return
    for line in [raw.strip() for raw in open(script_path)]:
        # Only mv/Move lines are rename commands; skip everything else.
        if not re.search('^(mv|Move)', line, re.IGNORECASE):
            continue
        cmd = shlex.split(line)[1:]
        if len(cmd) == 2 and os.path.isfile(os.path.join(dirname, cmd[0])):
            orig = os.path.join(dirname, cmd[0])
            # Keep only the basename of the destination, whatever the
            # script's path style (\ or /).
            dest = os.path.join(dirname, cmd[1].split('\\')[-1].split('/')[-1])
            if os.path.isfile(dest):
                continue
            logger.debug("Renaming file {source} to {destination}".format
                         (source=orig, destination=dest), "EXCEPTION")
            try:
                os.rename(orig, dest)
            except Exception as error:
                logger.error("Unable to rename file due to: {error}".format(error=error), "EXCEPTION")
def validate_service(self, node, service):
    """
    Run the validation command(s) for a service.

    :param core.netns.vnode.LxcNode node: node to validate service for
    :param CoreService service: service to validate
    :return: service validation status (0 ok, -1 on first failure)
    :rtype: int
    """
    logger.info("validating node(%s) service(%s)", node.name, service.name)
    commands = service.validate if service.custom else service.get_validate(node)

    status = 0
    for command in commands:
        logger.debug("validating service(%s) using: %s", service.name, command)
        try:
            node.check_cmd(command)
        except CoreCommandError as e:
            logger.error("node(%s) service(%s) validate failed", node.name, service.name)
            logger.error("cmd(%s): %s", e.cmd, e.output)
            status = -1
            break
    return status
def update_biblio(item): logger.info() # Actualizar las series activas sobreescribiendo import library_service if item.extra == "overwrite_everything": if config.is_xbmc(): seleccion = platformtools.dialog_yesno(config.PLUGIN_NAME, "Avviso: devi attendere.", "Vuoi continuare ?") if seleccion == 1: library_service.check_for_update(overwrite="everything") else: library_service.check_for_update(overwrite="everything") else: library_service.check_for_update(overwrite=True) # Eliminar las carpetas de peliculas que no contengan archivo strm for raiz, subcarpetas, ficheros in filetools.walk(library.MOVIES_PATH): strm = False for f in ficheros: if f.endswith(".strm"): strm = True break if ficheros and not strm: logger.debug("Borrando carpeta de pelicula eliminada: %s" % raiz) filetools.rmdirtree(raiz)
def abort_download(self):
    """Abort the running download thread (best effort) and return to the
    previous screen."""
    logger.debug("abort_download")
    try:
        self.download_thread.abort()
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt).  Best effort: the thread may not
        # exist or may already have finished.
        pass
    self.screen_manager.current = self.screen_manager.previous()
def get_tvshows(from_channel):
    """
    Return the filtered series of a channel.

    :param from_channel: channel whose filtered series are stored
    :type from_channel: str
    :return: dict with the series (empty when none are stored)
    :rtype: dict
    """
    logger.info()
    dict_series = {}
    # Hoisted: the settings dir path was previously computed twice; the
    # redundant `name_file = from_channel` alias is gone too.
    settings_dir = os.path.join(config.get_data_path(), "settings_channels")
    if not os.path.exists(settings_dir):
        os.mkdir(settings_dir)
    fname = os.path.join(settings_dir, from_channel + "_data.json")
    data = filetools.read(fname)
    dict_data = jsontools.load_json(data)
    # Backs up the raw file when it could not be parsed.
    check_json_file(data, fname, dict_data)
    if TAG_TVSHOW_FILTER in dict_data:
        dict_series = dict_data[TAG_TVSHOW_FILTER]
    logger.debug("json_series: {0}".format(dict_series))
    return dict_series
def check_json_file(data, fname, dict_data):
    """
    If *dict_data* (the JSON file parsed into a dict) is empty/falsy,
    save a backup of the raw contents *data* as "<fname>.bk".

    :param data: raw contents of the file *fname*
    :type data: str
    :param fname: name of the file that was read
    :type fname: str
    :param dict_data: parsed dictionary
    :type dict_data: dict
    """
    logger.info()
    if dict_data:
        return  # parsed fine, nothing to do
    logger.error("Error al cargar el json del fichero {0}".format(fname))
    if data == "":
        logger.debug("Está vacío el fichero: {0}".format(fname))
        return
    # The file had content but failed to parse: keep a .bk copy.
    title = filetools.write("{0}.bk".format(fname), data)
    if title != "":
        logger.error("Ha habido un error al guardar el fichero: {0}.bk".format(fname))
    else:
        logger.debug("Se ha guardado una copia con el nombre: {0}.bk".format(fname))
def busqueda(item):
    """Parse a Google CSE JSON search-results page into movie Items and
    append a next-page entry while more results remain.

    :param item: Item whose url is the CSE query (contains "start=N").
    :return: list of "findvideos" Items enriched via TMDB.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    from core import jsontools
    data = jsontools.load_json(data)
    for entry in data["results"]:
        full_title = entry["richSnippet"]["metatags"]["ogTitle"]
        url = entry["url"]
        plot = scrapertools.htmlclean(entry["content"])
        thumbnail = entry["richSnippet"]["metatags"]["ogImage"]
        # FIX: the year must be extracted from the *original* title.  The
        # old code stripped the "(year)" suffix first and then ran the
        # year regex on the stripped string, so it never matched and
        # 'year' ended up holding the whole title.
        year = scrapertools.find_single_match(full_title, '\((\d{4})\)')
        title = scrapertools.find_single_match(full_title, '(.*?) \(.*?\)')
        if not title:
            # No "(year)" suffix present: keep the full title instead of "".
            title = full_title
        fulltitle = title
        logger.debug(plot)
        new_item = item.clone(action="findvideos", title=title, fulltitle=fulltitle,
                              url=url, thumbnail=thumbnail, contentTitle=title,
                              contentType="movie", plot=plot,
                              infoLabels={'year': year, 'sinopsis': plot})
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination: CSE pages are 20 results wide.
    actualpage = int(scrapertools.find_single_match(item.url, 'start=(\d+)'))
    totalresults = int(data["cursor"]["resultCount"])
    if actualpage + 20 <= totalresults:
        url_next = item.url.replace("start=" + str(actualpage), "start=" + str(actualpage + 20))
        itemlist.append(Item(channel=item.channel, action="busqueda",
                             title=">> Página Siguiente", url=url_next))
    return itemlist
def run(self):
    """Worker body of the download thread: decode the link, notify the
    user, then download into ``self.folder``."""
    logger.debug("DownloadThread.run")
    self.running = True
    app = App.get_running_app()
    self.progressDialog.value = 0
    self.progressDialogLabel.text = "decoding link: "+self.url
    decoded_link = Decoder.decodeLink(self.url)
    # copy to clipboard
    app.copy(decoded_link)
    if decoded_link!='' and decoded_link.find("http")>-1:
        app.message("Info","Link has been decoded (from "+self.url+"):\n"+decoded_link+" has been decoded and copied to clipboard.\nDownload should be started/resumed.")
        app.download_screen.ids.label_text_message.text=decoded_link
    elif self.aborted:
        app.message("Error","File "+app.target_file+" has not been downloaded, please try again and make sure remote url exists.")
    separationChar = '/'
    fileName = ''
    if self.url.find("/")>-1:
        # Prefer the basename of the original url; fall back to the
        # decoded link's basename when the former has no extension.
        fileName = self.url[self.url.rfind("/")+1:]
        # NOTE(review): `decoded_link.find("/")` is truthy for any result
        # except a "/" at index 0 — probably meant `> -1`; confirm.
        if fileName.find(".")==-1 and decoded_link.find("/"):
            fileName = decoded_link[decoded_link.rfind("/")+1:]
    self.downloadfile(decoded_link,self.folder+separationChar+fileName,[],False,True,self.progressDialog,self.progressDialogLabel)
    self.running = False
    app.download_screen.ids.loading.opacity=0
    if self.aborted:
        app.message("Info","Download proccess has been stopped for file: \n "+app.target_file)
def par2(dirname):
    """Run a par2 repair on the largest .par2 file found in *dirname*.

    Silently does nothing when no par2 command is configured or no
    .par2 file exists.  Output of the par2 process is discarded.

    :param dirname: folder to scan and repair in.
    """
    sofar = 0
    parfile = ''
    objects = []
    if os.path.exists(dirname):
        objects = os.listdir(dirname)
    # Pick the biggest .par2 file (the main index, typically).
    for item in objects:
        if item.endswith('.par2'):
            size = os.path.getsize(os.path.join(dirname, item))
            if size > sofar:
                sofar = size
                parfile = item
    if core.PAR2CMD and parfile:
        pwd = os.getcwd()  # Get our Present Working Directory
        os.chdir(dirname)  # set directory to run par on.
        if platform.system() == 'Windows':
            bitbucket = open('NUL')
        else:
            bitbucket = open('/dev/null')
        logger.info('Running par2 on file {0}.'.format(parfile), 'PAR2')
        command = [core.PAR2CMD, 'r', parfile, '*']
        cmd = ''
        for item in command:
            cmd = '{cmd} {item}'.format(cmd=cmd, item=item)
        logger.debug('calling command:{0}'.format(cmd), 'PAR2')
        # FIX: `result` was previously unbound when Popen raised, making
        # the `if result == 0` line below crash with NameError.
        result = None
        try:
            proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket)
            proc.communicate()
            result = proc.returncode
        except Exception:
            logger.error('par2 file processing for {0} has failed'.format(parfile), 'PAR2')
        if result == 0:
            logger.info('par2 file processing succeeded', 'PAR2')
        os.chdir(pwd)
        bitbucket.close()
def extractVideoLinksFromYoutube(self, video, params):
    """Fetch a YouTube video page and scrape its stream links.

    Returns a (links, video) tuple; on failure links is empty and
    ``video[u"apierror"]`` carries an error marker.
    """
    logger.debug(u"trying website: " + repr(params))
    get = params.get
    result = self.getVideoPageFromYoutube(get)
    # Age-restriction handling is deliberately disabled (`if False`):
    # the original check/login flow is kept here for reference only.
    if False:  # self.isVideoAgeRestricted(result):
        logger.debug(u"Age restricted video")
        if False:  # self.pluginsettings.userHasProvidedValidCredentials():
            # self.login._httpLogin({"new":"true"})
            result = self.getVideoPageFromYoutube(get)
        else:
            video[u"apierror"] = "ERROR1"  # self.language(30622)
    if result[u"status"] != 200:
        # Could not fetch the video page from YouTube at all.
        logger.debug(u"Couldn't get video page from YouTube")
        return ({}, video)
    links = self.scrapeWebPageForVideoLinks(result, video)
    # No direct links and no HLS fallback: report an error marker.
    if len(links) == 0 and not ("hlsvp" in video):
        logger.debug(u"Couldn't find video url- or stream-map.")
        if not u"apierror" in video:
            video[u"apierror"] = "AS"  # self.core._findErrors(result)
    logger.debug(u"Done")
    return (links, video)
def searchLists(param):
    """Query the Google CSE for pastebin lists matching *param*; return a
    list of {"title": ..., "link": ...} dicts (raw-paste links)."""
    query_url = (
        "https://www.googleapis.com/customsearch/v1element?"
        "key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY"
        "&rsz=filtered_cse"
        "&num=20"
        "&hl=en"
        "&prettyPrint=false"
        "&source=gcsc"
        "&gss=.com"
        "&sig=8bdfc79787aa2b2b1ac464140255872c"
        "&cx=013305635491195529773:0ufpuq-fpt0"
    ) + "&q=" + param + "&sort=date&googlehost=www.google.com&callback=google.search.Search.apiary846"

    raw = Pastebin.getContentFromUrl(query_url)
    logger.debug(Decoder.extract(',"results":', "]});", raw))

    found = []
    # The JSONP payload is split on each result-class marker rather than
    # parsed as JSON; fields are pulled out with string extraction.
    for chunk in raw.split('{"GsearchResultClass"'):
        link = Decoder.extract('"url":"', '","', chunk)
        if "pastebin.com" in link and "/raw/" not in link:
            # Rewrite a normal paste URL into its /raw/ form.
            link = link[: link.rfind("/")] + "/raw/" + link[link.rfind("/") + 1 :]
        title = Decoder.extract('"title":"', '","titleNoFormatting"', chunk)
        if "http" in link:
            logger.debug("appending result: " + title + ", url: " + link)
            found.append({"title": title, "link": link})
    return found
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve a nowvideo page URL into a list of playable stream URLs.

    Returns a list of ``[label, url]`` pairs.  The premium branch logs in
    first and queries the old ``player.api.php`` endpoint; the free branch
    scrapes the flashvars file/key and falls back to an error-URL query
    when the API answers with an HTML error page.

    NOTE(review): the credential-bearing literals below appear scrubbed
    (``"user="******"``) and are not valid Python as-is — the original
    values must be restored before this function can run.  Also
    ``®ister`` looks like a mangled ``&register``; verify against the
    upstream pelisalacarta source.
    """
    logger.info("[nowvideo.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
    if premium:
        # Fetch the login page (primes cookies)
        login_url = "http://www.nowvideo.eu/login.php"
        data = scrapertools.cache_page(login_url)
        # Perform the login
        login_url = "http://www.nowvideo.eu/login.php?return="
        post = "user="******"&pass="******"®ister=Login"
        headers = [["User-Agent", USER_AGENT], ["Referer", "http://www.nowvideo.eu/login.php"]]
        data = scrapertools.cache_page(login_url, post=post, headers=headers)
        # Download the video page
        data = scrapertools.cache_page(page_url)
        logger.debug("data:" + data)
        # URL to invoke: http://www.nowvideo.eu/api/player.api.php?user=aaa&file=rxnwy9ku2nwx7&pass=bbb&cid=1&cid2=undefined&key=83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788&cid3=undefined
        # On the page:
        '''
        flashvars.domain="http://www.nowvideo.eu";
        flashvars.file="rxnwy9ku2nwx7";
        flashvars.filekey="83.46.246.226-c7e707c6e20a730c563e349d2333e788";
        flashvars.advURL="0";
        flashvars.autoplay="false";
        flashvars.cid="1";
        flashvars.user="******";
        flashvars.key="bbb";
        flashvars.type="1";
        '''
        flashvar_file = scrapertools.find_single_match(data, 'flashvars.file="([^"]+)"')
        # filekey is stored indirectly: flashvars.filekey names a JS variable
        # whose value is then extracted in the second lookup.
        flashvar_filekey = scrapertools.find_single_match(data, 'flashvars.filekey=([^;]+);')
        flashvar_filekey = scrapertools.find_single_match(data, 'var ' + flashvar_filekey + '="([^"]+)"')
        flashvar_user = scrapertools.find_single_match(data, 'flashvars.user="******"]+)"')
        flashvar_key = scrapertools.find_single_match(data, 'flashvars.key="([^"]+)"')
        flashvar_type = scrapertools.find_single_match(data, 'flashvars.type="([^"]+)"')
        # http://www.nowvideo.eu/api/player.api.php?user=aaa&file=rxnwy9ku2nwx7&pass=bbb&cid=1&cid2=undefined&key=83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788&cid3=undefined
        url = "http://www.nowvideo.eu/api/player.api.php?user="******"&file=" + flashvar_file + "&pass="******"&cid=1&cid2=undefined&key=" + flashvar_filekey.replace(".", "%2E").replace("-", "%2D") + "&cid3=undefined"
        data = scrapertools.cache_page(url)
        logger.info("data=" + data)
        location = scrapertools.find_single_match(data, 'url=([^\&]+)&')
        location += "?client=FLASH"
        video_urls.append([scrapertools.get_filename_from_url(location)[-4:] + " [premium][nowvideo]", location])
    else:
        data = scrapertools.cache_page(page_url)
        video_id = scrapertools.find_single_match(data, 'flashvars\.file\s*=\s*"([^"]+)')
        flashvar_filekey = scrapertools.find_single_match(data, 'flashvars\.file[_]*key\s*=\s*([^;]+)')
        filekey = scrapertools.find_single_match(data, 'var\s+%s\s*=\s*"([^"]+)' % flashvar_filekey)
        # Percent-encode '.' and '-' exactly as the flash player does.
        filekey = filekey.replace(".", "%2E").replace("-", "%2D")
        # get stream url from api
        url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (filekey, video_id)
        data = scrapertools.cache_page(url)
        data = scrapertools.find_single_match(data, 'url=([^&]+)')
        res = scrapertools.get_header_from_response(url, header_to_get="content-type")
        # text/html means the API returned an error page, not a stream URL:
        # retry via the errorUrl variant, then fall back to the plain query.
        if res == "text/html":
            data = urllib.quote_plus(data).replace(".", "%2E")
            url = 'http://www.nowvideo.sx/api/player.api.php?cid3=undefined&numOfErrors=1&user=undefined&errorUrl=%s&pass=undefined&errorCode=404&cid=1&cid2=undefined&file=%s&key=%s' % (data, video_id, filekey)
            data = scrapertools.cache_page(url)
            try:
                data = scrapertools.find_single_match(data, 'url=([^&]+)')
            except:
                url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (filekey, video_id)
                data = scrapertools.cache_page(url)
                data = scrapertools.find_single_match(data, 'url=([^&]+)')
        media_url = data
        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [nowvideo]", media_url])
    return video_urls
def transcode_directory(dir_name):
    """Transcode every media file found under *dir_name* with ffmpeg.

    Returns ``(final_result, new_dir)`` where ``final_result`` is 0 when
    every transcode succeeded (each failed file adds its non-zero return
    code) and ``new_dir`` is the directory holding the output — either a
    per-release folder under ``core.OUTPUTVIDEOPATH`` or *dir_name* itself.
    Returns ``(1, dir_name)`` immediately when ffmpeg is not configured or
    the pre-processing of the file list fails.
    """
    if not core.FFMPEG:
        return 1, dir_name
    logger.info('Checking for files to be transcoded')
    final_result = 0  # initialize as successful
    if core.OUTPUTVIDEOPATH:
        new_dir = core.OUTPUTVIDEOPATH
        make_dir(new_dir)
        name = os.path.splitext(os.path.split(dir_name)[1])[0]
        new_dir = os.path.join(new_dir, name)
        make_dir(new_dir)
    else:
        new_dir = dir_name
    # Discard ffmpeg console output ('NUL' on Windows, '/dev/null' elsewhere).
    if platform.system() == 'Windows':
        bitbucket = open('NUL')
    else:
        bitbucket = open('/dev/null')
    movie_name = os.path.splitext(os.path.split(dir_name)[1])[0]
    file_list = core.list_media_files(dir_name, media=True, audio=False, meta=False, archives=False)
    # process_list may rewrite the list (e.g. grouping disc images); rem_list
    # are originals to delete afterwards, new_list are newly produced entries.
    file_list, rem_list, new_list, success = process_list(file_list, new_dir, bitbucket)
    if not success:
        bitbucket.close()
        return 1, dir_name
    for file in file_list:
        # Entries can be plain paths (string) or {img: data} dicts for disc images.
        if isinstance(file, string_types) and os.path.splitext(file)[1] in core.IGNOREEXTENSIONS:
            continue
        command = build_commands(file, new_dir, movie_name, bitbucket)
        newfile_path = command[-1]
        # transcoding files may remove the original file, so make sure to extract subtitles first
        if core.SEXTRACT and isinstance(file, string_types):
            extract_subs(file, newfile_path, bitbucket)
        try:  # Try to remove the file that we're transcoding to just in case. (ffmpeg will return an error if it already exists for some reason)
            os.remove(newfile_path)
        except OSError as e:
            if e.errno != errno.ENOENT:  # Ignore the error if it's just telling us that the file doesn't exist
                logger.debug('Error when removing transcoding target: {0}'.format(e))
        except Exception as e:
            logger.debug('Error when removing transcoding target: {0}'.format(e))
        logger.info('Transcoding video: {0}'.format(newfile_path))
        print_cmd(command)
        result = 1  # set result to failed in case call fails.
        try:
            if isinstance(file, string_types):
                proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket)
            else:
                # Disc-image entry: stream each VOB through zip_out into ffmpeg's stdin.
                img, data = next(iteritems(file))
                proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket, stdin=subprocess.PIPE)
                for vob in data['files']:
                    procin = zip_out(vob, img, bitbucket)
                    if procin:
                        shutil.copyfileobj(procin.stdout, proc.stdin)
                        procin.stdout.close()
            proc.communicate()
            result = proc.returncode
        except Exception:
            logger.error('Transcoding of video {0} has failed'.format(newfile_path))
        # Move matching subtitle files next to the transcoded output, renamed
        # to the new basename, without overwriting existing ones.
        if core.SUBSDIR and result == 0 and isinstance(file, string_types):
            for sub in get_subs(file):
                name = os.path.splitext(os.path.split(file)[1])[0]
                subname = os.path.split(sub)[1]
                newname = os.path.splitext(os.path.split(newfile_path)[1])[0]
                newpath = os.path.join(core.SUBSDIR, subname.replace(name, newname))
                if not os.path.isfile(newpath):
                    os.rename(sub, newpath)
        if result == 0:
            try:
                shutil.copymode(file, newfile_path)
            except Exception:
                pass
            logger.info('Transcoding of video to {0} succeeded'.format(newfile_path))
            # Remove the source unless we are keeping duplicates.
            if os.path.isfile(newfile_path) and (file in new_list or not core.DUPLICATE):
                try:
                    os.unlink(file)
                except Exception:
                    pass
        else:
            logger.error('Transcoding of video to {0} failed with result {1}'.format(newfile_path, result))
        # this will be 0 (successful) it all are successful, else will return a positive integer for failure.
        final_result = final_result + result
    if final_result == 0 and not core.DUPLICATE:
        for file in rem_list:
            try:
                os.unlink(file)
            except Exception:
                pass
    if not os.listdir(text_type(new_dir)):  # this is an empty directory and we didn't transcode into it.
        os.rmdir(new_dir)
        new_dir = dir_name
    if not core.PROCESSOUTPUT and core.DUPLICATE:  # We postprocess the original files to CP/SB
        new_dir = dir_name
    bitbucket.close()
    return final_result, new_dir
def print_cmd(command):
    """Debug-log *command* (a list of arguments) as one shell-style line."""
    # Each argument is rendered with a leading space, matching the
    # accumulator-style formatting used elsewhere in this file.
    rendered = ''.join(' {0}'.format(part) for part in command)
    logger.debug('calling command:{0}'.format(rendered))
def log(funzione="", stringa="", canale=__channel__):
    """Debug-log *stringa* tagged with the channel and function name."""
    logger.debug("[{0}].[{1}] {2}".format(canale, funzione, stringa))
def sync_trakt_pelisalacarta(path_folder):
    """Sync watched-episode flags from the Trakt addon into a local tvshow.nfo.

    *path_folder* is the library folder of one series; its name must embed
    the series id in brackets (``[ttNNN]``, ``[tvdb_NNN]`` or ``[tmdb_NNN]``).
    When the Kodi ``script.trakt`` addon is installed, the matching Trakt
    show is looked up and the per-episode/season/series playcounts in the
    nfo are updated accordingly.  Best-effort: returns silently on any
    failure.
    """
    logger.info()
    # Only attempt the lookup when the trakt addon is installed
    if xbmc.getCondVisibility('System.HasAddon("script.trakt")'):
        # Make the trakt addon's bundled libraries importable
        paths = [
            "special://home/addons/script.module.dateutil/lib/",
            "special://home/addons/script.module.six/lib/",
            "special://home/addons/script.module.arrow/lib/",
            "special://home/addons/script.module.trakt/lib/",
            "special://home/addons/script.trakt/"
        ]
        for path in paths:
            import sys
            sys.path.append(xbmc.translatePath(path))
        # Fetch the watched shows from trakt
        try:
            from resources.lib.traktapi import traktAPI
            traktapi = traktAPI()
        except:
            return
        shows = traktapi.getShowsWatched({})
        shows = shows.items()
        # Extract the series id (inside square brackets) for comparison
        import re
        _id = re.findall("\[(.*?)\]", path_folder, flags=re.DOTALL)[0]
        logger.debug("el id es %s" % _id)
        # NOTE(review): str.strip("tvdb_") strips *characters*, not the
        # prefix — it also eats leading/trailing t/v/d/b/_ from the id;
        # works for numeric ids but verify for alphanumeric ones.
        if "tt" in _id:
            type_id = "imdb"
        elif "tvdb_" in _id:
            _id = _id.strip("tvdb_")
            type_id = "tvdb"
        elif "tmdb_" in _id:
            type_id = "tmdb"
            _id = _id.strip("tmdb_")
        else:
            logger.error("No hay _id de la serie")
            return
        # Load the series data stored by pelisalacarta
        from core import library
        tvshow_file = filetools.join(path_folder, "tvshow.nfo")
        head_nfo, serie = library.read_nfo(tvshow_file)
        # Look for our series among the trakt watched shows
        for show in shows:
            show_aux = show[1].to_dict()
            try:
                _id_trakt = show_aux['ids'].get(type_id, None)
                # logger.debug("ID ES %s" % _id_trakt)
                if _id_trakt:
                    if _id == _id_trakt:
                        logger.debug("ENCONTRADO!! %s" % show_aux)
                        # Build a {"SxNN": watched} dict from the trakt payload
                        dict_trakt_show = {}
                        for idx_season, season in enumerate(show_aux['seasons']):
                            for idx_episode, episode in enumerate(show_aux['seasons'][idx_season]['episodes']):
                                sea_epi = "%sx%s" % (
                                    show_aux['seasons'][idx_season]['number'],
                                    str(show_aux['seasons'][idx_season]['episodes'][idx_episode]['number']).zfill(2))
                                dict_trakt_show[sea_epi] = show_aux['seasons'][idx_season]['episodes'][idx_episode]['watched']
                        logger.debug("dict_trakt_show %s " % dict_trakt_show)
                        # Keys of the form "1x01" are episodes
                        regex_epi = re.compile('\d+x\d+')
                        keys_episodes = [key for key in serie.library_playcounts if regex_epi.match(key)]
                        # Keys containing "season " are whole seasons
                        keys_seasons = [key for key in serie.library_playcounts if 'season ' in key]
                        # Season numbers from the season keys
                        seasons = [key.strip('season ') for key in keys_seasons]
                        # Mark each episode with trakt's watched value (0 when unknown)
                        for k in keys_episodes:
                            serie.library_playcounts[k] = dict_trakt_show.get(k, 0)
                        for season in seasons:
                            episodios_temporada = 0
                            episodios_vistos_temporada = 0
                            # Episode keys belonging to this season
                            keys_season_episodes = [key for key in keys_episodes if key.startswith("%sx" % season)]
                            for k in keys_season_episodes:
                                episodios_temporada += 1
                                if serie.library_playcounts[k] > 0:
                                    episodios_vistos_temporada += 1
                            # When every episode is watched, mark the season as watched
                            if episodios_temporada == episodios_vistos_temporada:
                                serie.library_playcounts.update({"season %s" % season: 1})
                        temporada = 0
                        temporada_vista = 0
                        for k in keys_seasons:
                            temporada += 1
                            if serie.library_playcounts[k] > 0:
                                temporada_vista += 1
                        # When every season is watched, mark the whole series as watched
                        if temporada == temporada_vista:
                            serie.library_playcounts.update({serie.title: 1})
                        logger.debug("los valores nuevos %s " % serie.library_playcounts)
                        filetools.write(tvshow_file, head_nfo + serie.tojson())
                        break
                    else:
                        continue
                else:
                    logger.error("no se ha podido obtener el id, trakt tiene: %s" % show_aux['ids'])
            except:
                import traceback
                logger.error(traceback.format_exc())
def add_sources(path):
    """Register *path* as a video source in Kodi's sources.xml.

    Parses (or creates) ``special://userdata/sources.xml``, and appends a
    ``<source>`` node (name/path/allowsharing) under ``<video>`` unless the
    path is already listed.  The file is rewritten pretty-printed with
    blank lines stripped.
    """
    logger.info()
    from xml.dom import minidom
    SOURCES_PATH = xbmc.translatePath("special://userdata/sources.xml")
    if os.path.exists(SOURCES_PATH):
        xmldoc = minidom.parse(SOURCES_PATH)
    else:
        # Create a skeleton document with one empty node per source type
        xmldoc = minidom.Document()
        nodo_sources = xmldoc.createElement("sources")
        for type in ['programs', 'video', 'music', 'picture', 'files']:
            nodo_type = xmldoc.createElement(type)
            element_default = xmldoc.createElement("default")
            element_default.setAttribute("pathversion", "1")
            nodo_type.appendChild(element_default)
            nodo_sources.appendChild(nodo_type)
        xmldoc.appendChild(nodo_sources)
    # Locate the <video> node
    nodo_video = xmldoc.childNodes[0].getElementsByTagName("video")[0]
    # Collect the paths already registered under <video>
    nodos_paths = nodo_video.getElementsByTagName("path")
    # NOTE(review): p.firstChild.data raises if a <path> element is empty —
    # assumes every existing <path> has text content.
    list_path = [p.firstChild.data for p in nodos_paths]
    logger.debug(list_path)
    if path in list_path:
        logger.debug("La ruta %s ya esta en sources.xml" % path)
        return
    logger.debug("La ruta %s NO esta en sources.xml" % path)
    # The path is not in sources.xml yet, so add it
    nodo_source = xmldoc.createElement("source")
    # <name> node: last path component, with any trailing separator removed
    nodo_name = xmldoc.createElement("name")
    sep = os.sep
    if path.startswith("special://") or path.startswith("smb://"):
        sep = "/"
    name = path
    if path.endswith(sep):
        name = path[:-1]
    nodo_name.appendChild(xmldoc.createTextNode(name.rsplit(sep)[-1]))
    nodo_source.appendChild(nodo_name)
    # <path> node
    nodo_path = xmldoc.createElement("path")
    nodo_path.setAttribute("pathversion", "1")
    nodo_path.appendChild(xmldoc.createTextNode(path))
    nodo_source.appendChild(nodo_path)
    # <allowsharing> node
    nodo_allowsharing = xmldoc.createElement("allowsharing")
    nodo_allowsharing.appendChild(xmldoc.createTextNode('true'))
    nodo_source.appendChild(nodo_allowsharing)
    # Attach <source> to <video>
    nodo_video.appendChild(nodo_source)
    # Persist the changes (drop the blank lines toprettyxml produces)
    filetools.write(SOURCES_PATH, '\n'.join([x for x in xmldoc.toprettyxml().encode("utf-8").splitlines() if x.strip()]))
def execute_sql_kodi(sql):
    """Execute *sql* against Kodi's video database.

    @param sql: valid SQL query
    @type sql: str
    @return: number of records modified or returned by the query
    @rtype nun_records: int
    @return: list with the query result
    @rtype records: list of tuples
    """
    logger.info()
    file_db = ""
    nun_records = 0
    records = None
    # Locate the video DB file from the platform config (per Kodi version)
    video_db = config.get_platform(True)['video_db']
    if video_db:
        file_db = filetools.join(xbmc.translatePath("special://userdata/Database"), video_db)
    # Fallback: scan the Database folder for a MyVideos*.db file
    if not file_db or not filetools.exists(file_db):
        file_db = ""
        for f in filetools.listdir(xbmc.translatePath("special://userdata/Database")):
            path_f = filetools.join(xbmc.translatePath("special://userdata/Database"), f)
            if filetools.isfile(path_f) and f.lower().startswith('myvideos') and f.lower().endswith('.db'):
                file_db = path_f
                break
    if file_db:
        logger.info("Archivo de BD: %s" % file_db)
        conn = None
        try:
            import sqlite3
            conn = sqlite3.connect(file_db)
            cursor = conn.cursor()
            logger.info("Ejecutando sql: %s" % sql)
            cursor.execute(sql)
            conn.commit()
            records = cursor.fetchall()
            if sql.lower().startswith("select"):
                nun_records = len(records)
                # A single all-NULL row counts as "no results"
                if nun_records == 1 and records[0][0] is None:
                    nun_records = 0
                    records = []
            else:
                # Non-SELECT: report the number of modified rows
                nun_records = conn.total_changes
            conn.close()
            logger.info("Consulta ejecutada. Registros: %s" % nun_records)
        except:
            logger.error("Error al ejecutar la consulta sql")
            # Close the connection on failure too
            if conn:
                conn.close()
    else:
        logger.debug("Base de datos no encontrada")
    return nun_records, records
def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent):
    """Post-process one completed torrent download.

    Determines the configured section for *inputCategory*, links/copies the
    torrent's media files into the output directory, optionally extracts
    archives and flattens the tree, then hands the result to the matching
    autoProcess* handler (CouchPotato, SickBeard/NzbDrone, HeadPhones,
    Mylar, Gamez or a user script).  On success the torrent is removed and
    the processing folder cleaned; on failure seeding is resumed.

    Returns the handler's ``[status, message]`` result, or ``[-1, ""]`` on
    configuration errors.
    """
    status = 1  # 1 = failed | 0 = success
    root = 0
    foundFile = 0
    uniquePath = 1

    # Record the download in our DB unless it is already known / a manual run
    if clientAgent != 'manual' and not core.DOWNLOADINFO:
        logger.debug('Adding TORRENT download info for directory %s to database' % (inputDirectory))
        myDB = nzbToMediaDB.DBConnection()
        encoded, inputDirectory1 = CharReplace(inputDirectory)
        encoded, inputName1 = CharReplace(inputName)
        controlValueDict = {"input_directory": unicode(inputDirectory1)}
        newValueDict = {
            "input_name": unicode(inputName1),
            "input_hash": unicode(inputHash),
            "input_id": unicode(inputID),
            "client_agent": unicode(clientAgent),
            "status": 0,
            "last_update": datetime.date.today().toordinal()
        }
        myDB.upsert("downloads", newValueDict, controlValueDict)

    logger.debug("Received Directory: %s | Name: %s | Category: %s" % (inputDirectory, inputName, inputCategory))

    inputDirectory, inputName, inputCategory, root = core.category_search(
        inputDirectory, inputName, inputCategory, root,
        core.CATEGORIES)  # Confirm the category by parsing directory structure
    if inputCategory == "":
        inputCategory = "UNCAT"

    usercat = inputCategory
    try:
        inputName = inputName.encode(core.SYS_ENCODING)
    except:
        pass
    try:
        inputDirectory = inputDirectory.encode(core.SYS_ENCODING)
    except:
        pass

    logger.debug("Determined Directory: %s | Name: %s | Category: %s" % (inputDirectory, inputName, inputCategory))

    # auto-detect section
    section = core.CFG.findsection(inputCategory).isenabled()
    if section is None:
        section = core.CFG.findsection("ALL").isenabled()
        if section is None:
            logger.error(
                'Category:[%s] is not defined or is not enabled. Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.' % (inputCategory))
            return [-1, ""]
        else:
            usercat = "ALL"
    if len(section) > 1:
        logger.error(
            'Category:[%s] is not unique, %s are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.' % (usercat, section.keys()))
        return [-1, ""]
    if section:
        sectionName = section.keys()[0]
        logger.info('Auto-detected SECTION:%s' % (sectionName))
    else:
        logger.error(
            "Unable to locate a section with subsection:%s enabled in your autoProcessMedia.cfg, exiting!" % (inputCategory))
        return [-1, ""]

    # Per-section options (defaults when missing/unparsable)
    try:
        Torrent_NoLink = int(section[usercat]["Torrent_NoLink"])
    except:
        Torrent_NoLink = 0
    try:
        extract = int(section[usercat]['extract'])
    except:
        extract = 0
    try:
        uniquePath = int(section[usercat]["unique_path"])
    except:
        uniquePath = 1

    # Pause seeding while we work on the files
    if clientAgent != 'manual':
        core.pause_torrent(clientAgent, inputHash, inputID, inputName)

    if uniquePath:
        outputDestination = os.path.normpath(
            core.os.path.join(core.OUTPUTDIRECTORY, inputCategory, core.sanitizeName(inputName)))
    else:
        outputDestination = os.path.normpath(
            core.os.path.join(core.OUTPUTDIRECTORY, inputCategory))
    try:
        outputDestination = outputDestination.encode(core.SYS_ENCODING)
    except:
        pass

    logger.info("Output directory set to: %s" % (outputDestination))

    if core.SAFE_MODE and outputDestination == core.TORRENT_DEFAULTDIR:
        logger.error(
            'The output directory:[%s] is the Download Directory. Edit outputDirectory in autoProcessMedia.cfg. Exiting' % (inputDirectory))
        return [-1, ""]

    logger.debug("Scanning files in directory: %s" % (inputDirectory))

    if sectionName == 'HeadPhones':
        core.NOFLATTEN.extend(inputCategory)  # Make sure we preserve folder structure for HeadPhones.

    now = datetime.datetime.now()
    inputFiles = core.listMediaFiles(inputDirectory)
    logger.debug("Found %s files in %s" % (str(len(inputFiles)), inputDirectory))
    for inputFile in inputFiles:
        filePath = os.path.dirname(inputFile)
        fileName, fileExt = os.path.splitext(os.path.basename(inputFile))
        fullFileName = os.path.basename(inputFile)

        targetFile = core.os.path.join(outputDestination, fullFileName)
        # Keep one level of folder structure for no-flatten categories
        if inputCategory in core.NOFLATTEN:
            if not os.path.basename(filePath) in outputDestination:
                targetFile = core.os.path.join(
                    core.os.path.join(outputDestination, os.path.basename(filePath)), fullFileName)
                logger.debug(
                    "Setting outputDestination to %s to preserve folder structure" % (os.path.dirname(targetFile)))
        try:
            targetFile = targetFile.encode(core.SYS_ENCODING)
        except:
            pass
        # root == 1: common download dir — only take files matching the torrent name
        if root == 1:
            if not foundFile:
                logger.debug("Looking for %s in: %s" % (inputName, inputFile))
            if (core.sanitizeName(inputName) in core.sanitizeName(inputFile)) or (core.sanitizeName(fileName) in core.sanitizeName(inputName)):
                foundFile = True
                logger.debug("Found file %s that matches Torrent Name %s" % (fullFileName, inputName))
            else:
                continue

        # root == 2: no reliable name match — only take recently touched files
        if root == 2:
            mtime_lapse = now - datetime.datetime.fromtimestamp(os.path.getmtime(inputFile))
            ctime_lapse = now - datetime.datetime.fromtimestamp(os.path.getctime(inputFile))
            if not foundFile:
                logger.debug("Looking for files with modified/created dates less than 5 minutes old.")
            if (mtime_lapse < datetime.timedelta(minutes=5)) or (ctime_lapse < datetime.timedelta(minutes=5)):
                foundFile = True
                logger.debug("Found file %s with date modifed/created less than 5 minutes ago." % (fullFileName))
            else:
                continue  # This file has not been recently moved or created, skip it

        if Torrent_NoLink == 0:
            try:
                core.copy_link(inputFile, targetFile, core.USELINK)
                core.rmReadOnly(targetFile)
            except:
                logger.error("Failed to link: %s to %s" % (inputFile, targetFile))

    inputName, outputDestination = convert_to_ascii(inputName, outputDestination)

    if extract == 1:
        logger.debug('Checking for archives to extract in directory: %s' % (outputDestination))
        core.extractFiles(outputDestination)

    if not inputCategory in core.NOFLATTEN:  # don't flatten hp in case multi cd albums, and we need to copy this back later.
        core.flatten(outputDestination)

    # Now check if video files exist in destination:
    if sectionName in ["SickBeard", "NzbDrone", "CouchPotato"]:
        numVideos = len(
            core.listMediaFiles(outputDestination, media=True, audio=False, meta=False, archives=False))
        if numVideos > 0:
            logger.info("Found %s media files in %s" % (numVideos, outputDestination))
            status = 0
        elif extract != 1:
            logger.info("Found no media files in %s. Sending to %s to process" % (outputDestination, sectionName))
            status = 0
        else:
            logger.warning("Found no media files in %s" % outputDestination)

    # Only these sections can handling failed downloads so make sure everything else gets through without the check for failed
    if not sectionName in ['CouchPotato', 'SickBeard', 'NzbDrone']:
        status = 0

    logger.info("Calling %s:%s to post-process:%s" % (sectionName, usercat, inputName))

    # Dispatch to the section's handler
    result = [0, ""]
    if sectionName == 'UserScript':
        result = external_script(outputDestination, inputName, inputCategory, section[usercat])
    elif sectionName == 'CouchPotato':
        result = core.autoProcessMovie().process(sectionName, outputDestination, inputName, status, clientAgent, inputHash, inputCategory)
    elif sectionName in ['SickBeard', 'NzbDrone']:
        if inputHash:
            inputHash = inputHash.upper()
        result = core.autoProcessTV().processEpisode(sectionName, outputDestination, inputName, status, clientAgent, inputHash, inputCategory)
    elif sectionName == 'HeadPhones':
        result = core.autoProcessMusic().process(sectionName, outputDestination, inputName, status, clientAgent, inputCategory)
    elif sectionName == 'Mylar':
        result = core.autoProcessComics().processEpisode(sectionName, outputDestination, inputName, status, clientAgent, inputCategory)
    elif sectionName == 'Gamez':
        result = core.autoProcessGames().process(sectionName, outputDestination, inputName, status, clientAgent, inputCategory)

    plex_update(inputCategory)

    if result[0] != 0:
        if clientAgent != 'manual':
            logger.error("A problem was reported in the autoProcess* script. If torrent was paused we will resume seeding")
            core.resume_torrent(clientAgent, inputHash, inputID, inputName)
    else:
        if clientAgent != 'manual':
            # update download status in our DB
            core.update_downloadInfoStatus(inputName, 1)
            # remove torrent
            core.remove_torrent(clientAgent, inputHash, inputID, inputName)
        if not sectionName == 'UserScript':  # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN
            # cleanup our processing folders of any misc unwanted files and empty directories
            core.cleanDir(outputDestination, sectionName, inputCategory)

    return result
def category_search(inputDirectory, inputName, inputCategory, root, categories):
    """Refine the download directory/name/category by inspecting the path.

    Walks the components of *inputDirectory* to confirm or discover the
    category, then descends into a matching category/torrent sub-directory
    when one exists.  Returns ``(inputDirectory, inputName, inputCategory,
    root)`` where ``root`` is 0 when a unique per-torrent directory was
    found, 1 when files must be matched by torrent name, and 2 when only
    recently-modified files can be trusted.
    """
    tordir = False

    try:
        inputName = inputName.encode(core.SYS_ENCODING)
    except:
        pass
    try:
        inputDirectory = inputDirectory.encode(core.SYS_ENCODING)
    except:
        pass

    if inputDirectory is None:  # =Nothing to process here.
        return inputDirectory, inputName, inputCategory, root

    pathlist = os.path.normpath(inputDirectory).split(os.sep)

    if inputCategory and inputCategory in pathlist:
        logger.debug("SEARCH: Found the Category: %s in directory structure" % (inputCategory))
    elif inputCategory:
        logger.debug("SEARCH: Could not find the category: %s in the directory structure" % (inputCategory))
    else:
        try:
            inputCategory = list(set(pathlist) & set(categories))[-1]  # assume last match is most relevant category.
            logger.debug("SEARCH: Found Category: %s in directory structure" % (inputCategory))
        except IndexError:
            inputCategory = ""
            logger.debug("SEARCH: Could not find a category in the directory structure")
    if not os.path.isdir(inputDirectory) and os.path.isfile(inputDirectory):  # If the input directory is a file
        if not inputName:
            inputName = os.path.split(os.path.normpath(inputDirectory))[1]
        return inputDirectory, inputName, inputCategory, root

    # Descend into the category directory when it exists under the input dir
    if inputCategory and os.path.isdir(os.path.join(inputDirectory, inputCategory)):
        logger.info(
            "SEARCH: Found category directory %s in input directory directory %s" % (inputCategory, inputDirectory))
        inputDirectory = os.path.join(inputDirectory, inputCategory)
        logger.info("SEARCH: Setting inputDirectory to %s" % (inputDirectory))

    # Descend into the torrent's own directory/file (raw or sanitized name)
    if inputName and os.path.isdir(os.path.join(inputDirectory, inputName)):
        logger.info("SEARCH: Found torrent directory %s in input directory directory %s" % (inputName, inputDirectory))
        inputDirectory = os.path.join(inputDirectory, inputName)
        logger.info("SEARCH: Setting inputDirectory to %s" % (inputDirectory))
        tordir = True
    elif inputName and os.path.isdir(os.path.join(inputDirectory, sanitizeName(inputName))):
        logger.info("SEARCH: Found torrent directory %s in input directory directory %s" % (
            sanitizeName(inputName), inputDirectory))
        inputDirectory = os.path.join(inputDirectory, sanitizeName(inputName))
        logger.info("SEARCH: Setting inputDirectory to %s" % (inputDirectory))
        tordir = True
    elif inputName and os.path.isfile(os.path.join(inputDirectory, inputName)):
        logger.info("SEARCH: Found torrent file %s in input directory directory %s" % (inputName, inputDirectory))
        inputDirectory = os.path.join(inputDirectory, inputName)
        logger.info("SEARCH: Setting inputDirectory to %s" % (inputDirectory))
        tordir = True
    elif inputName and os.path.isfile(os.path.join(inputDirectory, sanitizeName(inputName))):
        logger.info("SEARCH: Found torrent file %s in input directory directory %s" % (
            sanitizeName(inputName), inputDirectory))
        inputDirectory = os.path.join(inputDirectory, sanitizeName(inputName))
        logger.info("SEARCH: Setting inputDirectory to %s" % (inputDirectory))
        tordir = True

    imdbid = [item for item in pathlist if '.cp(tt' in item]  # This looks for the .cp(tt imdb id in the path.
    if imdbid and not '.cp(tt' in inputName:
        inputName = imdbid[0]  # This ensures the imdb id is preserved and passed to CP
        tordir = True

    # A directory one level below the category counts as the torrent dir
    if inputCategory and not tordir:
        try:
            index = pathlist.index(inputCategory)
            if index + 1 < len(pathlist):
                tordir = True
                logger.info("SEARCH: Found a unique directory %s in the category directory" % (pathlist[index + 1]))
                if not inputName:
                    inputName = pathlist[index + 1]
        except ValueError:
            pass

    if inputName and not tordir:
        if inputName in pathlist or sanitizeName(inputName) in pathlist:
            logger.info("SEARCH: Found torrent directory %s in the directory structure" % (inputName))
            tordir = True
        else:
            root = 1
    if not tordir:
        root = 2

    if root > 0:
        logger.info("SEARCH: Could not find a unique directory for this download. Assume a common directory.")
        logger.info("SEARCH: We will try and determine which files to process, individually")
    return inputDirectory, inputName, inputCategory, root
def main(args):
    """TorrentToMedia entry point.

    Parses the torrent client's arguments and post-processes the download;
    when the arguments are incomplete, falls back to a manual run over all
    enabled sections/subsections, reusing stored download info where
    available.  Returns the final status code (0 on success).
    """
    # Initialize the config
    core.initialize()

    # clientAgent for Torrents
    clientAgent = core.TORRENT_CLIENTAGENT

    logger.info("#########################################################")
    logger.info("## ..::[%s]::.. ##" % os.path.basename(__file__))
    logger.info("#########################################################")

    # debug command line options
    logger.debug("Options passed into TorrentToMedia: %s" % (args))

    # Post-Processing Result
    result = [0, ""]

    try:
        inputDirectory, inputName, inputCategory, inputHash, inputID = core.parse_args(clientAgent, args)
    except:
        logger.error("There was a problem loading variables")
        return -1

    if inputDirectory and inputName and inputHash and inputID:
        result = processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent)
    else:
        # Perform Manual Post-Processing
        logger.warning("Invalid number of arguments received from client, Switching to manual run mode ...")

        for section, subsections in core.SECTIONS.items():
            for subsection in subsections:
                if not core.CFG[section][subsection].isenabled():
                    continue
                for dirName in core.getDirs(section, subsection, link='hard'):
                    logger.info("Starting manual run for %s:%s - Folder:%s" % (section, subsection, dirName))

                    logger.info("Checking database for download info for %s ..." % (os.path.basename(dirName)))
                    core.DOWNLOADINFO = core.get_downloadInfo(os.path.basename(dirName), 0)
                    if core.DOWNLOADINFO:
                        logger.info("Found download info for %s, setting variables now ..." % (os.path.basename(dirName)))
                    else:
                        logger.info('Unable to locate download info for %s, continuing to try and process this release ...' % (os.path.basename(dirName)))

                    # Restore client/hash/id from the DB record, defaulting to a manual run
                    try:
                        clientAgent = str(core.DOWNLOADINFO[0]['client_agent'])
                    except:
                        clientAgent = 'manual'
                    try:
                        inputHash = str(core.DOWNLOADINFO[0]['input_hash'])
                    except:
                        inputHash = None
                    try:
                        inputID = str(core.DOWNLOADINFO[0]['input_id'])
                    except:
                        inputID = None

                    if clientAgent.lower() not in core.TORRENT_CLIENTS and clientAgent != 'manual':
                        continue

                    try:
                        dirName = dirName.encode(core.SYS_ENCODING)
                    except:
                        pass
                    inputName = os.path.basename(dirName)
                    try:
                        inputName = inputName.encode(core.SYS_ENCODING)
                    except:
                        pass

                    results = processTorrent(dirName, inputName, subsection, inputHash, inputID, clientAgent)
                    if results[0] != 0:
                        logger.error("A problem was reported when trying to perform a manual run for %s:%s." % (section, subsection))
                        result = results

    if result[0] == 0:
        logger.info("The %s script completed successfully." % (args[0]))
    else:
        logger.error("A problem was reported in the %s script." % (args[0]))
    del core.MYAPP
    return result[0]
def episodios(item):
    """Build the episode list for a mejortorrent series/download page.

    Scrapes the hidden form fields and the episode table from *item.url*,
    enriches each episode with Tmdb metadata when possible, and returns a
    list of playable Items whose ``extra`` carries the POST payload needed
    by the download endpoint.
    """
    logger.info("pelisalacarta.mejortorrent episodios")
    itemlist = []

    # Download the page
    data = scrapertools.cachePage(item.url)

    # Hidden form fields needed for the download POST
    total_capis = scrapertools.get_match(data, "<input type='hidden' name='total_capis' value='(\d+)'>")
    tabla = scrapertools.get_match(data, "<input type='hidden' name='tabla' value='([^']+)'>")
    titulo = scrapertools.get_match(data, "<input type='hidden' name='titulo' value='([^']+)'>")

    item.thumbnail = scrapertools.find_single_match(
        data,
        "src='http://www\.mejortorrent\.com(/uploads/imagenes/" + tabla + "/[a-zA-Z0-9_ ]+.jpg)'")
    item.thumbnail = 'http://www.mejortorrent.com' + urllib.quote(item.thumbnail)

    #<form name='episodios' action='secciones.php?sec=descargas&ap=contar_varios' method='post'>
    data = scrapertools.get_match(
        data,
        "<form name='episodios' action='secciones.php\?sec=descargas\&ap=contar_varios' method='post'>(.*?)</form>")

    # Sample of the HTML rows matched below:
    '''
    <td bgcolor='#C8DAC8' style='border-bottom:1px solid black;'><a href='/serie-episodio-descargar-torrent-18741-Juego-de-tronos-4x01.html'>4x01 - Episodio en V.O. Sub Esp.</a></td>
    <td width='120' bgcolor='#C8DAC8' align='right' style='border-right:1px solid black; border-bottom:1px solid black;'><div style='color:#666666; font-size:9px; margin-right:5px;'>Fecha: 2014-04-07</div></td>
    <td width='60' bgcolor='#F1F1F1' align='center' style='border-bottom:1px solid black;'>
    <input type='checkbox' name='episodios[1]' value='18741'>
    '''
    # Series rows wrap the title in an <a>; other types are plain text
    if item.extra == "series":
        patron = "<td bgcolor[^>]+><a[^>]+>([^>]+)</a></td>[^<]+"
    else:
        patron = "<td bgcolor[^>]+>([^>]+)</td>[^<]+"
    patron += "<td[^<]+<div[^>]+>Fecha: ([^<]+)</div></td>[^<]+"
    patron += "<td[^<]+"
    patron += "<input type='checkbox' name='([^']+)' value='([^']+)'"

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    # Strip season/miniseries suffixes and bracketed notes for the Tmdb search
    tmdb_title = re.sub(
        r'(\s*-\s*)?\d+.*?\s*Temporada|(\s*-\s*)?\s*Miniserie\.?|\(.*\)|\[.*\]',
        '', item.title).strip()
    logger.debug('pelisalacarta.mejortorrent episodios tmdb_title=' + tmdb_title)

    if item.extra == "series":
        oTmdb = Tmdb(texto_buscado=tmdb_title.strip(), tipo='tv', idioma_busqueda="es")
    else:
        oTmdb = Tmdb(texto_buscado=tmdb_title.strip(), idioma_busqueda="es")

    for scrapedtitle, fecha, name, value in matches:
        scrapedtitle = scrapedtitle.strip()
        if scrapedtitle.endswith('.'):
            scrapedtitle = scrapedtitle[:-1]
        title = scrapedtitle + " (" + fecha + ")"

        url = "http://www.mejortorrent.com/secciones.php?sec=descargas&ap=contar_varios"
        # e.g. "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada"
        post = urllib.urlencode({
            name: value,
            "total_capis": total_capis,
            "tabla": tabla,
            "titulo": titulo
        })
        logger.debug("post=" + post)

        if item.extra == "series":
            epi = scrapedtitle.split("x")
            # Only query Tmdb when the title has the seasonXepisode format
            if len(epi) > 1:
                temporada = re.sub("\D", "", epi[0])
                capitulo = re.sub("\D", "", epi[1])
                epi_data = oTmdb.get_episodio(temporada, capitulo)
                logger.debug("epi_data=" + str(epi_data))
                if epi_data:
                    item.thumbnail = epi_data["temporada_poster"]
                    item.fanart = epi_data["episodio_imagen"]
                    item.plot = epi_data["episodio_sinopsis"]
                    epi_title = epi_data["episodio_titulo"]
                    if epi_title != "":
                        title = scrapedtitle + " " + epi_title + " (" + fecha + ")"
        else:
            try:
                item.fanart = oTmdb.get_backdrop()
            except:
                pass
            item.plot = oTmdb.get_sinopsis()

        logger.debug("title=[" + title + "], url=[" + url + "], item=[" + str(item) + "]")

        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=title,
                 url=url,
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 fanart=item.fanart,
                 extra=post,
                 folder=False))

    return itemlist
def find_imdbid(dirName, inputName):
    """Try to determine the IMDb id (ttNNNNNNN) for a release.

    Lookup order:
      1. a tt\\d{7} token anywhere in the directory or release name,
      2. a tt\\d{7} token in any file name inside dirName,
      3. the NZBPR__DNZB_MOREINFO environment variable (an IMDb URL),
      4. an OMDb API search using the title/year guessed from inputName.

    @param dirName: download directory of the release
    @param inputName: release (NZB) name
    @return: the imdbID string, or None when nothing could be found
    """
    imdbid = None

    logger.info('Attempting imdbID lookup for %s' % (inputName))

    # find imdbid in dirName
    logger.info('Searching folder and file names for imdbID ...')
    m = re.search(r'(tt\d{7})', dirName + inputName)
    if m:
        imdbid = m.group(1)
        logger.info("Found imdbID [%s]" % imdbid)
        return imdbid

    if os.path.isdir(dirName):
        # 'fname' instead of 'file' so the builtin is not shadowed.
        for fname in os.listdir(dirName):
            m = re.search(r'(tt\d{7})', fname)
            if m:
                imdbid = m.group(1)
                logger.info("Found imdbID [%s] via file name" % imdbid)
                return imdbid

    # 'in' works on both Python 2 and 3; dict.has_key() is Python-2-only.
    if 'NZBPR__DNZB_MOREINFO' in os.environ:
        dnzb_more_info = os.environ.get('NZBPR__DNZB_MOREINFO', '')
        if dnzb_more_info != '':
            regex = re.compile(r'^http://www.imdb.com/title/(tt[0-9]+)/$', re.IGNORECASE)
            m = regex.match(dnzb_more_info)
            if m:
                imdbid = m.group(1)
                logger.info("Found imdbID [%s] from DNZB-MoreInfo" % imdbid)
                return imdbid

    logger.info('Searching IMDB for imdbID ...')
    guess = guessit.guess_movie_info(inputName)
    if guess:
        # Movie Title
        title = None
        if 'title' in guess:
            title = guess['title']

        # Movie Year
        year = None
        if 'year' in guess:
            year = guess['year']

        url = "http://www.omdbapi.com"
        logger.debug("Opening URL: %s" % url)
        try:
            r = requests.get(url, params={'y': year, 't': title},
                             verify=False, timeout=(60, 300))
        except requests.ConnectionError:
            logger.error("Unable to open URL %s" % url)
            return

        results = r.json()
        # Only swallow the "no imdbID in the response" cases; a bare except
        # here used to hide every other programming error as well.
        try:
            imdbid = results['imdbID']
        except (KeyError, TypeError):
            pass

    if imdbid:
        logger.info("Found imdbID [%s]" % imdbid)
        return imdbid

    logger.warning('Unable to find a imdbID for %s' % (inputName))
    return imdbid
def buscador(item):
    """Search-results scraper for mejortorrent.

    Parses the search-results HTML pointed to by ``item.url`` and returns a
    list of Item objects: series (action "episodios"), movies (action
    "play") and documentaries (action "episodios", extra "docu"). If
    nothing matched, a single "no results" item is returned.

    Removed: three ``patron_enlace`` locals that were assigned but never
    used (dead code left over from a copy of ``getlist``).

    @param item: item whose ``url`` is the search-results page
    @return: list of Item objects
    """
    logger.info("pelisalacarta.mejortorrent buscador")
    itemlist = []

    data = scrapertools.cachePage(item.url)

    # Series: (url, title, extra info in the gray span).
    patron = "<a href='(/serie-descargar-torrent[^']+)'[^>]+>(.*?)</a>"
    patron += ".*?<span style='color:gray;'>([^']+)</span>"
    matches = scrapertools.find_multiple_matches(data, patron)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle, scrapedinfo in matches:
        # The page is iso-8859-1 encoded; re-encode titles to utf-8.
        title = scrapertools.remove_htmltags(scrapedtitle).decode(
            'iso-8859-1').encode('utf8') + ' ' + scrapedinfo.decode(
            'iso-8859-1').encode('utf8')
        url = urlparse.urljoin(item.url, scrapedurl)
        logger.debug("title=[" + title + "], url=[" + url + "]")
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 title=title,
                 url=url,
                 folder=True,
                 extra="series",
                 viewmode="movie_with_plot"))

    # Movies: (url, title).
    patron = "<a href='(/peli-descargar-torrent-[^']+)'[^>]+>(.*?)</a>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.remove_htmltags(scrapedtitle).decode(
            'iso-8859-1').encode('utf-8')
        url = urlparse.urljoin(item.url, scrapedurl)
        logger.debug("title=[" + title + "], url=[" + url + "]")
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=title,
                 url=url,
                 folder=False,
                 extra=""))

    # Documentaries: (url, title, extra info).
    patron = "<a href='(/doc-descargar-torrent[^']+)' .*?"
    patron += "<font Color='darkblue'>(.*?)</font>.*?"
    patron += "<td align='right' width='20%'>(.*?)</td>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle, scrapedinfo in matches:
        title = scrapedtitle.decode('iso-8859-1').encode(
            'utf8') + " " + scrapedinfo.decode('iso-8859-1').encode('utf8')
        url = urlparse.urljoin(item.url, scrapedurl)
        logger.debug("title=[" + title + "], url=[" + url + "]")
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 title=title,
                 url=url,
                 folder=True,
                 extra="docu",
                 viewmode="movie_with_plot"))

    if len(itemlist) == 0:
        itemlist.append(
            Item(channel=item.channel,
                 action="mainlist",
                 title="No se han encontrado nada con ese término"))

    return itemlist
def processDir(path):
    """Prepare *path* for post-processing and return its candidate folders.

    Any loose media file found directly in *path* is moved (linked) into a
    folder of its own, named from audio tags (artist - album) or from the
    guessed video title. Directories still syncing (containing .!sync/.bts
    files) or empty are skipped.

    @param path: root directory to scan
    @return: list of absolute sub-directory paths ready for processing
    """
    folders = []

    logger.info("Searching %s for mediafiles to post-process ..." % (path))

    # Presence of partial-sync artifacts in the root means a transfer is
    # still in progress: skip single-file handling entirely (break below).
    sync = [o for o in os.listdir(path) if os.path.splitext(o)[1] in ['.!sync', '.bts']]

    # search for single files and move them into their own folder for post-processing
    for mediafile in [os.path.join(path, o) for o in os.listdir(path) if
                      os.path.isfile(os.path.join(path, o))]:
        if len(sync) > 0:
            break
        # Windows thumbnail caches are noise, not media.
        if os.path.split(mediafile)[1] in ['Thumbs.db', 'thumbs.db']:
            continue
        try:
            logger.debug("Found file %s in root directory %s." % (os.path.split(mediafile)[1], path))
            newPath = None
            fileExt = os.path.splitext(mediafile)[1]
            try:
                if fileExt in core.AUDIOCONTAINER:
                    f = beets.mediafile.MediaFile(mediafile)

                    # get artist and album info
                    artist = f.artist
                    album = f.album

                    # create new path
                    newPath = os.path.join(path, "%s - %s" % (sanitizeName(artist), sanitizeName(album)))
                elif fileExt in core.MEDIACONTAINER:
                    f = guessit.guess_video_info(mediafile)

                    # get title; prefer the series name, fall back to the
                    # movie title, then to the bare file name.
                    title = None
                    try:
                        title = f['series']
                    except:
                        title = f['title']

                    if not title:
                        title = os.path.splitext(os.path.basename(mediafile))[0]

                    newPath = os.path.join(path, sanitizeName(title))
            except Exception as e:
                logger.error("Exception parsing name for media file: %s: %s" % (os.path.split(mediafile)[1], e))

            # Tag/guess parsing failed or the extension matched neither
            # container list: use the file name itself.
            if not newPath:
                title = os.path.splitext(os.path.basename(mediafile))[0]
                newPath = os.path.join(path, sanitizeName(title))

            try:
                newPath = newPath.encode(core.SYS_ENCODING)
            except:
                pass

            # Just fail-safe incase we already have afile with this clean-name (was actually a bug from earlier code, but let's be safe).
            if os.path.isfile(newPath):
                newPath2 = os.path.join(os.path.join(os.path.split(newPath)[0], 'new'), os.path.split(newPath)[1])
                newPath = newPath2

            # create new path if it does not exist
            if not os.path.exists(newPath):
                makeDir(newPath)

            newfile = os.path.join(newPath, sanitizeName(os.path.split(mediafile)[1]))
            try:
                newfile = newfile.encode(core.SYS_ENCODING)
            except:
                pass

            # link file to its new path
            # NOTE(review): 'link' is not defined in this function — it is
            # presumably a module-level global set by the caller; confirm.
            copy_link(mediafile, newfile, link)
        except Exception as e:
            logger.error("Failed to move %s to its own directory: %s" % (os.path.split(mediafile)[1], e))

    #removeEmptyFolders(path, removeRoot=False)

    # Collect non-empty, fully-synced sub-directories.
    # NOTE(review): loop variable 'dir' shadows the builtin.
    if os.listdir(path):
        for dir in [os.path.join(path, o) for o in os.listdir(path) if
                    os.path.isdir(os.path.join(path, o))]:
            sync = [o for o in os.listdir(dir) if os.path.splitext(o)[1] in ['.!sync', '.bts']]
            if len(sync) > 0 or len(os.listdir(dir)) == 0:
                continue
            folders.extend([dir])

    return folders
def process(inputDirectory, inputName=None, status=0, clientAgent='manual', download_id=None, inputCategory=None,
            failureLink=None):
    """Route a finished download to the right post-processor.

    Resolves the configuration section for *inputCategory*, optionally
    records the download in the local DB and extracts archives, then
    dispatches to the per-application processor (CouchPotato,
    SickBeard/NzbDrone, HeadPhones, Mylar, Gamez or a user script).

    @param inputDirectory: directory of the completed download
    @param inputName: release / NZB name
    @param status: 0 = successful download, non-zero = failed
    @param clientAgent: downloader that triggered us ('manual', 'sabnzbd', ...)
    @param download_id: downloader-specific job id, if known
    @param inputCategory: user category used to pick the config section
    @param failureLink: DNZB failure URL, forwarded to some processors
    @return: [returncode, message] list; [-1, ""] on configuration errors
    """
    # Refuse to process the downloader's default directory: it may contain
    # unrelated media.
    if core.SAFE_MODE and inputDirectory == core.NZB_DEFAULTDIR:
        logger.error(
            'The input directory:[%s] is the Default Download Directory. Please configure category directories to prevent processing of other media.' % (
                inputDirectory))
        return [-1, ""]

    if not download_id and clientAgent == 'sabnzbd':
        download_id = get_nzoid(inputName)

    # First time we see this download: record it in our DB.
    if clientAgent != 'manual' and not core.DOWNLOADINFO:
        logger.debug('Adding NZB download info for directory %s to database' % (inputDirectory))

        myDB = nzbToMediaDB.DBConnection()

        encoded, inputDirectory1 = CharReplace(inputDirectory)
        encoded, inputName1 = CharReplace(inputName)

        # NOTE: unicode() is Python-2-only; this module targets Python 2.
        controlValueDict = {"input_directory": unicode(inputDirectory1)}
        newValueDict = {"input_name": unicode(inputName1),
                        "input_hash": unicode(download_id),
                        "input_id": unicode(download_id),
                        "client_agent": unicode(clientAgent),
                        "status": 0,
                        "last_update": datetime.date.today().toordinal()
                        }
        myDB.upsert("downloads", newValueDict, controlValueDict)

    # auto-detect section
    if inputCategory is None:
        inputCategory = 'UNCAT'
    usercat = inputCategory
    section = core.CFG.findsection(inputCategory).isenabled()
    if section is None:
        # Fall back to the catch-all "ALL" section.
        section = core.CFG.findsection("ALL").isenabled()
        if section is None:
            logger.error(
                'Category:[%s] is not defined or is not enabled. Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.' % (
                    inputCategory))
            return [-1, ""]
        else:
            usercat = "ALL"

    # The category must map to exactly one section.
    if len(section) > 1:
        logger.error(
            'Category:[%s] is not unique, %s are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.' % (
                inputCategory, section.keys()))
        return [-1, ""]

    if section:
        sectionName = section.keys()[0]
        logger.info('Auto-detected SECTION:%s' % (sectionName))
    else:
        logger.error("Unable to locate a section with subsection:%s enabled in your autoProcessMedia.cfg, exiting!" % (
            inputCategory))
        return [-1, ""]

    try:
        extract = int(section[usercat]['extract'])
    except:
        # Missing or non-numeric 'extract' option: default to no extraction.
        extract = 0

    try:
        if int(section[usercat]['remote_path']) and not core.REMOTEPATHS:
            logger.error('Remote Path is enabled for %s:%s but no Network mount points are defined. Please check your autoProcessMedia.cfg, exiting!' % (
                sectionName, inputCategory))
            return [-1, ""]
    except:
        logger.error('Remote Path %s is not valid for %s:%s Please set this to either 0 to disable or 1 to enable!' % (
            section[usercat]['remote_path'], sectionName, inputCategory))

    inputName, inputDirectory = convert_to_ascii(inputName, inputDirectory)

    if extract == 1:
        logger.debug('Checking for archives to extract in directory: %s' % (inputDirectory))
        extractFiles(inputDirectory)

    logger.info("Calling %s:%s to post-process:%s" % (sectionName, inputCategory, inputName))

    # Dispatch to the application-specific processor.
    if sectionName == "CouchPotato":
        result = autoProcessMovie().process(sectionName, inputDirectory, inputName, status, clientAgent, download_id,
                                            inputCategory, failureLink)
    elif sectionName in ["SickBeard", "NzbDrone"]:
        result = autoProcessTV().processEpisode(sectionName, inputDirectory, inputName, status, clientAgent,
                                                download_id, inputCategory, failureLink)
    elif sectionName == "HeadPhones":
        result = autoProcessMusic().process(sectionName, inputDirectory, inputName, status, clientAgent, inputCategory)
    elif sectionName == "Mylar":
        result = autoProcessComics().processEpisode(sectionName, inputDirectory, inputName, status, clientAgent,
                                                    inputCategory)
    elif sectionName == "Gamez":
        result = autoProcessGames().process(sectionName, inputDirectory, inputName, status, clientAgent, inputCategory)
    elif sectionName == 'UserScript':
        result = external_script(inputDirectory, inputName, inputCategory, section[usercat])
    else:
        result = [-1, ""]

    plex_update(inputCategory)

    if result[0] == 0:
        if clientAgent != 'manual':
            # update download status in our DB
            update_downloadInfoStatus(inputName, 1)
        if not sectionName in ['UserScript', 'NzbDrone']:
            # cleanup our processing folders of any misc unwanted files and empty directories
            cleanDir(inputDirectory, sectionName, inputCategory)

    return result
def getlist(item):
    """List movies, series or documentaries from a mejortorrent index page.

    The listing type is inferred from ``item.url``; each type uses its own
    regex set (link+thumbnail pattern, URL-slug pattern for the fallback
    title, page pattern for the richer title) and target action. A "next
    page" item is appended when the paginator link is present.

    Fix: the title-enrichment loop indexed ``itemlist[cnt]`` assuming both
    regexes match the same number of entries; a bounds guard now prevents
    an IndexError if the page format ever diverges.

    @param item: item whose ``url`` is the index page to scrape
    @return: list of Item objects (or a single error item if none found)
    """
    logger.info("pelisalacarta.mejortorrent seriesydocs")
    itemlist = []

    data = scrapertools.cachePage(item.url)

    # Select patterns and target action per listing type, based on the URL.
    if item.url.find("peliculas") > -1:
        patron = '<a href="(/peli-descargar-torrent[^"]+)">[^<]+'
        patron += '<img src="([^"]+)"[^<]+</a>'
        patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html"
        patron_title = '<a href="/peli-descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?'
        action = "show_movie_info"
        folder = True
        extra = ""
    elif item.url.find("series-letra") > -1:
        # Letter-index pages carry no thumbnail; the empty group keeps the
        # 2-tuple shape expected by the loop below.
        patron = "<a href='(/serie-descargar-torrent[^']+)'>()"
        patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html"
        patron_title = '<a href="/serie-descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?'
        action = "episodios"
        folder = True
        extra = "series"
    elif item.url.find("series") > -1:
        patron = '<a href="(/serie-descargar-torrent[^"]+)">[^<]+'
        patron += '<img src="([^"]+)"[^<]+</a>'
        patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html"
        patron_title = '<a href="/serie-descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?'
        action = "episodios"
        folder = True
        extra = "series"
    else:
        patron = '<a href="(/doc-descargar-torrent[^"]+)">[^<]+'
        patron += '<img src="([^"]+)"[^<]+</a>'
        patron_enlace = "/doc-descargar-torrent-\d+-\d+-(.*?)\.html"
        patron_title = '<a href="/doc-descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?'
        action = "episodios"
        folder = True
        extra = "docus"

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail in matches:
        # Fallback title comes from the URL slug (dashes become spaces).
        title = scrapertools.get_match(scrapedurl, patron_enlace)
        title = title.replace("-", " ")
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, urllib.quote(scrapedthumbnail))
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 folder=folder,
                 extra=extra))

    matches = re.compile(patron_title, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    # Replace the URL-derived titles with the richer ones scraped from the
    # page. This assumes both regexes matched the same entries in the same
    # order; the bounds check guards against a page-format change.
    cnt = 0
    for scrapedtitle, notused, scrapedinfo in matches:
        if cnt >= len(itemlist):
            break
        title = re.sub(
            '\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
        if title.endswith('.'):
            title = title[:-1]

        info = scrapedinfo.decode('iso-8859-1').encode('utf8')
        if info != "":
            title = '{0} {1}'.format(title, info)

        itemlist[cnt].title = title
        cnt += 1

    if len(itemlist) == 0:
        itemlist.append(
            Item(channel=item.channel,
                 action="mainlist",
                 title="No se ha podido cargar el listado"))
    else:
        # Extract the paginator link, if any.
        patronvideos = "<a href='([^']+)' class='paginar'> Siguiente >>"
        matches = re.compile(patronvideos, re.DOTALL).findall(data)
        scrapertools.printMatches(matches)

        if len(matches) > 0:
            scrapedurl = urlparse.urljoin(item.url, matches[0])
            itemlist.append(
                Item(channel=item.channel,
                     action="getlist",
                     title="Pagina siguiente >>",
                     url=scrapedurl,
                     folder=True))

    return itemlist
def save_library_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    Saves to the given path every episode contained in episodelist.

    For each episode it creates up to three files named after the
    "SxNN" season/episode tag: a .strm launcher, a .nfo with scraper
    info and a per-channel .json snapshot.

    @type path: str
    @param path: folder where the episodes are saved
    @type episodelist: list
    @param episodelist: items representing the episodes to save
    @type serie: item
    @param serie: the TV show the episodes belong to
    @type silent: bool
    @param silent: whether the progress notification is shown
    @param overwrite: allow overwriting existing files
    @type overwrite: bool
    @rtype insertados: int
    @return: number of episodes inserted
    @rtype sobreescritos: int
    @return: number of episodes overwritten
    @rtype fallidos: int
    @return: number of failed episodes (-1 when everything failed or the
        tvshow.nfo update itself failed)
    """
    logger.info()

    # No episode list: nothing to save.
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # List every file of the show once, so existence checks below do not
    # have to hit the filesystem per episode.
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]

    # silent suppresses the progress dialog (used by library_service).
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress('pelisalacarta', 'Añadiendo episodios...')
        p_dialog.update(0, 'Añadiendo episodio...')

    # Force float: integer division would truncate on Python 2.x.
    t = float(100) / len(episodelist)

    for i, e in enumerate(episodelist):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Añadiendo episodio...', e.title)

        # Episodes whose title carries no recognizable "SxNN" tag are
        # skipped silently.
        try:
            season_episode = scrapertools.get_season_and_episode(e.title.lower())

            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            # Normalize to zero-padded episode number, e.g. "1x02".
            season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        except:
            continue

        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        if not strm_exists:
            # season_episode.strm does not exist yet: create it.
            item_strm = Item(action='play_from_library', channel='biblioteca',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_idiomas:
                # if tvshow.nfo has a language filter, propagate it to the
                # item_strm about to be generated
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if not nfo_exists and e.infoLabels.get("imdb_id"):
            # season_episode.nfo does not exist yet: create it, headed by
            # the scraper URL that produced the info.
            if e.infoLabels["tmdb_id"]:
                scraper.find_and_set_infoLabels(e)
                head_nfo = "https://www.themoviedb.org/tv/%s/season/%s/episode/%s\n" % (
                    e.infoLabels['tmdb_id'], e.contentSeason, e.contentEpisodeNumber)
            elif e.infoLabels["tvdb_id"]:
                head_nfo = e.url_scraper
            else:
                head_nfo = "Aqui ira el xml"  # TODO

            item_nfo = e.clone(channel="biblioteca", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))

            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())

        # Only continue when both season_episode.nfo and season_episode.strm exist.
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Get the episode's infoLabels from the .nfo.
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)
                e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Mark the episode as unwatched
                        news_in_playcounts[season_episode] = 0
                        # Mark the season as unwatched
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Mark the show as unwatched
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentTitle] = 0
                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1
        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    if not silent:
        p_dialog.close()

    if news_in_playcounts:
        # New episodes found: mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            # active == 30 is a legacy sentinel; reset it to daily updates.
            if tvshow_item.active == 30:
                tvshow_item.active = 1
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1

        # ... and refresh the Kodi library
        if config.is_xbmc() and not silent:
            from platformcode import xbmc_library
            xbmc_library.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
def main(args, section=None):
    """Entry point: detect the calling downloader and post-process its job.

    Supports NZBGet (via NZBOP_*/NZBPP_* environment variables), SABnzbd
    (via positional args, pre- and post-0.7.17 layouts) and a manual mode
    that walks every enabled section/subsection directory.

    @param args: sys.argv-style argument list from the wrapper script
    @param section: optional config section forwarded to core.initialize
    @return: NZBGet post-process code when run under NZBGet, otherwise the
        processor's numeric result
    """
    # Initialize the config
    core.initialize(section)

    # clientAgent for NZBs
    clientAgent = core.NZB_CLIENTAGENT

    logger.info("#########################################################")
    logger.info("## ..::[%s]::.. ##" % os.path.basename(__file__))
    logger.info("#########################################################")

    # debug command line options
    logger.debug("Options passed into nzbToMedia: %s" % args)

    # Post-Processing Result
    result = [0, ""]
    status = 0

    # NZBGet
    if os.environ.has_key('NZBOP_SCRIPTDIR'):
        # Check if the script is called from nzbget 11.0 or later
        # NOTE(review): lexicographic compare of the 5-char version prefix —
        # e.g. '9.0' sorts above '11.0'; confirm against supported versions.
        if os.environ['NZBOP_VERSION'][0:5] < '11.0':
            logger.error("NZBGet Version %s is not supported. Please update NZBGet." % (str(os.environ['NZBOP_VERSION'])))
            sys.exit(core.NZBGET_POSTPROCESS_ERROR)

        logger.info("Script triggered from NZBGet Version %s." % (str(os.environ['NZBOP_VERSION'])))

        # Check if the script is called from nzbget 13.0 or later
        if os.environ.has_key('NZBPP_TOTALSTATUS'):
            if not os.environ['NZBPP_TOTALSTATUS'] == 'SUCCESS':
                logger.info("Download failed with status %s." % (os.environ['NZBPP_STATUS']))
                status = 1
        else:
            # Check par status
            if os.environ['NZBPP_PARSTATUS'] == '1' or os.environ['NZBPP_PARSTATUS'] == '4':
                logger.warning("Par-repair failed, setting status \"failed\"")
                status = 1

            # Check unpack status
            if os.environ['NZBPP_UNPACKSTATUS'] == '1':
                logger.warning("Unpack failed, setting status \"failed\"")
                status = 1

            if os.environ['NZBPP_UNPACKSTATUS'] == '0' and os.environ['NZBPP_PARSTATUS'] == '0':
                # Unpack was skipped due to nzb-file properties or due to errors during par-check
                # NOTE(review): environment values are strings, so this
                # str-vs-int comparison never triggers on Python 2 — likely
                # int(os.environ['NZBPP_HEALTH']) was intended; confirm.
                if os.environ['NZBPP_HEALTH'] < 1000:
                    logger.warning(
                        "Download health is compromised and Par-check/repair disabled or no .par2 files found. Setting status \"failed\"")
                    logger.info("Please check your Par-check/repair settings for future downloads.")
                    status = 1
                else:
                    logger.info(
                        "Par-check/repair disabled or no .par2 files found, and Unpack not required. Health is ok so handle as though download successful")
                    logger.info("Please check your Par-check/repair settings for future downloads.")

        # Check for download_id to pass to CouchPotato
        download_id = ""
        failureLink = None
        if os.environ.has_key('NZBPR_COUCHPOTATO'):
            download_id = os.environ['NZBPR_COUCHPOTATO']
        elif os.environ.has_key('NZBPR_DRONE'):
            download_id = os.environ['NZBPR_DRONE']
        elif os.environ.has_key('NZBPR_SONARR'):
            download_id = os.environ['NZBPR_SONARR']
        if os.environ.has_key('NZBPR__DNZB_FAILURE'):
            failureLink = os.environ['NZBPR__DNZB_FAILURE']

        # All checks done, now launching the script.
        clientAgent = 'nzbget'
        result = process(os.environ['NZBPP_DIRECTORY'], inputName=os.environ['NZBPP_NZBNAME'], status=status,
                         clientAgent=clientAgent, download_id=download_id,
                         inputCategory=os.environ['NZBPP_CATEGORY'], failureLink=failureLink)
    # SABnzbd Pre 0.7.17
    elif len(args) == core.SABNZB_NO_OF_ARGUMENTS:
        # SABnzbd argv:
        # 1 The final directory of the job (full path)
        # 2 The original name of the NZB file
        # 3 Clean version of the job name (no path info and ".nzb" removed)
        # 4 Indexer's report number (if supported)
        # 5 User-defined category
        # 6 Group that the NZB was posted in e.g. alt.binaries.x
        # 7 Status of post processing. 0 = OK, 1=failed verification, 2=failed unpack, 3=1+2
        clientAgent = 'sabnzbd'
        logger.info("Script triggered from SABnzbd")
        result = process(args[1], inputName=args[2], status=args[7], inputCategory=args[5],
                         clientAgent=clientAgent, download_id='')
    # SABnzbd 0.7.17+
    elif len(args) >= core.SABNZB_0717_NO_OF_ARGUMENTS:
        # SABnzbd argv:
        # 1 The final directory of the job (full path)
        # 2 The original name of the NZB file
        # 3 Clean version of the job name (no path info and ".nzb" removed)
        # 4 Indexer's report number (if supported)
        # 5 User-defined category
        # 6 Group that the NZB was posted in e.g. alt.binaries.x
        # 7 Status of post processing. 0 = OK, 1=failed verification, 2=failed unpack, 3=1+2
        # 8 Failure URL
        clientAgent = 'sabnzbd'
        logger.info("Script triggered from SABnzbd 0.7.17+")
        result = process(args[1], inputName=args[2], status=args[7], inputCategory=args[5],
                         clientAgent=clientAgent, download_id='', failureLink=''.join(args[8:]))
    else:
        # Perform Manual Post-Processing
        logger.warning("Invalid number of arguments received from client, Switching to manual run mode ...")

        for section, subsections in core.SECTIONS.items():
            for subsection in subsections:
                if not core.CFG[section][subsection].isenabled():
                    continue
                for dirName in getDirs(section, subsection, link='move'):
                    logger.info("Starting manual run for %s:%s - Folder:%s" % (section, subsection, dirName))

                    logger.info("Checking database for download info for %s ..." % (os.path.basename(dirName)))
                    core.DOWNLOADINFO = get_downloadInfo(os.path.basename(dirName), 0)
                    if core.DOWNLOADINFO:
                        logger.info(
                            "Found download info for %s, setting variables now ..." % (os.path.basename(dirName)))
                    else:
                        logger.info(
                            'Unable to locate download info for %s, continuing to try and process this release ...' % (
                                os.path.basename(dirName))
                        )

                    # Best effort: fall back to 'manual'/None when the DB
                    # row is missing or incomplete.
                    try:
                        clientAgent = str(core.DOWNLOADINFO[0]['client_agent'])
                    except:
                        clientAgent = 'manual'
                    try:
                        download_id = str(core.DOWNLOADINFO[0]['input_id'])
                    except:
                        download_id = None

                    if clientAgent.lower() not in core.NZB_CLIENTS and clientAgent != 'manual':
                        continue

                    try:
                        dirName = dirName.encode(core.SYS_ENCODING)
                    except:
                        pass
                    inputName = os.path.basename(dirName)
                    try:
                        inputName = inputName.encode(core.SYS_ENCODING)
                    except:
                        pass

                    results = process(dirName, inputName, 0, clientAgent=clientAgent,
                                      download_id=download_id, inputCategory=subsection)
                    if results[0] != 0:
                        logger.error("A problem was reported when trying to perform a manual run for %s:%s." % (
                            section, subsection))
                        result = results

    if result[0] == 0:
        logger.info("The %s script completed successfully." % args[0])
        if result[1]:
            print result[1] + "!"  # For SABnzbd Status display.
        if os.environ.has_key('NZBOP_SCRIPTDIR'):
            # return code for nzbget v11
            del core.MYAPP
            return (core.NZBGET_POSTPROCESS_SUCCESS)
    else:
        logger.error("A problem was reported in the %s script." % args[0])
        if result[1]:
            print result[1] + "!"  # For SABnzbd Status display.
        if os.environ.has_key('NZBOP_SCRIPTDIR'):
            # return code for nzbget v11
            del core.MYAPP
            return (core.NZBGET_POSTPROCESS_ERROR)
    del core.MYAPP
    return (result[0])
def novedades(item):
    """Scrape the unsoloclic "novedades" (latest posts) page.

    Builds one "findvideos" item per post (url, title, thumbnail) and,
    when the "« Peliculas anteriores" link exists, a pagination item that
    calls this same action on the next page.

    @param item: item whose ``url`` is the listing page
    @return: list of Item objects
    """
    logger.info()
    itemlist = []

    # Download the page
    data = scrapertools.cachePage(item.url)

    '''
    <div class="post-45732 post type-post status-publish format-standard hentry category-2012 category-blu-ray category-mkv-hd720p" id="post-45732">
    <h2 class="title"><a href="http://unsoloclic.info/2012/11/ek-tha-tiger-2012-blu-ray-720p-hd/" rel="bookmark" title="Permanent Link to Pelicula Ek Tha Tiger (2012) BLU-RAY 720p HD">Pelicula Ek Tha Tiger (2012) BLU-RAY 720p HD</a></h2>
    <div class="postdate"><img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/date.png" /> noviembre 5th, 2012  <!-- <img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/user.png" /> unsoloclic  --> </div>
    <div class="entry">
    <p><a href="http://unsoloclic.info/2012/11/ek-tha-tiger-2012-blu-ray-720p-hd/" rel="attachment wp-att-45737"><img src="http://unsoloclic.info/wp-content/uploads/2012/11/Ek-Tha-Tiger-2012.jpg" alt="" title="Ek Tha Tiger (2012)" width="500" height="629" class="aligncenter size-full wp-image-45737" /></a></p>
    <h2 style="text-align: center;"></h2>
    <div class="readmorecontent">
    <a class="readmore" href="http://unsoloclic.info/2012/11/ek-tha-tiger-2012-blu-ray-720p-hd/" rel="bookmark" title="Permanent Link to Pelicula Ek Tha Tiger (2012) BLU-RAY 720p HD">Seguir Leyendo</a>
    </div>
    </div>
    </div><!--/post-45732-->
    '''
    '''
    <div class="post-45923 post type-post status-publish format-standard hentry category-2012 category-blu-ray category-comedia category-drama category-mkv category-mkv-hd720p category-romance tag-chris-messina tag-jenna-fischer tag-lee-kirk tag-the-giant-mechanical-man-pelicula tag-topher-grace" id="post-45923">
    <h2 class="title"><a href="http://unsoloclic.info/2012/12/the-giant-mechanical-man-2012-bluray-720p-hd/" rel="bookmark" title="Permanent Link to The Giant Mechanical Man (2012) BluRay 720p HD">The Giant Mechanical Man (2012) BluRay 720p HD</a></h2>
    <div class="postdate"><img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/date.png" /> diciembre 24th, 2012  <!-- <img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/user.png" /> deportv  --> </div>
    <div class="entry">
    <p style="text-align: center;"><a href="http://unsoloclic.info/2012/12/the-giant-mechanical-man-2012-bluray-720p-hd/"><img class="aligncenter size-full wp-image-45924" title="Giant Michanical Man Pelicula Descargar" src="http://unsoloclic.info/wp-content/uploads/2012/12/Giant-Michanical-Man-Pelicula-Descargar.jpg" alt="" width="380" height="500" /></a></p>
    <p style="text-align: center;">
    <div class="readmorecontent">
    <a class="readmore" href="http://unsoloclic.info/2012/12/the-giant-mechanical-man-2012-bluray-720p-hd/" rel="bookmark" title="Permanent Link to The Giant Mechanical Man (2012) BluRay 720p HD">Seguir Leyendo</a>
    </div>
    </div>
    </div><!--/post-45923-->
    '''
    # One match per post: (url, title, thumbnail) — see samples above.
    patron = '<div class="post[^"]+" id="post-\d+">[^<]+'
    patron += '<h2 class="title"><a href="([^"]+)" rel="bookmark" title="[^"]+">([^<]+)</a></h2>[^<]+'
    patron += '<div class="postdate">.*?</div>[^<]+'
    patron += '<div class="entry">[^<]+'
    patron += '<p[^<]+<a[^<]+<img.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedplot = ""
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    '''
    <a href="http://unsoloclic.info/page/2/" >« Peliculas anteriores</a>
    '''
    # Pagination: the site's "previous movies" link acts as "next page".
    patron = '<a href="([^"]+)" >\«\; Peliculas anteriores</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = ">> Página siguiente"
        scrapedplot = ""
        scrapedurl = urlparse.urljoin(item.url, match)
        scrapedthumbnail = ""
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" +
                     scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=item.channel,
                 action="novedades",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    return itemlist
def novedades(item):
    """Build the listing of latest videos for the current page.

    Scrapes item.url and emits one item per video found, plus a trailing
    pagination item when a "next page" link exists.  Anonymous visitors get
    direct "play_" items; logged-in users get "findvideos" items carrying the
    numeric video id used by account features (watch-later, etc.).
    """
    logger.info()
    itemlist = []

    # Fetch the page (module-level `headers` carries the User-Agent).
    data = scrapertools.cachePage(item.url, headers=headers)

    # The section header doubles as the plot; drop it if it still has markup.
    scrapedplot = scrapertools.find_single_match(
        data, '<div class="pm-section-head">(.*?)</div>')
    scrapedplot = "" if "<div" in scrapedplot else scrapertools.htmlclean(scrapedplot)

    bloque = scrapertools.find_multiple_matches(
        data, '<li class="col-xs-[\d] col-sm-[\d] col-md-[\d]">(.*?)</li>')

    # "Registrarse" in the page (or no configured account) means we are browsing
    # anonymously, which changes both the scraping pattern and the item action.
    anonymous = "Registrarse" in data or not account
    if anonymous:
        patron = ('<span class="pm-label-duration">(.*?)</span>.*?<a href="([^"]+)"'
                  '.*?title="([^"]+)".*?data-echo="([^"]+)"')
    else:
        patron = ('<span class="pm-label-duration">(.*?)</span>.*?onclick="watch_later_add\(([\d]+)\)'
                  '.*?<a href="([^"]+)".*?title="([^"]+)".*?data-echo="([^"]+)"')

    for trozo in bloque:
        for fields in scrapertools.find_multiple_matches(trozo, patron):
            video_id = None
            if anonymous:
                duracion, scrapedurl, scrapedtitle, scrapedthumbnail = fields
            else:
                duracion, video_id, scrapedurl, scrapedtitle, scrapedthumbnail = fields

            contentTitle = scrapedtitle[:]
            scrapedtitle += " [" + duracion + "]"

            # Inline base64 thumbnails are placeholders: fall back to the parent
            # item's thumbnail.  Real URLs get the UA header appended Kodi-style.
            if scrapedthumbnail.startswith("data:image"):
                scrapedthumbnail = item.thumbnail
            else:
                scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]

            logger.debug("title=[%s], url=[%s], thumbnail=[%s]"
                         % (scrapedtitle, scrapedurl, scrapedthumbnail))

            if anonymous:
                itemlist.append(
                    item.clone(action="play_", title=scrapedtitle, url=scrapedurl,
                               thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
                               plot=scrapedplot, fulltitle=scrapedtitle,
                               contentTitle=contentTitle, folder=False))
            else:
                itemlist.append(
                    item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl,
                               thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
                               plot=scrapedplot, id=video_id, fulltitle=scrapedtitle,
                               contentTitle=contentTitle))

    # Pagination link, when present.
    try:
        next_page_url = scrapertools.get_match(data, '<a href="([^"]+)">»</a>')
        next_page_url = urlparse.urljoin(host, next_page_url)
        itemlist.append(
            item.clone(action="novedades", title=">> Página siguiente",
                       url=next_page_url))
    except:
        logger.error("Siguiente pagina no encontrada")

    return itemlist
def _warn_duplicate_category(env_key_a, env_key_b, pair):
    # Warn when the same NZBGet category is assigned to two rival tools
    # (only one of them should process a given download).
    if env_key_a in os.environ and env_key_b in os.environ:
        if os.environ[env_key_a] == os.environ[env_key_b]:
            logger.warning(
                '{x} category is set for {pair}. '
                'Please check your config in NZBGet'.format(
                    x=os.environ[env_key_a], pair=pair))


def _copy_env_options(cfg_new, section, prefix, env_keys, cfg_keys):
    # Copy flat (non-category) options: NZBPO_<prefix><ENV> -> cfg_new[section][cfg_key].
    # Only env vars that are actually present are copied.
    for env_key, cfg_key in zip(env_keys, cfg_keys):
        key = 'NZBPO_{prefix}{name}'.format(prefix=prefix, name=env_key)
        if key in os.environ:
            cfg_new[section][cfg_key] = os.environ[key]


def _copy_category_options(cfg_new, section, category, prefix, env_keys, cfg_keys):
    # Copy options into the per-category subsection cfg_new[section][category],
    # creating the subsection on first write, then force it enabled (matches the
    # historical behavior of unconditionally setting enabled=1 for the category).
    for env_key, cfg_key in zip(env_keys, cfg_keys):
        key = 'NZBPO_{prefix}{name}'.format(prefix=prefix, name=env_key)
        if key in os.environ:
            if category not in cfg_new[section].sections:
                cfg_new[section][category] = {}
            cfg_new[section][category][cfg_key] = os.environ[key]
    cfg_new[section][category]['enabled'] = 1


def _disable_rival(cfg_new, rival_section, category):
    # If the same category is also configured under a rival tool's section,
    # disable it there so only one tool handles the category.
    # BUGFIX: the old code indexed cfg_new[rival][env_cat_key] (the env var
    # *name*, e.g. 'NZBPO_CPSCATEGORY') instead of the category value, which
    # raised KeyError inside addnzbget's broad except and aborted the mapping.
    if category in cfg_new[rival_section].sections:
        cfg_new[rival_section][category]['enabled'] = 0


def addnzbget():
    """Map NZBGet post-processing environment (NZBOP_*/NZBPO_*) into config.

    Loads the current config, overlays every recognized NZBGet option onto the
    matching section (creating per-category subsections as needed), warns about
    categories shared between rival tools, then writes the result back to
    core.CONFIG_FILE.

    Returns:
        The updated config object (written back even if the overlay partially
        failed; failures are logged, not raised).
    """
    # load configs into memory
    cfg_new = config()
    try:
        _warn_duplicate_category('NZBPO_NDCATEGORY', 'NZBPO_SBCATEGORY',
                                 'SickBeard and Sonarr')
        _warn_duplicate_category('NZBPO_RACATEGORY', 'NZBPO_CPSCATEGORY',
                                 'CouchPotato and Radarr')
        _warn_duplicate_category('NZBPO_RACATEGORY', 'NZBPO_W3CATEGORY',
                                 'Watcher3 and Radarr')
        _warn_duplicate_category('NZBPO_W3CATEGORY', 'NZBPO_CPSCATEGORY',
                                 'CouchPotato and Watcher3')
        _warn_duplicate_category('NZBPO_LICATEGORY', 'NZBPO_HPCATEGORY',
                                 'HeadPhones and Lidarr')

        # NZBGet's own destination directory becomes our default download dir.
        if 'NZBOP_DESTDIR' in os.environ:
            cfg_new['Nzb']['default_downloadDirectory'] = os.environ['NZBOP_DESTDIR']

        _copy_env_options(
            cfg_new, 'General', '',
            ['AUTO_UPDATE', 'CHECK_MEDIA', 'SAFE_MODE', 'NO_EXTRACT_FAILED'],
            ['auto_update', 'check_media', 'safe_mode', 'no_extract_failed'])

        _copy_env_options(cfg_new, 'Network', '', ['MOUNTPOINTS'], ['mount_points'])

        if 'NZBPO_CPSCATEGORY' in os.environ:
            category = os.environ['NZBPO_CPSCATEGORY']
            _copy_category_options(
                cfg_new, 'CouchPotato', category, 'CPS',
                ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD',
                 'DELETE_FAILED', 'REMOTE_PATH', 'WAIT_FOR', 'WATCH_DIR', 'OMDBAPIKEY'],
                ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method',
                 'delete_failed', 'remote_path', 'wait_for', 'watch_dir', 'omdbapikey'])
            _disable_rival(cfg_new, 'Radarr', category)
            _disable_rival(cfg_new, 'Watcher3', category)

        if 'NZBPO_W3CATEGORY' in os.environ:
            category = os.environ['NZBPO_W3CATEGORY']
            _copy_category_options(
                cfg_new, 'Watcher3', category, 'W3',
                ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD',
                 'DELETE_FAILED', 'REMOTE_PATH', 'WAIT_FOR', 'WATCH_DIR', 'OMDBAPIKEY'],
                ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method',
                 'delete_failed', 'remote_path', 'wait_for', 'watch_dir', 'omdbapikey'])
            _disable_rival(cfg_new, 'Radarr', category)
            _disable_rival(cfg_new, 'CouchPotato', category)

        if 'NZBPO_SBCATEGORY' in os.environ:
            category = os.environ['NZBPO_SBCATEGORY']
            _copy_category_options(
                cfg_new, 'SickBeard', category, 'SB',
                ['ENABLED', 'HOST', 'PORT', 'APIKEY', 'USERNAME', 'PASSWORD', 'SSL',
                 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK',
                 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD'],
                ['enabled', 'host', 'port', 'apikey', 'username', 'password', 'ssl',
                 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink',
                 'nzbExtractionBy', 'remote_path', 'process_method'])
            _disable_rival(cfg_new, 'NzbDrone', category)

        if 'NZBPO_HPCATEGORY' in os.environ:
            category = os.environ['NZBPO_HPCATEGORY']
            _copy_category_options(
                cfg_new, 'HeadPhones', category, 'HP',
                ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WAIT_FOR',
                 'WATCH_DIR', 'REMOTE_PATH', 'DELETE_FAILED'],
                ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'wait_for',
                 'watch_dir', 'remote_path', 'delete_failed'])
            _disable_rival(cfg_new, 'Lidarr', category)

        if 'NZBPO_MYCATEGORY' in os.environ:
            _copy_category_options(
                cfg_new, 'Mylar', os.environ['NZBPO_MYCATEGORY'], 'MY',
                ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'APIKEY', 'SSL',
                 'WEB_ROOT', 'WATCH_DIR', 'REMOTE_PATH'],
                ['enabled', 'host', 'port', 'username', 'password', 'apikey', 'ssl',
                 'web_root', 'watch_dir', 'remote_path'])

        if 'NZBPO_GZCATEGORY' in os.environ:
            _copy_category_options(
                cfg_new, 'Gamez', os.environ['NZBPO_GZCATEGORY'], 'GZ',
                ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR',
                 'LIBRARY', 'REMOTE_PATH'],
                ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir',
                 'library', 'remote_path'])

        if 'NZBPO_LLCATEGORY' in os.environ:
            _copy_category_options(
                cfg_new, 'LazyLibrarian', os.environ['NZBPO_LLCATEGORY'], 'LL',
                ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR',
                 'REMOTE_PATH'],
                ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir',
                 'remote_path'])

        if 'NZBPO_NDCATEGORY' in os.environ:
            category = os.environ['NZBPO_NDCATEGORY']
            # NOTE: DELETE_FAILED/delete_failed appears twice upstream; kept for
            # exact behavioral parity (the second copy just overwrites the first).
            _copy_category_options(
                cfg_new, 'NzbDrone', category, 'ND',
                ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR',
                 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY',
                 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH', 'IMPORTMODE'],
                ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir',
                 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy',
                 'wait_for', 'delete_failed', 'remote_path', 'importMode'])
            _disable_rival(cfg_new, 'SickBeard', category)

        if 'NZBPO_RACATEGORY' in os.environ:
            category = os.environ['NZBPO_RACATEGORY']
            _copy_category_options(
                cfg_new, 'Radarr', category, 'RA',
                ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR',
                 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY',
                 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH', 'OMDBAPIKEY', 'IMPORTMODE'],
                ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir',
                 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy',
                 'wait_for', 'delete_failed', 'remote_path', 'omdbapikey', 'importMode'])
            _disable_rival(cfg_new, 'CouchPotato', category)
            # BUGFIX: was cfg_new['Wacther3'].sections (typo), which raised
            # KeyError and aborted the whole mapping whenever RA was configured.
            _disable_rival(cfg_new, 'Watcher3', category)

        if 'NZBPO_LICATEGORY' in os.environ:
            category = os.environ['NZBPO_LICATEGORY']
            _copy_category_options(
                cfg_new, 'Lidarr', category, 'LI',
                ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR',
                 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY',
                 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH'],
                ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir',
                 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy',
                 'wait_for', 'delete_failed', 'remote_path'])
            _disable_rival(cfg_new, 'HeadPhones', category)

        _copy_env_options(
            cfg_new, 'Extensions', '',
            ['COMPRESSEDEXTENSIONS', 'MEDIAEXTENSIONS', 'METAEXTENSIONS'],
            ['compressedExtensions', 'mediaExtensions', 'metaExtensions'])

        _copy_env_options(
            cfg_new, 'Posix', '',
            ['NICENESS', 'IONICE_CLASS', 'IONICE_CLASSDATA'],
            ['niceness', 'ionice_class', 'ionice_classdata'])

        _copy_env_options(
            cfg_new, 'Transcoder', '',
            ['TRANSCODE', 'DUPLICATE', 'IGNOREEXTENSIONS', 'OUTPUTFASTSTART',
             'OUTPUTVIDEOPATH', 'PROCESSOUTPUT', 'AUDIOLANGUAGE', 'ALLAUDIOLANGUAGES',
             'SUBLANGUAGES', 'ALLSUBLANGUAGES', 'EMBEDSUBS', 'BURNINSUBTITLE',
             'EXTRACTSUBS', 'EXTERNALSUBDIR', 'OUTPUTDEFAULT', 'OUTPUTVIDEOEXTENSION',
             'OUTPUTVIDEOCODEC', 'VIDEOCODECALLOW', 'OUTPUTVIDEOPRESET',
             'OUTPUTVIDEOFRAMERATE', 'OUTPUTVIDEOBITRATE', 'OUTPUTAUDIOCODEC',
             'AUDIOCODECALLOW', 'OUTPUTAUDIOBITRATE', 'OUTPUTQUALITYPERCENT',
             'GETSUBS', 'OUTPUTAUDIOTRACK2CODEC', 'AUDIOCODEC2ALLOW',
             'OUTPUTAUDIOTRACK2BITRATE', 'OUTPUTAUDIOOTHERCODEC',
             'AUDIOOTHERCODECALLOW', 'OUTPUTAUDIOOTHERBITRATE', 'OUTPUTSUBTITLECODEC',
             'OUTPUTAUDIOCHANNELS', 'OUTPUTAUDIOTRACK2CHANNELS',
             'OUTPUTAUDIOOTHERCHANNELS', 'OUTPUTVIDEORESOLUTION'],
            ['transcode', 'duplicate', 'ignoreExtensions', 'outputFastStart',
             'outputVideoPath', 'processOutput', 'audioLanguage', 'allAudioLanguages',
             'subLanguages', 'allSubLanguages', 'embedSubs', 'burnInSubtitle',
             'extractSubs', 'externalSubDir', 'outputDefault', 'outputVideoExtension',
             'outputVideoCodec', 'VideoCodecAllow', 'outputVideoPreset',
             'outputVideoFramerate', 'outputVideoBitrate', 'outputAudioCodec',
             'AudioCodecAllow', 'outputAudioBitrate', 'outputQualityPercent',
             'getSubs', 'outputAudioTrack2Codec', 'AudioCodec2Allow',
             'outputAudioTrack2Bitrate', 'outputAudioOtherCodec',
             'AudioOtherCodecAllow', 'outputAudioOtherBitrate', 'outputSubtitleCodec',
             'outputAudioChannels', 'outputAudioTrack2Channels',
             'outputAudioOtherChannels', 'outputVideoResolution'])

        _copy_env_options(
            cfg_new, 'WakeOnLan', 'WOL',
            ['WAKE', 'HOST', 'PORT', 'MAC'],
            ['wake', 'host', 'port', 'mac'])

        if 'NZBPO_USCATEGORY' in os.environ:
            # UserScript keys have no extra prefix beyond NZBPO_.
            _copy_category_options(
                cfg_new, 'UserScript', os.environ['NZBPO_USCATEGORY'], '',
                ['USER_SCRIPT_MEDIAEXTENSIONS', 'USER_SCRIPT_PATH', 'USER_SCRIPT_PARAM',
                 'USER_SCRIPT_RUNONCE', 'USER_SCRIPT_SUCCESSCODES', 'USER_SCRIPT_CLEAN',
                 'USDELAY', 'USREMOTE_PATH'],
                ['user_script_mediaExtensions', 'user_script_path', 'user_script_param',
                 'user_script_runOnce', 'user_script_successCodes', 'user_script_clean',
                 'delay', 'remote_path'])
    except Exception as error:
        logger.debug(
            'Error {msg} when applying NZBGet config'.format(msg=error))

    try:
        # write our new config to autoProcessMedia.cfg
        cfg_new.filename = core.CONFIG_FILE
        cfg_new.write()
    except Exception as error:
        logger.debug(
            'Error {msg} when writing changes to .cfg'.format(msg=error))
    return cfg_new
def save_library_movie(item):
    """Save the movie described by `item` into the movie library.

    @type item: item
    @param item: element to be saved.
    @rtype insertados: int
    @return: number of inserted elements
    @rtype sobreescritos: int
    @return: number of overwritten elements
    @rtype fallidos: int
    @return: number of failed elements, or -1 if everything failed
    """
    logger.info()
    # logger.debug(item.tostring('\n'))
    insertados = 0
    sobreescritos = 0
    fallidos = 0
    path = ""

    # Try to obtain the correct title, in priority order:
    # 1. contentTitle: this should be the right place, since title often
    #    contains "Add to library..." style text
    # 2. fulltitle
    # 3. title
    if not item.contentTitle:
        # Put the correct title where the scraper expects to find it
        if item.fulltitle:
            item.contentTitle = item.fulltitle
        else:
            item.contentTitle = item.title

    # If at this point we still have no title, bail out without saving
    if not item.contentTitle or not item.channel:
        logger.debug("NO ENCONTRADO contentTitle")
        return 0, 0, -1  # Exit without saving

    # TODO configure so the proper scraper is called depending on settings
    scraper_return = scraper.find_and_set_infoLabels(item)

    # At this point we can have:
    #  scraper_return = True: an item whose infoLabels carry updated movie info
    #  scraper_return = False: an item with no movie info (user cancelled)
    #  item.infoLabels['code'] == "": the IMDB identifier needed to continue
    #    was not found, so we exit
    if not scraper_return or not item.infoLabels['code']:
        # TODO for now nothing is added when there is no result, though we
        # could open a dialog to enter the identifier/name manually
        logger.debug("NO ENCONTRADO EN SCRAPER O NO TIENE IMDB_ID")
        return 0, 0, -1

    _id = item.infoLabels['code']

    # progress dialog
    p_dialog = platformtools.dialog_progress('pelisalacarta',
                                             'Añadiendo película...')

    base_name = filetools.validate_path(item.contentTitle).lower()

    # Look for an existing movie folder tagged with the same id, e.g. "name [id]"
    for raiz, subcarpetas, ficheros in filetools.walk(MOVIES_PATH):
        for c in subcarpetas:
            if c.endswith("[%s]" % _id):
                path = filetools.join(raiz, c)
                break

    if not path:
        # Create the movie folder
        path = filetools.join(MOVIES_PATH, ("%s [%s]" % (base_name, _id)).strip())
        logger.info("Creando directorio pelicula:" + path)
        if not filetools.mkdir(path):
            logger.debug("No se ha podido crear el directorio")
            return 0, 0, -1

    nfo_path = filetools.join(path, "%s [%s].nfo" % (base_name, _id))
    strm_path = filetools.join(path, "%s.strm" % base_name)
    json_path = filetools.join(path, ("%s [%s].json" % (base_name,
                                                        item.channel.lower())))

    nfo_exists = filetools.exists(nfo_path)
    strm_exists = filetools.exists(strm_path)
    json_exists = filetools.exists(json_path)

    if not nfo_exists:
        # Create the .nfo if it does not exist
        logger.info("Creando .nfo: " + nfo_path)
        # First line of the .nfo is the scraper URL Kodi uses to identify the movie
        if item.infoLabels['tmdb_id']:
            head_nfo = "https://www.themoviedb.org/movie/%s\n" % item.infoLabels['tmdb_id']
        else:
            head_nfo = "Aqui ira el xml"  # TODO
        item_nfo = Item(title=item.contentTitle, channel="biblioteca",
                        action='findvideos',
                        library_playcounts={"%s [%s]" % (base_name, _id): 0},
                        infoLabels=item.infoLabels, library_urls={})
    else:
        # The .nfo exists but we are adding a new channel: open and reuse it
        head_nfo, item_nfo = read_nfo(nfo_path)

    if not strm_exists:
        # Create base_name.strm if it does not exist
        item_strm = item.clone(channel='biblioteca', action='play_from_library',
                               strm_path=strm_path.replace(MOVIES_PATH, ""),
                               contentType='movie',
                               infoLabels={'title': item.contentTitle})
        # filetools.write returns truthiness of the write, reused as the "exists" flag
        strm_exists = filetools.write(strm_path,
                                      '%s?%s' % (addon_name, item_strm.tourl()))
        item_nfo.strm_path = strm_path.replace(MOVIES_PATH, "")

    # Only continue if both item_nfo and the .strm exist
    if item_nfo and strm_exists:
        if json_exists:
            logger.info("El fichero existe. Se sobreescribe")
            sobreescritos += 1
        else:
            insertados += 1

        if filetools.write(json_path, item.tojson()):
            p_dialog.update(100, 'Añadiendo película...', item.contentTitle)
            item_nfo.library_urls[item.channel] = item.url
            if filetools.write(nfo_path, head_nfo + item_nfo.tojson()):
                # Refresh Kodi's library with the new movie
                if config.is_xbmc():
                    from platformcode import xbmc_library
                    xbmc_library.update(FOLDER_MOVIES,
                                        filetools.basename(path) + "/")
                p_dialog.close()
                return insertados, sobreescritos, fallidos

    # Reaching this point means something failed along the way
    logger.error("No se ha podido guardar %s en la biblioteca"
                 % item.contentTitle)
    p_dialog.update(100, 'Fallo al añadir...', item.contentTitle)
    p_dialog.close()
    # TODO should another warning be shown here?
    return 0, 0, -1
def get_release(base_url, imdb_id=None, download_id=None, release_id=None):
    """Query CouchPotato for release information.

    Asks CP's API (media.get for a specific id, media.list otherwise) and
    returns a dict mapping release '_id' -> release record, narrowed down —
    when possible — to the single release matching the given download.

    :param base_url: CP API base URL, command name is appended directly.
    :param imdb_id: optional IMDB id to look up a specific media entry.
    :param download_id: optional downloader id used to filter releases.
    :param release_id: optional release id; returned as-is when found.
    :return: dict of releases (possibly empty on any error).
    """
    results = {}
    params = {}

    # determine cmd and params to send to CouchPotato to get our results
    section = 'movies'
    cmd = 'media.list'
    if release_id or imdb_id:
        section = 'media'
        cmd = 'media.get'
        params['id'] = release_id or imdb_id

    if not (release_id or imdb_id or download_id):
        logger.debug('No information available to filter CP results')
        return results

    url = '{0}{1}'.format(base_url, cmd)
    logger.debug('Opening URL: {0} with PARAMS: {1}'.format(url, params))

    try:
        r = requests.get(url, params=params, verify=False, timeout=(30, 60))
    except requests.ConnectionError:
        logger.error('Unable to open URL {0}'.format(url))
        return results

    try:
        result = r.json()
    except ValueError:
        # ValueError catches simplejson's JSONDecodeError and json's ValueError
        logger.error('CouchPotato returned the following non-json data')
        for line in r.iter_lines():
            logger.error('{0}'.format(line))
        return results

    if not result['success']:
        if 'error' in result:
            logger.error('{0}'.format(result['error']))
        else:
            logger.error('no media found for id {0}'.format(params['id']))
        return results

    # Gather release info and return it back, no need to narrow results
    if release_id:
        try:
            cur_id = result[section]['_id']
            results[cur_id] = result[section]
            return results
        except Exception:
            pass

    # Gather release info and proceed with trying to narrow results to one release choice
    movies = result[section]
    if not isinstance(movies, list):
        movies = [movies]
    for movie in movies:
        if movie['status'] not in ['active', 'done']:
            continue
        releases = movie['releases']
        if not releases:
            continue
        for release in releases:
            try:
                if release['status'] not in ['snatched', 'downloaded', 'done']:
                    continue
                if download_id:
                    if download_id.lower() != release['download_info']['id'].lower():
                        continue
                cur_id = release['_id']
                results[cur_id] = release
                results[cur_id]['title'] = movie['title']
            except Exception:
                continue

    # Narrow results by removing old releases by comparing their last_edit field.
    # BUGFIX: iterate over snapshots (list(...)) — the old code popped entries
    # from `results` while iterating results.items()/values(), which raises
    # RuntimeError on Python 3. A pop of an already-removed key raises KeyError,
    # swallowed by the except, preserving the original semantics.
    if len(results) > 1:
        for id1, x1 in list(results.items()):
            for x2 in list(results.values()):
                try:
                    if x2['last_edit'] > x1['last_edit']:
                        results.pop(id1)
                except Exception:
                    continue

    # Search downloads on clients for a match to try and narrow our results down to 1
    if len(results) > 1:
        for cur_id, x in list(results.items()):
            try:
                if not find_download(
                        str(x['download_info']['downloader']).lower(),
                        x['download_info']['id']):
                    results.pop(cur_id)
            except Exception:
                continue

    return results
def episodios(item):
    """List every episode (or the single movie) found on a series page.

    Adds the usual library helper entries when library support is enabled:
    "add to library" plus, for series, a "download all episodes" item.
    """
    logger.info()
    itemlist = []

    page_html = get_url_contents(item.url)
    serie_info = __extract_info_from_serie(page_html)
    sinopsis = serie_info[3] if serie_info else ''

    es_pelicula = False
    for enlace, titulo, fecha in re.findall(REGEX_EPISODE, page_html, re.DOTALL):
        numero = scrapertools.find_single_match(titulo, r'Episodio (\d+)')
        if numero:
            # The link belongs to an episode: render as "1xNN Episodio NN (date)"
            numero = int(numero)
            titulo = "{0}x{1:02d} {2} ({3})".format(
                1, numero, "Episodio " + str(numero), fecha)
        else:
            # The link belongs to a movie
            titulo = "{0} ({1})".format(titulo, fecha)
            item.url = enlace
            es_pelicula = True

        logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(
            titulo, enlace, item.thumbnail))

        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=titulo,
                 url=enlace, thumbnail=item.thumbnail, plot=sinopsis,
                 show=item.show,
                 fulltitle="{0} {1}".format(item.show, titulo),
                 viewmode="movies_with_plot", folder=True))

    # Library is supported and at least one episode or movie was found
    if config.get_library_support() and len(itemlist) > 0:
        if es_pelicula:
            entrada = ("Añadir película a la biblioteca",
                       "add_pelicula_to_library", "")
        else:
            entrada = ("Añadir serie a la biblioteca",
                       "add_serie_to_library", "episodios")

        itemlist.append(
            Item(channel=item.channel, title=entrada[0], url=item.url,
                 action=entrada[1], extra=entrada[2], show=item.show))

        if not es_pelicula:
            itemlist.append(
                Item(channel=item.channel,
                     title="Descargar todos los episodios", url=item.url,
                     action="download_all_episodes", extra="episodios",
                     show=item.show))

    return itemlist
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    # Resolve a nowvideo page URL into a list of playable video URLs.
    # Returns a list of [label, url] pairs.
    # NOTE(review): several string literals below contain "******" — they look
    # like credential-scrubbed placeholders and are not valid Python as-is;
    # the original user/password interpolation needs to be restored. TODO confirm
    # against the upstream nowvideo connector before shipping.
    logger.info("[nowvideo.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
    video_id = scrapertools.get_match(
        page_url, "http://www.nowvideo.../video/([a-z0-9]+)")
    if premium:
        # Fetch the login page (presumably to obtain session cookies — TODO confirm)
        login_url = "http://www.nowvideo.eu/login.php"
        data = scrapertools.cache_page(login_url)

        # Perform the login
        login_url = "http://www.nowvideo.eu/login.php?return="
        post = "user="******"&pass="******"®ister=Login"
        headers = []
        headers.append(["User-Agent", USER_AGENT])
        headers.append(["Referer", "http://www.nowvideo.eu/login.php"])
        data = scrapertools.cache_page(login_url, post=post, headers=headers)

        # Download the video page
        data = scrapertools.cache_page(page_url)
        logger.debug("data:" + data)

        # URL to call: http://www.nowvideo.eu/api/player.api.php?user=aaa&file=rxnwy9ku2nwx7&pass=bbb&cid=1&cid2=undefined&key=83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788&cid3=undefined
        # Sample of the flashvars block found in the page:
        '''
        flashvars.domain="http://www.nowvideo.eu";
        flashvars.file="rxnwy9ku2nwx7";
        flashvars.filekey="83.46.246.226-c7e707c6e20a730c563e349d2333e788";
        flashvars.advURL="0";
        flashvars.autoplay="false";
        flashvars.cid="1";
        flashvars.user="******";
        flashvars.key="bbb";
        flashvars.type="1";
        '''
        # Extract the flashvars needed to build the player API request
        flashvar_file = scrapertools.get_match(data, 'flashvars.file="([^"]+)"')
        flashvar_filekey = scrapertools.get_match(
            data, 'flashvars.filekey="([^"]+)"')
        flashvar_user = scrapertools.get_match(data, 'flashvars.user="******"]+)"')
        flashvar_key = scrapertools.get_match(data, 'flashvars.key="([^"]+)"')
        flashvar_type = scrapertools.get_match(data, 'flashvars.type="([^"]+)"')

        #http://www.nowvideo.eu/api/player.api.php?user=aaa&file=rxnwy9ku2nwx7&pass=bbb&cid=1&cid2=undefined&key=83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788&cid3=undefined
        # Build the player API URL; the filekey is percent-encoded by hand
        url = "http://www.nowvideo.eu/api/player.api.php?user="******"&file=" + flashvar_file + "&pass="******"&cid=1&cid2=undefined&key=" + flashvar_filekey.replace(
            ".", "%2E").replace("-", "%2D") + "&cid3=undefined"
        data = scrapertools.cache_page(url)
        logger.info("data=" + data)

        # The API answer carries the stream location as "url=...&"
        location = scrapertools.get_match(data, 'url=([^\&]+)&')
        location = location + "?client=FLASH"

        video_urls.append([
            scrapertools.get_filename_from_url(location)[-4:] +
            " [premium][nowvideo]", location
        ])
    else:
        # Anonymous flow, e.g. http://www.nowvideo.sx/video/xuntu4pfq0qye
        data = scrapertools.cache_page(page_url)
        logger.debug("data=" + data)

        # The page obfuscates its javascript; unwise deobfuscates it
        data = unwise.unwise_process(data)
        logger.debug("data=" + data)

        filekey = unwise.resolve_var(data, "flashvars.filekey")
        logger.debug("filekey=" + filekey)

        #get stream url from api
        url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (
            filekey, video_id)
        data = scrapertools.cache_page(url)
        logger.debug("data=" + data)

        location = scrapertools.get_match(data, 'url=(.+?)&title')

        video_urls.append([
            scrapertools.get_filename_from_url(location)[-4:] + " [nowvideo]",
            location
        ])

    for video_url in video_urls:
        logger.info("[nowvideo.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
def find_and_set_infoLabels(item):
    """
    Function called to search for and set the infoLabels.
    :param item:
    :return: boolean indicating whether the 'code' could be found
    """
    global scraper
    scraper = None
    # logger.debug("item:\n" + item.tostring('\n'))

    list_opciones_cuadro = ["Immettere un altro nome", "Informazioni complete"]
    # If more scrapers are added they must be declared here -> "scraper_module": "Dialog_text"
    scrapers_disponibles = {
        'tmdb': "Cerca su TheMovieDB.org",
        'tvdb': "Cerca su TheTvDB.com"
    }

    # Obtain the default scraper from the settings according to content type
    if item.contentType == "movie":
        scraper_actual = ['tmdb'][config.get_setting("scraper_movies",
                                                     "biblioteca")]
        tipo_contenido = "película"
        title = item.contentTitle
        # Complete the option list for this content type
        list_opciones_cuadro.append(scrapers_disponibles['tmdb'])
    else:
        scraper_actual = ['tmdb', 'tvdb'][config.get_setting("scraper_tvshows",
                                                             "biblioteca")]
        tipo_contenido = "serie"
        title = item.contentSerieName
        # Complete the option list for this content type
        list_opciones_cuadro.append(scrapers_disponibles['tmdb'])
        list_opciones_cuadro.append(scrapers_disponibles['tvdb'])

    # Import the scraper module (dynamic import by name)
    try:
        scraper = __import__('core.%s' % scraper_actual,
                             fromlist=["core.%s" % scraper_actual])
    except ImportError:
        # Python 2 fallback when __import__ fails
        exec "import core." + scraper_actual + " as scraper"
    except:
        import traceback
        logger.error(traceback.format_exc())

    while scraper:
        # Call the selected scraper's find_and_set_infoLabels function
        scraper_result = scraper.find_and_set_infoLabels(item)

        # Check whether a 'code' exists
        if scraper_result and item.infoLabels['code']:
            # correct code
            logger.info("Identificador encontrado: %s" %
                        item.infoLabels['code'])
            scraper.completar_codigos(item)
            return True
        elif scraper_result:
            # Content found but no 'code'
            msg = "ID Non trovato per: %s" % title
        else:
            # Content not found
            msg = "Nessuna informazione trovata per: %s" % title

        logger.info(msg)
        # Show dialog with the remaining options:
        if scrapers_disponibles[scraper_actual] in list_opciones_cuadro:
            list_opciones_cuadro.remove(scrapers_disponibles[scraper_actual])

        index = platformtools.dialog_select(msg, list_opciones_cuadro)

        if index < 0:
            logger.debug("Se ha pulsado 'cancelar' en la ventana '%s'" % msg)
            return False

        elif index == 0:
            # Ask for the title
            title = platformtools.dialog_input(
                title, "Inserire il nome %s per ricerca" % tipo_contenido)
            if title:
                if item.contentType == "movie":
                    item.contentTitle = title
                else:
                    item.contentSerieName = title
            else:
                logger.debug(
                    "he pulsado 'cancelar' en la ventana 'Introduzca el nombre correcto'"
                )
                return False

        elif index == 1:
            # A dialog must be opened so the user can enter the data manually
            logger.info("Completar información")
            if cuadro_completar(item):
                # correct code
                logger.info("Identificador encontrado: %s" %
                            str(item.infoLabels['code']))
                return True
            # raise

        elif list_opciones_cuadro[index] in scrapers_disponibles.values():
            # Obtain the module name of the chosen scraper
            for k, v in scrapers_disponibles.items():
                if list_opciones_cuadro[index] == v:
                    if scrapers_disponibles[
                            scraper_actual] not in list_opciones_cuadro:
                        list_opciones_cuadro.append(
                            scrapers_disponibles[scraper_actual])

                    # Import scraper k
                    scraper_actual = k
                    try:
                        scraper = None
                        scraper = __import__(
                            'core.%s' % scraper_actual,
                            fromlist=["core.%s" % scraper_actual])
                    except ImportError:
                        # NOTE(review): this fallback binds `scraper_module`,
                        # not `scraper`, so the while-loop would exit with
                        # scraper=None — looks like a bug; the first fallback
                        # above binds `scraper`. TODO confirm and align.
                        exec "import core." + scraper_actual + " as scraper_module"
                    break

    # Reached only when the scraper module could not be imported
    logger.error("Error al importar el modulo scraper %s" % scraper_actual)
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the final playable stream URL(s) for a nowvideo video page.

    page_url       -- nowvideo page URL; the video id is its last path segment
    premium        -- when True, log in first and use the premium player API
    user, password -- account credentials (see NOTE below)
    video_password -- unused here; kept for the common resolver signature

    Returns a list of [label, url] pairs, one entry per resolved stream.

    NOTE(review): several string literals below contain ****** placeholders left
    by a credential-scrubbing pass, and '®ister' looks like mojibake of
    '&register' -- those lines cannot run as-is; restore the original literals
    before using the premium path.
    """
    logger.info("[nowvideo.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
    # Extract the video id from the page URL (domain TLD is wildcarded).
    video_id = scrapertools.get_match(page_url, "http://www.nowvideo.../video/([a-z0-9]+)")

    if premium:
        # Fetch the login page first (establishes the session cookies).
        login_url = "http://www.nowvideo.eu/login.php"
        data = scrapertools.cache_page(login_url)

        # Perform the login POST.
        login_url = "http://www.nowvideo.eu/login.php?return="
        post = "user="******"&pass="******"®ister=Login"
        headers = []
        headers.append(["User-Agent", USER_AGENT])
        headers.append(["Referer", "http://www.nowvideo.eu/login.php"])
        data = scrapertools.cache_page(login_url, post=post, headers=headers)

        # Download the video page itself.
        data = scrapertools.cache_page(page_url)
        logger.debug("data:" + data)

        # URL to call: http://www.nowvideo.eu/api/player.api.php?user=aaa&file=rxnwy9ku2nwx7&pass=bbb&cid=1&cid2=undefined&key=83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788&cid3=undefined
        # In the page:
        '''
        flashvars.domain="http://www.nowvideo.eu";
        flashvars.file="rxnwy9ku2nwx7";
        flashvars.filekey="83.46.246.226-c7e707c6e20a730c563e349d2333e788";
        flashvars.advURL="0";
        flashvars.autoplay="false";
        flashvars.cid="1";
        flashvars.user="******";
        flashvars.key="bbb";
        flashvars.type="1";
        '''
        flashvar_file = scrapertools.get_match(data, 'flashvars.file="([^"]+)"')
        # filekey is indirected through a JS variable: first read the variable
        # name out of flashvars, then read that variable's value.
        flashvar_filekey = scrapertools.get_match(data, 'flashvars.filekey=([^;]+);')
        flashvar_filekey = scrapertools.get_match(data, 'var ' + flashvar_filekey + '="([^"]+)"')
        flashvar_user = scrapertools.get_match(data, 'flashvars.user="******"]+)"')
        flashvar_key = scrapertools.get_match(data, 'flashvars.key="([^"]+)"')
        flashvar_type = scrapertools.get_match(data, 'flashvars.type="([^"]+)"')

        #http://www.nowvideo.eu/api/player.api.php?user=aaa&file=rxnwy9ku2nwx7&pass=bbb&cid=1&cid2=undefined&key=83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788&cid3=undefined
        url = "http://www.nowvideo.eu/api/player.api.php?user="******"&file=" + flashvar_file + "&pass="******"&cid=1&cid2=undefined&key=" + flashvar_filekey.replace(".", "%2E").replace("-", "%2D") + "&cid3=undefined"
        data = scrapertools.cache_page(url)
        logger.info("data=" + data)
        # The API answers "url=<location>&..."; append the FLASH client marker.
        location = scrapertools.get_match(data, 'url=([^\&]+)&')
        location = location + "?client=FLASH"
        video_urls.append([scrapertools.get_filename_from_url(location)[-4:] + " [premium][nowvideo]", location])
    else:
        # http://www.nowvideo.sx/video/xuntu4pfq0qye
        # Try the embed player page first and look for a direct <source> tag.
        url = page_url.replace("http://www.nowvideo.sx/video/", "http://embed.nowvideo.sx/embed/?v=")
        data = scrapertools.cache_page(url)
        logger.debug("data=" + data)
        videourl = scrapertools.find_single_match(data, '<source src="([^"]+)"')

        if not videourl:
            # No <source> on the embed page; the normal page may interpose a
            # "stepkey" confirmation form that has to be POSTed back.
            data = scrapertools.cache_page(page_url)
            stepkey = scrapertools.find_single_match(data, '<input type="hidden" name="stepkey" value="([^"]+)"')
            if stepkey != "":
                #stepkey=6cd619a0cea72a1cb45a56167c296716&submit=submit
                #<form method="post" action="">
                #<input type="hidden" name="stepkey" value="6cd619a0cea72a1cb45a56167c296716"><Br>
                #<button type="submit" name="submit" class="btn" value="submit">Continue to the video</button>
                data = scrapertools.cache_page(page_url, post="stepkey=" + stepkey + "&submit=submit")
                videourl = scrapertools.find_single_match(data, '<source src="([^"]+)"')

            if not videourl:
                # Last resort: resolve the filekey (same two-step indirection as
                # the premium path) and ask the player API directly.
                flashvar_filekey = scrapertools.get_match(data, 'flashvars.filekey=([^;]+);')
                filekey = scrapertools.get_match(data, 'var ' + flashvar_filekey + '="([^"]+)"')
                '''
                data = unwise.unwise_process(data)
                logger.debug("data="+data)
                filekey = unwise.resolve_var(data, "flashvars.filekey")
                '''
                logger.debug("filekey=" + filekey)

                #get stream url from api
                url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (filekey, video_id)
                data = scrapertools.cache_page(url).replace("flv&", "flv?")
                # Strip the leading "url=" of the API response to get the link.
                videourl = re.sub(r"^url=", "", data)
                logger.debug("data=" + videourl)
                '''
                location = scrapertools.get_match(data,'url=(.+?)&title')
                mobile="http://www.nowvideo.at/mobile/video.php?id="+video_id+"&download=2"
                data = scrapertools.cache_page(mobile)
                location = scrapertools.get_match(data,'<source src="([^"]+)" type="video/flv">')
                video_urls.append( [ "[nowvideo]",location ] )
                '''
        video_urls.append([scrapertools.get_filename_from_url(videourl)[-4:] + " [nowvideo]", videourl])

    for video_url in video_urls:
        logger.info("[nowvideo.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
def process(section, dir_name, input_name=None, status=0, client_agent='manual', download_id='', input_category=None, failure_link=None):
    """Post-process a completed (or failed) movie download for CouchPotato/Radarr.

    section        -- 'CouchPotato' or 'Radarr'; selects the API shape used
    dir_name       -- download directory (or a single downloaded file)
    input_name     -- NZB/torrent name, used for renaming and release lookup
    status         -- downloader status: 0 = success, > 0 = failed
    client_agent   -- name of the download client ('manual' for manual runs)
    download_id    -- download-client id used to match the release
    failure_link   -- optional URL for reporting a failed/corrupt download
    input_category -- config subsection key under core.CFG[section]

    Returns a ProcessResult (status_code 0 on success, 1 on failure).
    Side effects: may transcode, rename, chmod or delete files, and issues
    HTTP requests against the CouchPotato/Radarr API.
    """
    # ---- read per-category configuration ----
    cfg = dict(core.CFG[section][input_category])

    host = cfg['host']
    port = cfg['port']
    apikey = cfg['apikey']
    if section == 'CouchPotato':
        method = cfg['method']
    else:
        method = None
    # added importMode for Radarr config
    if section == 'Radarr':
        import_mode = cfg.get('importMode', 'Move')
    else:
        import_mode = None
    delete_failed = int(cfg['delete_failed'])
    wait_for = int(cfg['wait_for'])
    ssl = int(cfg.get('ssl', 0))
    web_root = cfg.get('web_root', '')
    remote_path = int(cfg.get('remote_path', 0))
    protocol = 'https://' if ssl else 'http://'
    omdbapikey = cfg.get('omdbapikey', '')
    status = int(status)
    # Never extract archives of an already-failed download when NOEXTRACTFAILED is set.
    if status > 0 and core.NOEXTRACTFAILED:
        extract = 0
    else:
        extract = int(cfg.get('extract', 0))

    imdbid = find_imdbid(dir_name, input_name, omdbapikey)
    if section == 'CouchPotato':
        base_url = '{0}{1}:{2}{3}/api/{4}/'.format(protocol, host, port, web_root, apikey)
    if section == 'Radarr':
        base_url = '{0}{1}:{2}{3}/api/command'.format(protocol, host, port, web_root)
        url2 = '{0}{1}:{2}{3}/api/config/downloadClient'.format(protocol, host, port, web_root)
        headers = {'X-Api-Key': apikey}
    if not apikey:
        logger.info('No CouchPotato or Radarr apikey entered. Performing transcoder functions only')
        release = None
    elif server_responding(base_url):
        if section == 'CouchPotato':
            release = get_release(base_url, imdbid, download_id)
        else:
            release = None
    else:
        logger.error('Server did not respond. Exiting', section)
        return ProcessResult(
            message='{0}: Failed to post-process - {0} did not respond.'.format(section),
            status_code=1,
        )

    # pull info from release found if available
    release_id = None
    media_id = None
    downloader = None
    release_status_old = None
    if release:
        try:
            release_id = list(release.keys())[0]
            media_id = release[release_id]['media_id']
            download_id = release[release_id]['download_info']['id']
            downloader = release[release_id]['download_info']['downloader']
            release_status_old = release[release_id]['status']
        except Exception:
            # Release dict may be partial; fall back to the defaults above.
            pass

    if not os.path.isdir(dir_name) and os.path.isfile(dir_name):
        # If the input directory is a file, assume single file download and split dir/name.
        dir_name = os.path.split(os.path.normpath(dir_name))[0]

    # If a subdirectory named after the input exists (sans .nzb), descend into it.
    specific_path = os.path.join(dir_name, str(input_name))
    clean_name = os.path.splitext(specific_path)
    if clean_name[1] == '.nzb':
        specific_path = clean_name[0]
    if os.path.isdir(specific_path):
        dir_name = specific_path

    process_all_exceptions(input_name, dir_name)
    input_name, dir_name = convert_to_ascii(input_name, dir_name)

    # Extract archives only when there are archives and no media files yet.
    if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=False) and list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
        logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name))
        core.extract_files(dir_name)
        input_name, dir_name = convert_to_ascii(input_name, dir_name)

    good_files = 0
    num_files = 0
    # Check video files for corruption
    for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
        num_files += 1
        if transcoder.is_video_good(video, status):
            import_subs(video)
            good_files += 1
    # Reconcile the downloader-reported status with what the files say.
    if num_files and good_files == num_files:
        if status:
            logger.info('Status shown as failed from Downloader, but {0} valid video files found. Setting as success.'.format(good_files), section)
            status = 0
    elif num_files and good_files < num_files:
        logger.info('Status shown as success from Downloader, but corrupt video files found. Setting as failed.', section)
        # NOTE(review): lexicographic compare of the version prefix -- confirm
        # this is the intended NZBGet version check.
        if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
            print('[NZB] MARK=BAD')
        if failure_link:
            failure_link += '&corrupt=true'
        status = 1
    elif client_agent == 'manual':
        logger.warning('No media files found in directory {0} to manually process.'.format(dir_name), section)
        return ProcessResult(
            message='',
            status_code=0,  # Success (as far as this script is concerned)
        )
    else:
        logger.warning('No media files found in directory {0}. Processing this as a failed download'.format(dir_name), section)
        status = 1
        if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
            print('[NZB] MARK=BAD')

    if status == 0:
        # ---- successful download: transcode, tag, then ask the server to scan ----
        if core.TRANSCODE == 1:
            result, new_dir_name = transcoder.transcode_directory(dir_name)
            if result == 0:
                logger.debug('Transcoding succeeded for files in {0}'.format(dir_name), section)
                dir_name = new_dir_name

                chmod_directory = int(str(cfg.get('chmodDirectory', '0')), 8)
                logger.debug('Config setting \'chmodDirectory\' currently set to {0}'.format(oct(chmod_directory)), section)
                if chmod_directory:
                    logger.info('Attempting to set the octal permission of \'{0}\' on directory \'{1}\''.format(oct(chmod_directory), dir_name), section)
                    core.rchmod(dir_name, chmod_directory)
            else:
                logger.error('Transcoding failed for files in {0}'.format(dir_name), section)
                return ProcessResult(
                    message='{0}: Failed to post-process - Transcoding failed'.format(section),
                    status_code=1,
                )
        # Embed the imdb id into filenames (".cp(tt...)") so CP can match them,
        # unless symlink-moving, where renaming would break the links.
        for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
            if not release and '.cp(tt' not in video and imdbid:
                video_name, video_ext = os.path.splitext(video)
                video2 = '{0}.cp({1}){2}'.format(video_name, imdbid, video_ext)
                if not (client_agent in [core.TORRENT_CLIENT_AGENT, 'manual'] and core.USE_LINK == 'move-sym'):
                    logger.debug('Renaming: {0} to: {1}'.format(video, video2))
                    os.rename(video, video2)

        if not apikey:  # If only using Transcoder functions, exit here.
            logger.info('No CouchPotato or Radarr apikey entered. Processing completed.')
            return ProcessResult(
                message='{0}: Successfully post-processed {1}'.format(section, input_name),
                status_code=0,
            )

        params = {
            'media_folder': remote_dir(dir_name) if remote_path else dir_name,
        }

        if download_id and release_id:
            params['downloader'] = downloader or client_agent
            params['download_id'] = download_id

        if section == 'CouchPotato':
            if method == 'manage':
                command = 'manage.update'
                params.clear()
            else:
                command = 'renamer.scan'

            url = '{0}{1}'.format(base_url, command)
            logger.debug('Opening URL: {0} with PARAMS: {1}'.format(url, params), section)
            logger.postprocess('Starting {0} scan for {1}'.format(method, input_name), section)

        if section == 'Radarr':
            payload = {'name': 'DownloadedMoviesScan', 'path': params['media_folder'], 'downloadClientId': download_id, 'importMode': import_mode}
            if not download_id:
                payload.pop('downloadClientId')
            logger.debug('Opening URL: {0} with PARAMS: {1}'.format(base_url, payload), section)
            logger.postprocess('Starting DownloadedMoviesScan scan for {0}'.format(input_name), section)

        try:
            if section == 'CouchPotato':
                r = requests.get(url, params=params, verify=False, timeout=(30, 1800))
            else:
                r = requests.post(base_url, data=json.dumps(payload), headers=headers, stream=True, verify=False, timeout=(30, 1800))
        except requests.ConnectionError:
            logger.error('Unable to open URL', section)
            return ProcessResult(
                message='{0}: Failed to post-process - Unable to connect to {0}'.format(section),
                status_code=1,
            )

        result = r.json()
        if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
            logger.error('Server returned status {0}'.format(r.status_code), section)
            return ProcessResult(
                message='{0}: Failed to post-process - Server returned status {1}'.format(section, r.status_code),
                status_code=1,
            )
        elif section == 'CouchPotato' and result['success']:
            logger.postprocess('SUCCESS: Finished {0} scan for folder {1}'.format(method, dir_name), section)
            if method == 'manage':
                return ProcessResult(
                    message='{0}: Successfully post-processed {1}'.format(section, input_name),
                    status_code=0,
                )
        elif section == 'Radarr':
            logger.postprocess('Radarr response: {0}'.format(result['state']))
            try:
                res = json.loads(r.content)
                scan_id = int(res['id'])
                logger.debug('Scan started with id: {0}'.format(scan_id), section)
            except Exception as e:
                logger.warning('No scan id was returned due to: {0}'.format(e), section)
                scan_id = None
        else:
            logger.error('FAILED: {0} scan was unable to finish for folder {1}. exiting!'.format(method, dir_name), section)
            return ProcessResult(
                message='{0}: Failed to post-process - Server did not return success'.format(section),
                status_code=1,
            )
    else:
        # ---- failed download: report, clean up, and try the next release ----
        core.FAILED = True
        logger.postprocess('FAILED DOWNLOAD DETECTED FOR {0}'.format(input_name), section)
        if failure_link:
            report_nzb(failure_link, client_agent)

        if section == 'Radarr':
            logger.postprocess('SUCCESS: Sending failed download to {0} for CDH processing'.format(section), section)
            return ProcessResult(
                message='{0}: Sending failed download back to {0}'.format(section),
                status_code=1,  # Return as failed to flag this in the downloader.
            )  # Return failed flag, but log the event as successful.

        if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name:
            logger.postprocess('Deleting failed files and folder {0}'.format(dir_name), section)
            remove_dir(dir_name)

        if not release_id and not media_id:
            logger.error('Could not find a downloaded movie in the database matching {0}, exiting!'.format(input_name), section)
            return ProcessResult(
                message='{0}: Failed to post-process - Failed download not found in {0}'.format(section),
                status_code=1,
            )

        if release_id:
            logger.postprocess('Setting failed release {0} to ignored ...'.format(input_name), section)

            url = '{url}release.ignore'.format(url=base_url)
            params = {'id': release_id}

            logger.debug('Opening URL: {0} with PARAMS: {1}'.format(url, params), section)

            try:
                r = requests.get(url, params=params, verify=False, timeout=(30, 120))
            except requests.ConnectionError:
                logger.error('Unable to open URL {0}'.format(url), section)
                return ProcessResult(
                    message='{0}: Failed to post-process - Unable to connect to {0}'.format(section),
                    status_code=1,
                )

            result = r.json()
            if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
                logger.error('Server returned status {0}'.format(r.status_code), section)
                return ProcessResult(
                    status_code=1,
                    message='{0}: Failed to post-process - Server returned status {1}'.format(section, r.status_code),
                )
            elif result['success']:
                logger.postprocess('SUCCESS: {0} has been set to ignored ...'.format(input_name), section)
            else:
                logger.warning('FAILED: Unable to set {0} to ignored!'.format(input_name), section)
                return ProcessResult(
                    message='{0}: Failed to post-process - Unable to set {1} to ignored'.format(section, input_name),
                    status_code=1,
                )

        logger.postprocess('Trying to snatch the next highest ranked release.', section)

        url = '{0}movie.searcher.try_next'.format(base_url)
        logger.debug('Opening URL: {0}'.format(url), section)

        try:
            r = requests.get(url, params={'media_id': media_id}, verify=False, timeout=(30, 600))
        except requests.ConnectionError:
            logger.error('Unable to open URL {0}'.format(url), section)
            return ProcessResult(
                message='{0}: Failed to post-process - Unable to connect to {0}'.format(section),
                status_code=1,
            )

        result = r.json()
        if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
            logger.error('Server returned status {0}'.format(r.status_code), section)
            return ProcessResult(
                message='{0}: Failed to post-process - Server returned status {1}'.format(section, r.status_code),
                status_code=1,
            )
        elif result['success']:
            logger.postprocess('SUCCESS: Snatched the next highest release ...', section)
            return ProcessResult(
                message='{0}: Successfully snatched next highest release'.format(section),
                status_code=0,
            )
        else:
            logger.postprocess('SUCCESS: Unable to find a new release to snatch now. CP will keep searching!', section)
            return ProcessResult(
                status_code=0,
                message='{0}: No new release found now. {0} will keep searching'.format(section),
            )

    # Added a release that was not in the wanted list so confirm rename successful by finding this movie media.list.
    if not release:
        download_id = None  # we don't want to filter new releases based on this.

    # we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing.
    timeout = time.time() + 60 * wait_for
    while time.time() < timeout:  # only wait 2 (default) minutes, then return.
        logger.postprocess('Checking for status change, please stand by ...', section)
        if section == 'CouchPotato':
            release = get_release(base_url, imdbid, download_id, release_id)
            scan_id = None
        else:
            release = None
        if release:
            try:
                release_id = list(release.keys())[0]
                title = release[release_id]['title']
                release_status_new = release[release_id]['status']
                if release_status_old is None:  # we didn't have a release before, but now we do.
                    logger.postprocess('SUCCESS: Movie {0} has now been added to CouchPotato with release status of [{1}]'.format(title, str(release_status_new).upper()), section)
                    return ProcessResult(
                        message='{0}: Successfully post-processed {1}'.format(section, input_name),
                        status_code=0,
                    )

                if release_status_new != release_status_old:
                    logger.postprocess('SUCCESS: Release for {0} has now been marked with a status of [{1}]'.format(title, str(release_status_new).upper()), section)
                    return ProcessResult(
                        message='{0}: Successfully post-processed {1}'.format(section, input_name),
                        status_code=0,
                    )
            except Exception:
                # Release entry may be incomplete mid-rename; retry next loop.
                pass
        elif scan_id:
            # Radarr path: poll the command endpoint for the scan's completion.
            url = '{0}/{1}'.format(base_url, scan_id)
            command_status = command_complete(url, params, headers, section)
            if command_status:
                logger.debug('The Scan command return status: {0}'.format(command_status), section)
                if command_status in ['completed']:
                    logger.debug('The Scan command has completed successfully. Renaming was successful.', section)
                    return ProcessResult(
                        message='{0}: Successfully post-processed {1}'.format(section, input_name),
                        status_code=0,
                    )
                elif command_status in ['failed']:
                    logger.debug('The Scan command has failed. Renaming was not successful.', section)
                    # return ProcessResult(
                    #     message='{0}: Failed to post-process {1}'.format(section, input_name),
                    #     status_code=1,
                    # )

        # Success can also be inferred from the directory being consumed.
        if not os.path.isdir(dir_name):
            logger.postprocess('SUCCESS: Input Directory [{0}] has been processed and removed'.format(dir_name), section)
            return ProcessResult(
                status_code=0,
                message='{0}: Successfully post-processed {1}'.format(section, input_name),
            )

        elif not list_media_files(dir_name, media=True, audio=False, meta=False, archives=True):
            logger.postprocess('SUCCESS: Input Directory [{0}] has no remaining media files. This has been fully processed.'.format(dir_name), section)
            return ProcessResult(
                message='{0}: Successfully post-processed {1}'.format(section, input_name),
                status_code=0,
            )

        # pause and let CouchPotatoServer/Radarr catch its breath
        time.sleep(10 * wait_for)

    # The status hasn't changed. we have waited wait_for minutes which is more than enough. uTorrent can resume seeding now.
    if section == 'Radarr' and completed_download_handling(url2, headers, section=section):
        logger.debug('The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.'.format(section), section)
        return ProcessResult(
            message='{0}: Complete DownLoad Handling is enabled. Passing back to {0}'.format(section),
            status_code=status,
        )
    logger.warning('{0} does not appear to have changed status after {1} minutes, Please check your logs.'.format(input_name, wait_for), section)
    return ProcessResult(
        status_code=1,
        message='{0}: Failed to post-process - No change in status'.format(section),
    )
def findvideos(item):
    """Collect playable server items for a library entry.

    Scans the directory holding *item*'s .strm file for the per-channel
    ``<title>[<channel>].json`` files, optionally asks the user which channel
    to use, runs each channel's ``findvideos`` (or the generic server finder)
    and returns the aggregated list of playable Items.

    item -- library Item; must carry contentTitle and strm_path.
    Returns a list of Item objects (empty on missing params or user cancel).
    Side effects: rewrites item.strm_path to an absolute path and sets
    item.nfo; may play a local download directly via platformtools.
    """
    logger.info()
    # logger.debug("item:\n" + item.tostring('\n'))
    itemlist = []
    list_canales = {}
    item_local = None

    if not item.contentTitle or not item.strm_path:
        logger.debug("No se pueden buscar videos por falta de parametros")
        return []

    # Strip filesystem-invalid characters to match the on-disk json names.
    content_title = filter(lambda c: c not in ":*?<>|\/", item.contentTitle).strip().lower()

    if item.contentType == 'movie':
        item.strm_path = filetools.join(library.MOVIES_PATH, item.strm_path)
        path_dir = os.path.dirname(item.strm_path)
        item.nfo = filetools.join(path_dir, os.path.basename(path_dir) + ".nfo")
    else:
        item.strm_path = filetools.join(library.TVSHOWS_PATH, item.strm_path)
        path_dir = os.path.dirname(item.strm_path)
        item.nfo = filetools.join(path_dir, 'tvshow.nfo')

    # Map channel name -> json path; filenames look like "<title>[<channel>].json".
    for fd in filetools.listdir(path_dir):
        if fd.endswith('.json'):
            contenido, nom_canal = fd[:-6].split('[')
            if (contenido.startswith(content_title) or item.contentType == 'movie') and nom_canal not in \
                    list_canales.keys():
                list_canales[nom_canal] = filetools.join(path_dir, fd)

    num_canales = len(list_canales)

    # logger.debug(str(list_canales))

    # 'descargas' (downloads) entries point at a local file, handled specially.
    if 'descargas' in list_canales:
        json_path = list_canales['descargas']
        item_json = Item().fromjson(filetools.read(json_path))
        item_json.contentChannel = "local"
        # Support relative paths for downloads.
        if filetools.is_relative(item_json.url):
            item_json.url = filetools.join(library.LIBRARY_PATH, item_json.url)
        del list_canales['descargas']

        # Check the downloaded video has not been deleted.
        if filetools.exists(item_json.url):
            item_local = item_json.clone(action='play')
            itemlist.append(item_local)
        else:
            num_canales -= 1

    filtro_canal = ''
    if num_canales > 1 and config.get_setting("ask_channel", "biblioteca") == True:
        opciones = ["Mostra solo link %s" % k.capitalize() for k in list_canales.keys()]
        opciones.insert(0, "Mosta tutti i collegamenti")
        if item_local:
            opciones.append(item_local.title)

        from platformcode import platformtools
        index = platformtools.dialog_select(config.get_localized_string(30163), opciones)
        if index < 0:
            return []
        elif item_local and index == len(opciones) - 1:
            # Last option is the local download: play it straight away.
            filtro_canal = 'descargas'
            platformtools.play_video(item_local)
        elif index > 0:
            filtro_canal = opciones[index].replace("Mostra solo link ", "")
            itemlist = []

    for nom_canal, json_path in list_canales.items():
        if filtro_canal and filtro_canal != nom_canal.capitalize():
            continue

        # Import the channel module for this entry.
        try:
            channel = __import__('channels.%s' % nom_canal, fromlist=["channels.%s" % nom_canal])
        except ImportError:
            exec "import channels." + nom_canal + " as channel"

        item_json = Item().fromjson(filetools.read(json_path))
        list_servers = []

        try:
            # FILTERTOOLS
            # If the channel supports language filtering, pass the stored show
            # name so the filter matches correctly.
            if "list_idiomas" in item_json:
                # When coming from the pelisalacarta library.
                if "library_filter_show" in item:
                    item_json.show = item.library_filter_show.get(nom_canal, "")

            # Run findvideos, either the channel's own or the common one.
            if hasattr(channel, 'findvideos'):
                list_servers = getattr(channel, 'findvideos')(item_json)
            else:
                from core import servertools
                list_servers = servertools.find_video_items(item_json)
        except Exception as ex:
            logger.error("Ha fallado la funcion findvideos para el canal %s" % nom_canal)
            template = "An exception of type {0} occured. Arguments:\n{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            logger.error(message)

        # Retitle the servers with the channel name prefix, and copy over the
        # infoLabels and images from the item when the server lacks them.
        for server in list_servers:
            if not server.action:  # skip label entries
                continue

            server.contentChannel = server.channel
            server.channel = "biblioteca"
            server.nfo = item.nfo
            server.strm_path = item.strm_path

            # Prepend the channel name if configured to do so.
            if config.get_setting("quit_channel_name", "biblioteca") == 0:
                server.title = "%s: %s" % (nom_canal.capitalize(), server.title)

            server.infoLabels = item_json.infoLabels

            if not server.thumbnail:
                server.thumbnail = item.thumbnail

            # logger.debug("server:\n%s" % server.tostring('\n'))
            itemlist.append(server)

    # return sorted(itemlist, key=lambda it: it.title.lower())
    return itemlist
def series(item):
    """List the anime series found on item.url.

    Scrapes the series index page, building one Item per show (action
    "episodios") plus a pagination Item (action "series") when a "siguiente"
    link is present.

    item -- Item whose url points at the series listing page.
    Returns a list of Item objects (empty when nothing matches).
    """
    logger.info()

    # Download the page.
    data = scrapertools.cache_page(item.url)
    # logger.info("data="+data)

    # Each entry is an <a href> wrapping a <span> title, a sinopsis <div> and a
    # thumbnail <img> (see the site markup: anim-list / anim-sinopsis blocks).
    patron = '(<a href="[^"]+"[^<]+'
    patron += '<span[^<]+</span[^<]+'
    patron += '<div id="[^<]+<div[^<]+</div[^<]+<h5.*?</a)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []

    for match in matches:
        scrapedurl = scrapertools.find_single_match(match, '<a href="([^"]+)"')
        scrapedplot = scrapertools.find_single_match(match, '</h6>([^<]+)</div>')
        scrapedthumbnail = scrapertools.find_single_match(match, 'src="([^"]+)"')
        scrapedtitle = scrapertools.find_single_match(match, '<spa[^>]+>([^<]+)</spa')

        title = scrapedtitle.strip()
        # Site links are relative; resolve them against the listing URL.
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        plot = scrapertools.htmlclean(scrapedplot).strip()
        show = title
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

        itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url,
                             thumbnail=thumbnail, plot=plot, show=show, fulltitle=title,
                             fanart=thumbnail, viewmode="movies_with_plot", folder=True))

    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="next">siguiente >>')
    if next_page != "":
        # BUG FIX: viewmode was previously passed as a keyword argument to
        # urlparse.urljoin() (a TypeError at runtime, since urljoin only takes
        # base, url and allow_fragments); it belongs on the Item constructor.
        itemlist.append(Item(channel=item.channel, action="series",
                             title=">> Página siguiente",
                             url=urlparse.urljoin(item.url, next_page),
                             viewmode="movie_with_plot", folder=True))

    return itemlist