def test_notify(self, username, blacklist_name=None):
    """
    Sends a test notification to trakt with the given authentication info and returns a
    boolean representing success.

    username: The username to use
    blacklist_name: slug of trakt list used to hide not interested show

    Returns: a status string describing success or failure
    """
    try:
        trakt_api = TraktAPI(sickbeard.SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
        trakt_api.validateAccount()
        # NOTE: `blacklist_name and blacklist_name is not None` was redundant;
        # a plain truthiness check is equivalent
        if blacklist_name:
            trakt_lists = trakt_api.traktRequest("users/" + username + "/lists")
            for trakt_list in trakt_lists:
                if trakt_list['ids']['slug'] == blacklist_name:
                    return "Test notice sent successfully to Trakt"
            # no list matched the requested slug (the old `found` flag was
            # never set and therefore dead)
            return "Trakt blacklist doesn't exists"
        else:
            return "Test notice sent successfully to Trakt"
    except (traktException, traktAuthException, traktServerBusy) as e:
        logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)
        return "Test notice failed to Trakt: %s" % ex(e)
def test_encoding(self):
    """
    Test that joining sanitized unicode show names onto a root dir yields
    unicode paths under the detected system encoding.
    """
    root_dir = 'C:\\Temp\\TV'
    strings = [u'Les Enfants De La T\xe9l\xe9', u'RT� One']

    sickbeard.SYS_ENCODING = None

    try:
        locale.setlocale(locale.LC_ALL, "")
        sickbeard.SYS_ENCODING = locale.getpreferredencoding()
    except (locale.Error, IOError):
        pass

    # For OSes that are poorly configured I'll just randomly force UTF-8
    if not sickbeard.SYS_ENCODING or sickbeard.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
        sickbeard.SYS_ENCODING = 'UTF-8'

    for test in strings:
        # BUG FIX: the assertion used to live inside a broad try/except that
        # also swallowed AssertionError, so this test could never fail.
        # Only path construction is best-effort now; the assertion propagates.
        try:
            show_dir = ek(os.path.join, root_dir, sanitize_filename(test))
        except Exception as error:  # pylint: disable=broad-except
            ex(error)
            continue
        self.assertTrue(isinstance(show_dir, unicode))
def _makeURL(self, result):
    """
    Build the list of candidate download URLs and the local target filename
    for a search result.

    :param result: search result with ``url`` and ``name`` attributes
    :return: tuple of (urls, filename); urls is empty when magnet parsing fails
    """
    urls = []
    filename = u''

    if result.url.startswith('magnet'):
        try:
            torrent_hash = re.findall(r'urn:btih:([\w]{32,40})', result.url)[0].upper()

            try:
                torrent_name = re.findall('dn=([^&]+)', result.url)[0]
            except Exception:
                torrent_name = 'NO_DOWNLOAD_NAME'

            # base32 info-hashes are 32 chars; normalize to hex (base16)
            if len(torrent_hash) == 32:
                torrent_hash = b16encode(b32decode(torrent_hash)).upper()

            if not torrent_hash:
                # BUG FIX: `ex()` expects an exception, not a URL string;
                # log the raw url directly
                logger.log(u"Unable to extract torrent hash from magnet: " + result.url, logger.ERROR)
                return urls, filename

            urls = [x.format(torrent_hash=torrent_hash, torrent_name=torrent_name) for x in self.btCacheURLS]
        except Exception:
            logger.log(u"Unable to extract torrent hash or name from magnet: " + result.url, logger.ERROR)
            return urls, filename
    else:
        urls = [result.url]

    if self.providerType == GenericProvider.TORRENT:
        filename = ek(os.path.join, sickbeard.TORRENT_DIR, sanitize_filename(result.name) + '.' + self.providerType)
    elif self.providerType == GenericProvider.NZB:
        filename = ek(os.path.join, sickbeard.NZB_DIR, sanitize_filename(result.name) + '.' + self.providerType)

    return urls, filename
def _run_extra_scripts(self, ep_obj):
    """
    Executes any extra scripts defined in the config.

    :param ep_obj: The object to use when calling the extra script
    """
    for curScriptName in sickbeard.EXTRA_SCRIPTS:
        # generate a safe command line string to execute the script and provide all the parameters
        script_cmd = [piece for piece in re.split("( |\\\".*?\\\"|'.*?')", curScriptName) if piece.strip()]
        script_cmd[0] = ek(os.path.abspath, script_cmd[0])
        self._log(u"Absolute path to script: " + script_cmd[0], logger.DEBUG)

        script_cmd = script_cmd + [ep_obj.location, self.file_path, str(ep_obj.show.indexerid),
                                   str(ep_obj.season), str(ep_obj.episode), str(ep_obj.airdate)]

        # use subprocess to run the command and capture output
        self._log(u"Executing command " + str(script_cmd))
        try:
            p = subprocess.Popen(script_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR)
            out, _ = p.communicate()  # @UnusedVariable
            self._log(u"Script result: " + str(out), logger.DEBUG)
        except Exception as e:
            # previously OSError had its own, byte-identical handler using the
            # legacy `except OSError, e` syntax; OSError is an Exception
            # subclass so a single modern handler is equivalent
            self._log(u"Unable to run extra_script: " + ex(e))
def _send_to_kodi(command, host=None, username=None, password=None, dest_app="KODI"):  # pylint: disable=too-many-arguments
    """Handles communication to KODI servers via HTTP API

    Args:
        command: Dictionary of field/data pairs, encoded via urllib and passed to the KODI API via HTTP
        host: KODI webserver host:port
        username: KODI webserver username
        password: KODI webserver password

    Returns:
        Returns response.result for successful commands or False if there was an error

    """

    # fall back to the configured credentials when none were supplied
    username = username or sickbeard.KODI_USERNAME
    password = password or sickbeard.KODI_PASSWORD

    if not host:
        logger.log(u'No %s host passed, aborting update' % dest_app, logger.WARNING)
        return False

    # the legacy HTTP API expects byte strings, so encode any unicode values
    for key in command:
        if isinstance(command[key], unicode):
            command[key] = command[key].encode('utf-8')

    enc_command = urllib.urlencode(command)
    logger.log(u"%s encoded API command: %r" % (dest_app, enc_command), logger.DEBUG)

    # url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, enc_command)  # maybe need for old plex?
    url = 'http://%s/kodiCmds/kodiHttp/?%s' % (host, enc_command)
    try:
        req = urllib2.Request(url)

        # attach a basic-auth header when a password is configured
        if password:
            base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
            req.add_header("Authorization", "Basic %s" % base64string)
            logger.log(u"Contacting %s (with auth header) via url: %s" % (dest_app, ss(url)), logger.DEBUG)
        else:
            logger.log(u"Contacting %s via url: %s" % (dest_app, ss(url)), logger.DEBUG)

        try:
            response = urllib2.urlopen(req)
        except (httplib.BadStatusLine, urllib2.URLError) as e:
            logger.log(u"Couldn't contact %s HTTP at %r : %r" % (dest_app, url, ex(e)), logger.DEBUG)
            return False

        result = response.read().decode(sickbeard.SYS_ENCODING)
        response.close()

        logger.log(u"%s HTTP response: %s" % (dest_app, result.replace('\n', '')), logger.DEBUG)
        return result

    except Exception as e:
        logger.log(u"Couldn't contact %s HTTP at %r : %r" % (dest_app, url, ex(e)), logger.DEBUG)
        return False
def _send_to_kodi(self, command, host=None, username=None, password=None):
    """Handles communication to KODI servers via HTTP API

    Args:
        command: Dictionary of field/data pairs, encoded via urllib and passed to the KODI API via HTTP
        host: KODI webserver host:port
        username: KODI webserver username
        password: KODI webserver password

    Returns:
        Returns response.result for successful commands or False if there was an error

    """

    # use the configured credentials when none are passed in
    username = username or sickbeard.KODI_USERNAME
    password = password or sickbeard.KODI_PASSWORD

    if not host:
        logging.warning('No KODI host passed, aborting update')
        return False

    # the legacy HTTP API expects byte strings
    for key in command:
        if isinstance(command[key], unicode):
            command[key] = command[key].encode('utf-8')

    enc_command = urllib.urlencode(command)
    logging.debug("KODI encoded API command: " + enc_command)

    url = 'http://%s/kodiCmds/kodiHttp/?%s' % (host, enc_command)
    try:
        req = urllib2.Request(url)

        # attach a basic-auth header when a password is configured
        if password:
            base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
            req.add_header("Authorization", "Basic %s" % base64string)
            logging.debug("Contacting KODI (with auth header) via url: " + ss(url))
        else:
            logging.debug("Contacting KODI via url: " + ss(url))

        try:
            response = urllib2.urlopen(req)
        except (httplib.BadStatusLine, urllib2.URLError) as e:
            logging.debug("Couldn't contact KODI HTTP at %r : %r" % (url, ex(e)))
            return False

        result = response.read().decode(sickbeard.SYS_ENCODING)
        response.close()

        logging.debug("KODI HTTP response: " + result.replace('\n', ''))
        return result

    except Exception as e:
        logging.debug("Couldn't contact KODI HTTP at %r : %r" % (url, ex(e)))
        return False
def mass_action(self, querylist=None, logTransaction=False, fetchall=False):
    """
    Execute multiple queries

    :param querylist: list of queries
    :param logTransaction: Boolean to wrap all in one transaction
    :param fetchall: Boolean, when using a select query force returning all results
    :return: list of results
    """
    assert hasattr(querylist, '__iter__'), 'You passed a non-iterable to mass_action: {0!r}'.format(querylist)

    # remove None types
    querylist = [i for i in querylist if i]

    sql_results = []
    attempt = 0

    # serialize access per database file; the shared connection may be used
    # by several DBConnection instances, so the row factory is reconfigured
    # under the lock before running the batch
    with db_locks[self.filename]:
        self._set_row_factory()
        # retry up to 5 times on transient "locked"/"unable to open" errors
        while attempt < 5:
            try:
                for qu in querylist:
                    # single-element entries are bare SQL; two-element
                    # entries carry bound parameters as qu[1]
                    if len(qu) == 1:
                        if logTransaction:
                            logger.log(qu[0], logger.DEBUG)
                        sql_results.append(self._execute(qu[0], fetchall=fetchall))
                    elif len(qu) > 1:
                        if logTransaction:
                            logger.log(qu[0] + " with args " + str(qu[1]), logger.DEBUG)
                        sql_results.append(self._execute(qu[0], qu[1], fetchall=fetchall))
                self.connection.commit()
                logger.log(u"Transaction with " + str(len(querylist)) + u" queries executed", logger.DEBUG)

                # finished
                break
            except sqlite3.OperationalError as e:
                # roll back the partial batch before deciding whether to retry
                sql_results = []
                if self.connection:
                    self.connection.rollback()
                if "unable to open database file" in e.args[0] or "database is locked" in e.args[0]:
                    logger.log(u"DB error: " + ex(e), logger.WARNING)
                    attempt += 1
                    time.sleep(1)
                else:
                    logger.log(u"DB error: " + ex(e), logger.ERROR)
                    raise
            except sqlite3.DatabaseError as e:
                # non-recoverable database error: roll back and propagate
                sql_results = []
                if self.connection:
                    self.connection.rollback()
                logger.log(u"Fatal error executing query: " + ex(e), logger.ERROR)
                raise

        # time.sleep(0.02)

        return sql_results
def mass_action(self, querylist=None, logTransaction=False, fetchall=False):
    """
    Execute multiple queries

    :param querylist: list of queries
    :param logTransaction: Boolean to wrap all in one transaction
    :param fetchall: Boolean, when using a select query force returning all results
    :return: list of results
    """
    # BUG FIX: the default used to be a shared mutable list ([])
    querylist = querylist or []

    with self.lock:
        querylist = [i for i in querylist if i is not None and len(i)]
        sqlResult = []
        attempt = 0

        # retry up to 5 times on transient "locked"/"unable to open" errors.
        # BUG FIX: commit/break used to sit in a `finally` block, so the loop
        # always exited after the first attempt and — worse — the `break` in
        # `finally` silently swallowed the re-raised exceptions.
        while attempt < 5:
            try:
                for qu in querylist:
                    # single-element entries are bare SQL; two-element
                    # entries carry bound parameters as qu[1]
                    if len(qu) == 1:
                        if logTransaction:
                            logging.debug(qu[0])
                        sqlResult.append(self.execute(qu[0], fetchall=fetchall))
                    elif len(qu) > 1:
                        if logTransaction:
                            logging.debug(qu[0] + " with args " + str(qu[1]))
                        sqlResult.append(self.execute(qu[0], qu[1], fetchall=fetchall))
                self.commit()
                logging.debug("Transaction with " + str(len(querylist)) + " queries executed")
                break
            except sqlite3.OperationalError as e:
                sqlResult = []
                if self.connection:
                    self.connection.rollback()
                if "unable to open database file" in e.args[0] or "database is locked" in e.args[0]:
                    logging.warning("DB error: {}".format(ex(e)))
                    attempt += 1
                    time.sleep(1)
                else:
                    logging.error("DB error: {}".format(ex(e)))
                    raise
            except sqlite3.DatabaseError as e:
                sqlResult = []
                if self.connection:
                    self.connection.rollback()
                logging.error("Fatal error executing query: {}".format(ex(e)))
                raise

        return sqlResult
def _processUpgrade(connection, upgradeClass): instance = upgradeClass(connection) logger.log(u"Checking " + prettyName(upgradeClass.__name__) + " database upgrade", logger.DEBUG) if not instance.test(): logger.log(u"Database upgrade required: " + prettyName(upgradeClass.__name__), logger.DEBUG) try: instance.execute() except sqlite3.DatabaseError, e: # attemping to restore previous DB backup and perform upgrade try: instance.execute() except: restored = False result = connection.select("SELECT db_version FROM db_version") if result: version = int(result[0]["db_version"]) # close db before attempting restore connection.close() if restoreDatabase(version): # initialize the main SB database upgradeDatabase(DBConnection(), sickbeard.mainDB.InitialSchema) restored = True if not restored: print "Error in " + str(upgradeClass.__name__) + ": " + ex(e) raise logger.log(upgradeClass.__name__ + " upgrade completed", logger.DEBUG)
def process_failed(dirName, nzbName, result):
    """Process a download that did not complete correctly"""

    # nothing to do unless failed-download handling is enabled
    if not sickbeard.USE_FAILED_DOWNLOADS:
        return

    processor = None
    process_fail_message = ""
    try:
        processor = failedProcessor.FailedProcessor(dirName, nzbName)
        result.result = processor.process()
    except FailedPostProcessingFailedException as e:
        result.result = False
        process_fail_message = ex(e)

    if processor:
        result.output += processor.log

    if sickbeard.DELETE_FAILED and result.result:
        # single-file torrents only need the file removed; otherwise drop the folder
        if get_torrent_type(dirName, nzbName) == TorrentType.SINGLE_FILE:
            delete_files(dirName, [nzbName], result)
        elif delete_folder(dirName, check_empty=False):
            result.output += logHelper(u"Deleted folder: " + dirName, logger.DEBUG)

    if result.result:
        result.output += logHelper(u"Failed Download Processing succeeded: (" + str(nzbName) + ", " + dirName + ")")
    else:
        result.output += logHelper(
            u"Failed Download Processing failed: (" + str(nzbName) + ", " + dirName + "): " + process_fail_message,
            logger.WARNING)
def _retrieve_show_image(self, image_type, show_obj, which=None):
    """
    Gets an image URL from theTVDB.com and TMDB.com, downloads it and returns the data.

    image_type: type of image to retrieve (currently supported: fanart, poster, banner)
    show_obj: a TVShow object to use when searching for the image
    which: optional, a specific numbered poster to look for

    Returns: the binary image data if available, or else None
    """
    image_url = None
    indexer_lang = show_obj.lang

    try:
        # There's gotta be a better way of doing this but we don't wanna
        # change the language value elsewhere
        lINDEXER_API_PARMS = sickbeard.indexerApi(show_obj.indexer).api_params.copy()
        lINDEXER_API_PARMS['banners'] = True

        # only override the language when it differs from the configured default
        if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
            lINDEXER_API_PARMS['language'] = indexer_lang

        if show_obj.dvdorder != 0:
            lINDEXER_API_PARMS['dvdorder'] = True

        t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS)
        indexer_show_obj = t[show_obj.indexerid]
    except (sickbeard.indexer_error, IOError), e:
        logger.log(u"Unable to look up show on " + sickbeard.indexerApi(
            show_obj.indexer).name + ", not downloading images: " + ex(e), logger.WARNING)
        logger.log(u"Indexer " + sickbeard.indexerApi(show_obj.indexer).name + "maybe experiencing some problems. Try again later", logger.DEBUG)
        return None
    # NOTE(review): the remainder of this method (selecting the image URL from
    # indexer_show_obj and downloading it) is not visible in this chunk — confirm
    # against the full file before assuming it ends here.
def _season_banners_dict(self, show_obj, season): """ Should return a dict like: result = {<season number>: {1: '<url 1>', 2: <url 2>, ...},} """ # This holds our resulting dictionary of season art result = {} indexer_lang = show_obj.lang try: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere lINDEXER_API_PARMS = sickbeard.indexerApi(show_obj.indexer).api_params.copy() lINDEXER_API_PARMS['banners'] = True if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE: lINDEXER_API_PARMS['language'] = indexer_lang t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS) indexer_show_obj = t[show_obj.indexerid] except (sickbeard.indexer_error, IOError), e: logger.log(u"Unable to look up show on " + sickbeard.indexerApi( show_obj.indexer).name + ", not downloading images: " + ex(e), logger.WARNING) logger.log(u"Indexer " + sickbeard.indexerApi(show_obj.indexer).name + "maybe experiencing some problems. Try again later", logger.DEBUG) return result
def _checkSabResponse(f):
    """
    Check response from SAB

    :param f: Response from SAB
    :return: a tuple of (Boolean, string) which is True if SAB is not reporting an error
    """
    try:
        result = f.readlines()
    except Exception as e:
        # BUG FIX: log message was missing the separator after "SAB"
        logger.log(u"Error trying to get result from SAB: {}".format(ex(e)), logger.ERROR)
        return False, "Error from SAB"

    if not result:
        logger.log(u"No data returned from SABnzbd, NZB not sent", logger.ERROR)
        return False, "No data from SAB"

    sabText = result[0].strip()
    sabJson = {}
    try:
        sabJson = json.loads(sabText)
    except ValueError:
        # non-JSON (plain text) responses are handled via sabText below
        pass

    if sabText == "Missing authentication":
        logger.log(u"Incorrect username/password sent to SAB", logger.ERROR)
        return False, "Incorrect username/password sent to SAB"
    elif 'error' in sabJson:
        logger.log(sabJson['error'], logger.ERROR)
        return False, sabJson['error']
    else:
        return True, sabText
def _verify_download(self, file_name=None):
    """
    Checks the saved file to see if it was actually valid, if not then consider the download a failure.
    """
    # only torrent files get this primitive sanity check; anything else passes
    if not file_name.endswith(GenericProvider.TORRENT):
        return True

    try:
        parser = createParser(file_name)
        if parser:
            # pylint: disable=protected-access
            # Access to a protected member of a client class
            mime_type = parser._getMimeType()
            try:
                parser.stream._input.close()
            except Exception:
                pass
            if mime_type == 'application/x-bittorrent':
                return True
    except Exception as e:
        logger.log(u"Failed to validate torrent file: " + ex(e), logger.DEBUG)

    logger.log(u"Result is not a valid torrent file", logger.DEBUG)
    return False
def updateCache(self):
    """Refresh this provider's cache from its RSS feed when an update is due."""
    # bail out early while the cache is still fresh
    if not self.shouldUpdate():
        return

    try:
        data = self._getRSSData()
        if self._checkAuth(data):
            # wipe stale entries and record the refresh time
            self._clearCache()
            self.setLastUpdate()

            # turn every feed entry into a DB action, dropping unparseable ones
            parsed = (self._parseItem(item) for item in data['entries'] or [])
            cl = [ci for ci in parsed if ci is not None]
            if len(cl) > 0:
                myDB = self._getDB()
                myDB.mass_action(cl)

    except AuthException as e:
        logger.log(u"Authentication error: " + ex(e), logger.ERROR)
    except Exception as e:
        logger.log(u"Error while searching " + self.provider.name + ", skipping: " + repr(e), logger.DEBUG)
def addEpisodeToTraktWatchList(self):
    """
    Add locally snatched/wanted episodes to the user's Trakt watchlist
    (no-op unless Trakt use and watchlist syncing are both enabled).
    """
    if sickbeard.TRAKT_SYNC_WATCHLIST and sickbeard.USE_TRAKT:
        logger.log(u"WATCHLIST::ADD::START - Look for Episodes to Add to Trakt Watchlist", logger.DEBUG)

        myDB = db.DBConnection()
        # select episodes whose status is snatched, snatched-proper or wanted
        sql_selection = 'select tv_shows.indexer, tv_shows.startyear, showid, show_name, season, episode from tv_episodes,tv_shows where tv_shows.indexer_id = tv_episodes.showid and tv_episodes.status in (' + ','.join([str(x) for x in Quality.SNATCHED + Quality.SNATCHED_PROPER + [WANTED]]) + ')'
        episodes = myDB.select(sql_selection)

        if episodes is not None:
            trakt_data = []

            for cur_episode in episodes:
                trakt_id = sickbeard.indexerApi(cur_episode["indexer"]).config['trakt_id']
                # only queue episodes not already on the Trakt watchlist
                if not self._checkInList(trakt_id, str(cur_episode["showid"]), str(cur_episode["season"]), str(cur_episode["episode"])):
                    logger.log(u"Adding Episode %s S%02dE%02d to watchlist" %
                               (cur_episode["show_name"], cur_episode["season"], cur_episode["episode"]), logger.DEBUG)
                    trakt_data.append((cur_episode["showid"], cur_episode["indexer"], cur_episode["show_name"],
                                       cur_episode["startyear"], cur_episode["season"], cur_episode["episode"]))

            if len(trakt_data):
                try:
                    # push all missing episodes in one bulk request, then
                    # refresh the local copy of the watchlist
                    data = self.trakt_bulk_data_generate(trakt_data)
                    self.trakt_api.traktRequest("sync/watchlist", data, method='POST')
                    self._getEpisodeWatchlist()
                except traktException as e:
                    logger.log(u"Could not connect to Trakt service. Error %s" % ex(e), logger.WARNING)

        logger.log(u"WATCHLIST::ADD::FINISH - Look for Episodes to Add to Trakt Watchlist", logger.DEBUG)
def _notify_emby(self, message, host=None, emby_apikey=None):
    """Handles notifying Emby host via HTTP API

    Returns:
        Returns True for no issue or False if there was an error

    """

    # fall back to the configured host/API key when not supplied
    host = host or sickbeard.EMBY_HOST
    emby_apikey = emby_apikey or sickbeard.EMBY_APIKEY

    url = 'http://%s/emby/Notifications/Admin' % host
    data = json.dumps({
        'Name': 'SickRage',
        'Description': message,
        'ImageUrl': 'https://raw.githubusercontent.com/SickRage/SickRage/master/gui/slick/images/sickrage-shark-mascot.png'
    })
    try:
        req = urllib2.Request(url, data)
        req.add_header('X-MediaBrowser-Token', emby_apikey)
        req.add_header('Content-Type', 'application/json')

        response = urllib2.urlopen(req)
        result = response.read()
        response.close()

        logger.log(u'EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
        return True

    except (urllib2.URLError, IOError) as e:
        logger.log(u'EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING)
        return False
def run_subs_scripts(video_path, scripts, *args):
    """Execute subtitle scripts.

    :param video_path: the video path
    :type video_path: str
    :param scripts: the script commands to be executed
    :type scripts: list of str
    :param args: the arguments to be passed to the script
    :type args: list of str
    """
    for script_name in scripts:
        # split the configured command line, keeping quoted chunks intact,
        # then append the extra arguments as strings
        parts = re.split("( |\\\".*?\\\"|'.*?')", script_name)
        script_cmd = [piece for piece in parts if piece.strip()] + [str(arg) for arg in args]

        logger.info(u'Running subtitle %s-script: %s', 'extra' if len(args) > 1 else 'pre', script_name)

        # use subprocess to run the command and capture output
        logger.info(u'Executing command: %s', script_cmd)
        try:
            proc = subprocess.Popen(script_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR)
            out, _ = proc.communicate()  # @UnusedVariable
            logger.debug(u'Script result: %s', out)
        except Exception as error:
            logger.info(u'Unable to run subtitles script: %s', ex(error))

    invalidate_video_cache(video_path)
def updateCache(self):
    """Refresh this provider's cache; returns False on failure, True otherwise."""
    # nothing to do while the cache is still fresh
    if not self.shouldUpdate():
        return True

    try:
        data = self._getRSSData()
        if not self._checkAuth(data):
            return False

        # wipe stale entries and record the refresh time
        self._clearCache()
        self.setLastUpdate()

        # turn every feed entry into a DB action, dropping unparseable ones
        parsed = (self._parseItem(item) for item in data[b'entries'])
        cl = [ci for ci in parsed if ci is not None]
        if len(cl) > 0:
            myDB = self._getDB()
            myDB.mass_action(cl)

    except AuthException as e:
        logging.error("Authentication error: {}".format(ex(e)))
        return False
    except Exception as e:
        logging.debug("Error while searching {}, skipping: {}".format(self.provider.name, repr(e)))
        return False

    return True
def update_cache(self):
    """Refresh this provider's cache from its RSS feed when an update is due."""
    # bail out early while the cache is still fresh
    if not self.should_update():
        return

    try:
        data = self._get_rss_data()
        if self._check_auth(data):
            # wipe stale rows and record the refresh time
            self._clear_cache()
            self.set_last_update()

            # turn every feed entry into a DB action, dropping unparseable ones
            parsed = (self._parse_item(item) for item in data['entries'] or [])
            cl = [ci for ci in parsed if ci is not None]
            if cl:
                cache_db_con = self._get_db()
                cache_db_con.mass_action(cl)

    except AuthException as e:
        logger.log("Authentication error: " + ex(e), logger.WARNING)
    except Exception as e:
        logger.log("Error while searching " + self.provider.name + ", skipping: " + repr(e), logger.DEBUG)
def _getProperList(self):
    """
    Walk providers for propers
    """
    propers = {}

    # only look at releases from the last 2 days
    search_date = datetime.datetime.today() - datetime.timedelta(days=2)

    # for each provider get a list of the
    origThreadName = threading.currentThread().name
    providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive()]
    for curProvider in providers:
        # tag the thread name with the provider so log lines are attributable
        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"

        logger.log(u"Searching for any new PROPER releases from " + curProvider.name)

        try:
            curPropers = curProvider.findPropers(search_date)
        except AuthException, e:
            logger.log(u"Authentication error: " + ex(e), logger.DEBUG)
            continue
        except Exception, e:
            logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.DEBUG)
            logger.log(traceback.format_exc(), logger.DEBUG)
            continue
    # NOTE(review): accumulating `curPropers` into `propers` and returning it
    # is not visible in this chunk — confirm against the full file.
def _api_call(self, apikey, params=None, results_per_page=1000, offset=0):
    """
    Query the provider's JSON-RPC getTorrents endpoint.

    :param apikey: provider API key
    :param params: optional dict of search parameters
    :param results_per_page: page size for the request
    :param offset: result offset for paging
    :return: parsed JSON dict; contains an 'api-error' key on protocol errors,
             and is empty on most other failures
    """
    server = jsonrpclib.Server(self.urls['base_url'])
    parsedJSON = {}

    try:
        parsedJSON = server.getTorrents(apikey, params or {}, int(results_per_page), int(offset))
        # throttle according to the configured CPU preset
        time.sleep(cpu_presets[sickbeard.CPU_PRESET])
    except jsonrpclib.jsonrpc.ProtocolError as error:
        if error.message == 'Call Limit Exceeded':
            logger.log("You have exceeded the limit of 150 calls per hour, per API key which is unique to your user account", logger.WARNING)
        else:
            # BUG FIX: log message typo "provicer" -> "provider"
            logger.log("JSON-RPC protocol error while accessing provider. Error: {0} ".format(repr(error)), logger.ERROR)
        parsedJSON = {'api-error': ex(error)}
        return parsedJSON
    except socket.timeout:
        logger.log("Timeout while accessing provider", logger.WARNING)
    except socket.error as error:
        # Note that sometimes timeouts are thrown as socket errors
        logger.log("Socket error while accessing provider. Error: {0} ".format(error[1]), logger.WARNING)
    except Exception as error:
        errorstring = str(error)
        # strip the angle brackets some fault strings are wrapped in
        if errorstring.startswith('<') and errorstring.endswith('>'):
            errorstring = errorstring[1:-1]
        logger.log("Unknown error while accessing provider. Error: {0} ".format(errorstring), logger.WARNING)

    return parsedJSON
def run_subs_extra_scripts(epObj, foundSubs):
    """
    Run every configured subtitles extra-script once per downloaded subtitle.

    :param epObj: episode mapping providing show/season/episode metadata keys
    :param foundSubs: mapping of video -> list of downloaded subtitles
    """
    for curScriptName in sickbeard.SUBTITLES_EXTRA_SCRIPTS:
        # split the configured command line, keeping quoted chunks intact
        script_cmd = [piece for piece in re.split("( |\\\".*?\\\"|'.*?')", curScriptName) if piece.strip()]
        script_cmd[0] = ek(os.path.abspath, script_cmd[0])
        logger.log(u"Absolute path to script: " + script_cmd[0], logger.DEBUG)

        for video, subs in foundSubs.iteritems():
            # NOTE: a dead `subpaths = []` accumulator was removed here
            for sub in subs:
                subpath = subliminal.subtitle.get_subtitle_path(video.name, sub.language)
                # relocate the subtitle path when a subtitles dir is configured
                if os.path.isabs(sickbeard.SUBTITLES_DIR):
                    subpath = ek(os.path.join, sickbeard.SUBTITLES_DIR, ek(os.path.basename, subpath))
                elif sickbeard.SUBTITLES_DIR:
                    subpath = ek(os.path.join, ek(os.path.dirname, subpath),
                                 sickbeard.SUBTITLES_DIR, ek(os.path.basename, subpath))

                inner_cmd = script_cmd + [video.name, subpath, sub.language.opensubtitles,
                                          epObj['show.name'], str(epObj['season']), str(epObj['episode']),
                                          epObj['name'], str(epObj['show.indexerid'])]

                # use subprocess to run the command and capture output
                logger.log(u"Executing command: %s" % inner_cmd)
                try:
                    p = subprocess.Popen(inner_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                         stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR)
                    out, _ = p.communicate()  # @UnusedVariable (stderr is merged into stdout)
                    logger.log(u"Script result: %s" % out, logger.DEBUG)
                except Exception as e:
                    logger.log(u"Unable to run subs_extra_script: " + ex(e))
def process_failed(dirName, nzbName, result):
    """Process a download that did not complete correctly"""

    # nothing to do unless failed-download handling is enabled
    if not sickbeard.USE_FAILED_DOWNLOADS:
        return

    processor = None
    process_fail_message = u""
    try:
        processor = failedProcessor.FailedProcessor(dirName, nzbName)
        result.result = processor.process()
    except FailedPostProcessingFailedException as e:
        result.result = False
        process_fail_message = ex(e)

    if processor:
        result.output += processor.log

    # optionally clean up the failed download's folder
    if sickbeard.DELETE_FAILED and result.result:
        if delete_folder(dirName, check_empty=False):
            result.output += logHelper(u"Deleted folder: %s" % dirName, logger.DEBUG)

    if result.result:
        result.output += logHelper(u"Failed Download Processing succeeded: (%s, %s)" % (nzbName, dirName))
    else:
        result.output += logHelper(
            u"Failed Download Processing failed: (%s, %s): %s" % (nzbName, dirName, process_fail_message),
            logger.WARNING,
        )
def __init__(self, filename="sickbeard.db", suffix=None, row_type=None):
    """
    Open (or reuse) the shared SQLite connection for the given database file.

    :param filename: database filename, resolved via dbFilename
    :param suffix: optional filename suffix passed through to dbFilename
    :param row_type: optional row-factory selector used by _set_row_factory
    """
    self.filename = filename
    self.suffix = suffix
    self.row_type = row_type

    try:
        if self.filename not in db_cons or not db_cons[self.filename]:
            # first open for this file: create its lock and a shared connection.
            # busy timeout is 20s; check_same_thread=False because the
            # connection is shared across threads (serialized via db_locks)
            db_locks[self.filename] = threading.Lock()

            self.connection = sqlite3.connect(dbFilename(self.filename, self.suffix), 20, check_same_thread=False)
            self.connection.text_factory = DBConnection._unicode_text_factory

            db_cons[self.filename] = self.connection
        else:
            # reuse the cached shared connection for this file
            self.connection = db_cons[self.filename]

        # start off row factory configured as before out of
        # paranoia but wait to do so until other potential users
        # of the shared connection are done using
        # it... technically not required as row factory is reset
        # in all the public methods after the lock has been
        # acquired
        with db_locks[self.filename]:
            self._set_row_factory()

    except OperationalError:
        # typically a permissions problem opening the DB file
        logger.log(u'Please check your database owner/permissions: {0}'.format(dbFilename(self.filename, self.suffix)), logger.WARNING)
    except Exception as e:
        logger.log(u"DB error: " + ex(e), logger.ERROR)
        raise
def process_failed(process_path, release_name, result):
    """Process a download that did not complete correctly"""

    # nothing to do unless failed-download handling is enabled
    if not sickbeard.USE_FAILED_DOWNLOADS:
        return

    processor = None
    process_fail_message = ""
    try:
        processor = failedProcessor.FailedProcessor(process_path, release_name)
        result.result = processor.process()
    except FailedPostProcessingFailedException as e:
        result.result = False
        process_fail_message = ex(e)

    if processor:
        result.output += processor.log

    # optionally clean up the failed download's folder
    if sickbeard.DELETE_FAILED and result.result:
        if delete_folder(process_path, check_empty=False):
            result.output += log_helper("Deleted folder: {0}".format(process_path), logger.DEBUG)

    if result.result:
        result.output += log_helper("Failed Download Processing succeeded: ({0}, {1})".format(release_name, process_path))
    else:
        result.output += log_helper("Failed Download Processing failed: ({0}, {1}): {2}".format(
            release_name, process_path, process_fail_message), logger.WARNING)
def _make_url(self, result):
    """
    Build the list of candidate download URLs and the local target filename
    for a search result.

    :param result: search result with ``url`` and ``name`` attributes
    :return: tuple of (urls, filename); ('', '') when result is falsy,
             and urls is empty when magnet parsing fails
    """
    if not result:
        return '', ''

    urls = []
    filename = u''

    if result.url.startswith('magnet'):
        try:
            torrent_hash = re.findall(r'urn:btih:([\w]{32,40})', result.url)[0].upper()

            try:
                torrent_name = re.findall('dn=([^&]+)', result.url)[0]
            except Exception:
                torrent_name = 'NO_DOWNLOAD_NAME'

            # base32 info-hashes are 32 chars; normalize to hex (base16)
            if len(torrent_hash) == 32:
                torrent_hash = b16encode(b32decode(torrent_hash)).upper()

            if not torrent_hash:
                # BUG FIX: `ex()` expects an exception, not a URL string;
                # log the raw url directly
                logger.log(u'Unable to extract torrent hash from magnet: %s' % result.url, logger.ERROR)
                return urls, filename

            urls = [x.format(torrent_hash=torrent_hash, torrent_name=torrent_name) for x in self.bt_cache_urls]
        except Exception:
            logger.log(u'Unable to extract torrent hash or name from magnet: %s' % result.url, logger.ERROR)
            return urls, filename
    else:
        urls = [result.url]

    filename = ek(join, self._get_storage_dir(), sanitize_filename(result.name) + '.' + self.provider_type)

    return urls, filename
def delete_folder(folder, check_empty=True):
    """
    Removes a folder from the filesystem

    :param folder: Path to folder to remove
    :param check_empty: Boolean, check if the folder is empty before removing it, defaults to True
    :return: True on success, False on failure
    """

    # check if it's a folder
    if not ek(os.path.isdir, folder):
        return False

    # check if it isn't TV_DOWNLOAD_DIR
    if sickbeard.TV_DOWNLOAD_DIR:
        if helpers.real_path(folder) == helpers.real_path(sickbeard.TV_DOWNLOAD_DIR):
            return False

    # check if it's empty folder when wanted checked
    if check_empty:
        check_files = ek(os.listdir, folder)
        if check_files:
            logger.log(u"Not deleting folder " + folder + " found the following files: " + str(check_files), logger.INFO)
            return False

    try:
        logger.log(u"Deleting folder (if it's empty): " + folder)
        os.rmdir(folder)
    except (OSError, IOError) as e:
        logger.log(u"Warning: unable to delete folder: " + folder + ": " + ex(e), logger.WARNING)
        return False

    # BUG FIX: the function used to fall off the end and return None (falsy)
    # on success, contradicting the documented "True on success" contract
    return True
def _get_ep_obj(self, show, season, episodes):
    """
    Retrieve the TVEpisode object requested.

    :param show: The show object belonging to the show we want to process
    :param season: The season of the episode (int)
    :param episodes: A list of episodes to find (list of ints)

    :return: If the episode(s) can be found then a TVEpisode object with the correct related eps will
    be instantiated and returned. If the episode can't be found then None will be returned.
    """
    root_ep = None
    for cur_episode in episodes:
        self._log(u"Retrieving episode object for " + str(season) + "x" + str(cur_episode), logger.DEBUG)

        # now that we've figured out which episode this file is just load it manually
        try:
            curEp = show.getEpisode(season, cur_episode)
            if not curEp:
                raise EpisodeNotFoundException()
        except EpisodeNotFoundException, e:
            self._log(u"Unable to create episode: " + ex(e), logger.DEBUG)
            raise EpisodePostProcessingFailedException()

        # associate all the episodes together under a single root episode
        if root_ep == None:
            root_ep = curEp
            root_ep.relatedEps = []
        elif curEp not in root_ep.relatedEps:
            root_ep.relatedEps.append(curEp)
    # NOTE(review): `return root_ep` is not visible in this chunk — confirm
    # against the full file that the method returns it after the loop.
def run_subs_extra_scripts(episode_object, found_subtitles, video, single=False):
    """Run every configured subtitles extra-script once per found subtitle."""
    for script_name in sickbeard.SUBTITLES_EXTRA_SCRIPTS:
        # split the configured command line, keeping quoted chunks intact
        script_cmd = [piece for piece in re.split("( |\\\".*?\\\"|'.*?')", script_name) if piece.strip()]
        script_cmd[0] = ek(os.path.abspath, script_cmd[0])
        logger.log(u"Absolute path to script: " + script_cmd[0], logger.DEBUG)

        for sub in found_subtitles:
            # a single subtitle gets the language-less path
            sub_path = subliminal.subtitle.get_subtitle_path(video.name, None if single else sub.language)
            inner_cmd = script_cmd + [video.name, sub_path, sub.language.opensubtitles,
                                      episode_object['show_name'], str(episode_object['season']),
                                      str(episode_object['episode']), episode_object['name'],
                                      str(episode_object['show_indexerid'])]

            # use subprocess to run the command and capture output
            logger.log(u"Executing command: %s" % inner_cmd)
            try:
                proc = subprocess.Popen(inner_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR)
                out, _ = proc.communicate()  # @UnusedVariable
                logger.log(u"Script result: %s" % out, logger.DEBUG)
            except Exception as err:
                logger.log(u"Unable to run subs_extra_script: " + ex(err))
def run(self, force=False):  # pylint: disable=unused-argument, too-many-locals, too-many-branches, too-many-statements
    """Daily task: ask theTVDB for shows updated since the last run and queue them for update.

    Also refreshes next-episode info for every show in the list. The lastUpdate
    timestamp is only advanced when the updates feed parsed successfully.
    """
    # guard against overlapping runs of this scheduler job
    if self.amActive:
        return

    self.amActive = True

    # record "now" up front so shows updated while this run is in flight
    # are picked up by the next run
    update_timestamp = time.mktime(datetime.datetime.now().timetuple())

    cache_db_con = db.DBConnection('cache.db')
    result = cache_db_con.select('SELECT `time` FROM lastUpdate WHERE provider = ?', ['theTVDB'])
    if result:
        last_update = long(result[0][0])
    else:
        # first run ever: seed the row with the minimum representable timestamp
        last_update = long(time.mktime(datetime.datetime.min.timetuple()))
        cache_db_con.action('INSERT INTO lastUpdate (provider, `time`) VALUES (?, ?)', ['theTVDB', last_update])

    network_timezones.update_network_dict()

    url = 'http://thetvdb.com/api/Updates.php?type=series&time={}'.format(last_update)
    data = helpers.getURL(url, session=self.session, returns='text', hooks={'response': self.request_hook})
    if not data:
        logger.log('Could not get the recently updated show data from {}. Retrying later. Url was: {}'.format(sickbeard.indexerApi(INDEXER_TVDB).name, url))
        self.amActive = False
        return

    updated_shows = set()
    try:
        tree = etree.fromstring(data)
        for show in tree.findall('Series'):
            updated_shows.add(int(show.text))
    except SyntaxError:
        # malformed XML from the API: keep the old timestamp so this
        # window is retried on the next run
        update_timestamp = last_update

    pi_list = []
    for cur_show in sickbeard.showList:
        # TVRage shut down; its shows can no longer be updated
        if int(cur_show.indexer) in [INDEXER_TVRAGE]:
            logger.log('Indexer is no longer available for show [{}] '.format(cur_show.name), logger.WARNING)
            continue

        try:
            # refresh cached next-episode info for every show, updated or not
            cur_show.nextEpisode()
            if sickbeard.indexerApi(cur_show.indexer).name == 'theTVDB':
                if cur_show.indexerid in updated_shows:
                    pi_list.append(sickbeard.showQueueScheduler.action.updateShow(cur_show, True))
        except (CantUpdateShowException, CantRefreshShowException) as error:
            logger.log('Automatic update failed: {}'.format(ex(error)), logger.ERROR)

    ui.ProgressIndicators.setIndicator('dailyUpdate', ui.QueueProgressIndicator('Daily Update', pi_list))

    # persist the new high-water mark for the next run
    cache_db_con.action('UPDATE lastUpdate SET `time` = ? WHERE provider=?', [update_timestamp, 'theTVDB'])

    self.amActive = False
def update_library(self, ep_obj=None, host=None,  # pylint: disable=too-many-arguments, too-many-locals, too-many-statements, too-many-branches
                   username=None, password=None, plex_server_token=None, force=False):
    """Handles updating the Plex Media Server host via HTTP API

    Plex Media Server currently only supports updating the whole video library
    and not a specific path.

    Returns:
        Returns None for no issue, else a string of host with connection issues

    """
    if not (sickbeard.USE_PLEX_SERVER and sickbeard.PLEX_UPDATE_LIBRARY) and not force:
        return None

    host = host or sickbeard.PLEX_SERVER_HOST
    if not host:
        logger.log(u'PLEX: No Plex Media Server host specified, check your settings', logger.DEBUG)
        return False

    if not self.get_token(username, password, plex_server_token):
        logger.log(u'PLEX: Error getting auth token for Plex Media Server, check your settings', logger.WARNING)
        return False

    file_location = '' if not ep_obj else ep_obj.location
    host_list = {x.strip() for x in host.split(',') if x.strip()}
    # BUGFIX: `hosts_all = hosts_match = {}` bound both names to the SAME dict,
    # so every hosts_all.update() also filled hosts_match — the "refresh only
    # matching sections" filter below could never take effect. They must be
    # two distinct dicts.
    hosts_all = {}
    hosts_match = {}
    hosts_failed = set()

    for cur_host in host_list:
        url = 'http{0}://{1}/library/sections'.format(('', 's')[sickbeard.PLEX_SERVER_HTTPS], cur_host)
        try:
            xml_response = getURL(url, headers=self.headers, session=self.session, returns='text')
            if not xml_response:
                logger.log(u'PLEX: Error while trying to contact Plex Media Server: {0}'.format(cur_host), logger.WARNING)
                hosts_failed.add(cur_host)
                continue

            media_container = etree.fromstring(xml_response)
        except IOError as error:
            logger.log(u'PLEX: Error while trying to contact Plex Media Server: {0}'.format(ex(error)), logger.WARNING)
            hosts_failed.add(cur_host)
            continue
        except Exception as error:
            if 'invalid token' in str(error):
                logger.log(u'PLEX: Please set TOKEN in Plex settings: ', logger.WARNING)
            else:
                logger.log(u'PLEX: Error while trying to contact Plex Media Server: {0}'.format(ex(error)), logger.WARNING)
            hosts_failed.add(cur_host)
            continue

        sections = media_container.findall('.//Directory')
        if not sections:
            logger.log(u'PLEX: Plex Media Server not running on: {0}'.format(cur_host), logger.DEBUG)
            hosts_failed.add(cur_host)
            continue

        for section in sections:
            if 'show' == section.attrib['type']:
                # map section key -> host so each matching section can be refreshed
                keyed_host = [(str(section.attrib['key']), cur_host)]
                hosts_all.update(keyed_host)
                if not file_location:
                    continue

                for section_location in section.findall('.//Location'):
                    # normalise slashes/case and strip a leading drive prefix
                    # before comparing section path with the episode location
                    section_path = re.sub(r'[/\\]+', '/', section_location.attrib['path'].lower())
                    section_path = re.sub(r'^(.{,2})[/\\]', '', section_path)
                    location_path = re.sub(r'[/\\]+', '/', file_location.lower())
                    location_path = re.sub(r'^(.{,2})[/\\]', '', location_path)

                    if section_path in location_path:
                        hosts_match.update(keyed_host)

    if force:
        return (', '.join(set(hosts_failed)), None)[not len(hosts_failed)]

    if hosts_match:
        logger.log(u'PLEX: Updating hosts where TV section paths match the downloaded show: ' + ', '.join(set(hosts_match)), logger.DEBUG)
    else:
        logger.log(u'PLEX: Updating all hosts with TV sections: ' + ', '.join(set(hosts_all)), logger.DEBUG)

    # refresh only the matched sections; fall back to all TV sections otherwise
    hosts_try = (hosts_match.copy(), hosts_all.copy())[not len(hosts_match)]
    for section_key, cur_host in hosts_try.iteritems():
        url = 'http{0}://{1}/library/sections/{2}/refresh'.format(('', 's')[sickbeard.PLEX_SERVER_HTTPS], cur_host, section_key)
        try:
            getURL(url, headers=self.headers, session=self.session, returns='text')
        except Exception as error:
            logger.log(u'PLEX: Error updating library section for Plex Media Server: {0}'.format(ex(error)), logger.WARNING)
            hosts_failed.add(cur_host)

    return (', '.join(set(hosts_failed)), None)[not len(hosts_failed)]
def write_ep_file(self, ep_obj):
    """Render and write the episode metadata file for ``ep_obj``.

    The destination comes from get_episode_file_path(); the parent directory
    is created (with parent permissions) when missing. Expects _ep_data() to
    return an ElementTree object.

    :param ep_obj: TVEpisode object for which to create the metadata
    :return: True on success, False when there is no data or the write fails
    """
    episode_tree = self._ep_data(ep_obj)
    if not episode_tree:
        return False

    target_path = self.get_episode_file_path(ep_obj)
    assert isinstance(target_path, six.text_type)
    target_dir = ek(os.path.dirname, target_path)

    try:
        if not ek(os.path.isdir, target_dir):
            logger.log("Metadata dir didn't exist, creating it at " + target_dir, logger.DEBUG)
            ek(os.makedirs, target_dir)
            helpers.chmodAsParent(target_dir)
        logger.log("Writing episode nfo file to " + target_path, logger.DEBUG)

        out_handle = io.open(target_path, 'wb')
        episode_tree.write(out_handle, encoding='UTF-8')
        out_handle.close()
        helpers.chmodAsParent(target_path)
    except IOError as write_err:
        logger.log("Unable to write file to " + target_path + " - are you sure the folder is writable? " + ex(write_err), logger.ERROR)
        return False

    return True
def update_library(self, ep_obj):
    """Ask the pyTivo server to push the downloaded episode file to a TiVo.

    Builds the pyTivo 'Push' request from the show location, share name and
    TiVo name configured in settings.

    :param ep_obj: TVEpisode object whose file should be transferred
    :return: True when the transfer request succeeds, False otherwise
    """
    # Values from config
    if not sickbeard.USE_PYTIVO:
        return False

    host = sickbeard.PYTIVO_HOST
    shareName = sickbeard.PYTIVO_SHARE_NAME
    tsn = sickbeard.PYTIVO_TIVO_NAME

    # There are two more values required, the container and file.
    #
    # container: The share name, show name and season
    #
    # file: The file name
    #
    # Some slicing and dicing of variables is required to get at these values.
    #
    # There might be better ways to arrive at the values, but this is the best I have been able to
    # come up with.
    #

    # Calculated values
    showPath = ep_obj.show.location
    showName = ep_obj.show.name
    rootShowAndSeason = ek(os.path.dirname, ep_obj.location)
    absPath = ep_obj.location

    # Some show names have colons in them which are illegal in a path location, so strip them out.
    # (Are there other characters?)
    showName = showName.replace(":", "")

    root = showPath.replace(showName, "")
    showAndSeason = rootShowAndSeason.replace(root, "")

    container = shareName + "/" + showAndSeason
    filename = "/" + absPath.replace(root, "")

    # Finally create the url and make request
    requestUrl = "http://" + host + "/TiVoConnect?" + urlencode(
        {'Command': 'Push', 'Container': container, 'File': filename, 'tsn': tsn})

    logger.log(u"pyTivo notification: Requesting " + requestUrl, logger.DEBUG)

    request = Request(requestUrl)
    try:
        response = urlopen(request)  # @UnusedVariable
    except HTTPError as e:
        if hasattr(e, 'reason'):
            # BUGFIX: e.reason is not guaranteed to be a string; coerce before
            # concatenating so the error path itself cannot raise TypeError
            logger.log(u"pyTivo notification: Error, failed to reach a server - " + str(e.reason), logger.ERROR)
            return False
        elif hasattr(e, 'code'):
            # BUGFIX: e.code is an int; the original `+ e.code` raised TypeError
            logger.log(u"pyTivo notification: Error, the server couldn't fulfill the request - " + str(e.code), logger.ERROR)
            return False
    except Exception as e:
        logger.log(u"PYTIVO: Unknown exception: " + ex(e), logger.ERROR)
        return False
    else:
        logger.log(u"pyTivo notification: Successfully requested transfer of file")
        return True
def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
    """Search IPTorrents for each mode/string in search_params.

    Returns a list of (title, download_url, size, seeders, leechers) tuples,
    sorted by seeders within each mode. Results below the configured
    minseed/minleech thresholds are discarded.
    """
    results = []
    if not self.login():
        return results

    freeleech = '&free=on' if self.freeleech else ''

    for mode in search_params:
        items = []
        logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
        for search_string in search_params[mode]:

            if mode != 'RSS':
                logger.log(u"Search string: %s " % search_string, logger.DEBUG)

            # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
            search_url = self.urls['search'] % (self.categories, freeleech, search_string)
            # sort by seeders for real searches; RSS keeps the site's order
            search_url += ';o=seeders' if mode != 'RSS' else ''

            logger.log(u"Search URL: %s" % search_url, logger.DEBUG)

            data = self.get_url(search_url)
            if not data:
                continue

            try:
                # strip <button> markup before parsing so html5lib doesn't choke on it
                data = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0)
                with BS4Parser(data, 'html5lib') as html:
                    if not html:
                        logger.log(u"No data returned from provider", logger.DEBUG)
                        continue

                    if html.find(text='No Torrents Found!'):
                        logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    torrent_table = html.find('table', attrs={'class': 'torrents'})
                    torrents = torrent_table.find_all('tr') if torrent_table else []

                    # Continue only if one Release is found (first row is the header)
                    if len(torrents) < 2:
                        logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    for result in torrents[1:]:
                        try:
                            title = result.find_all('td')[1].find('a').text
                            download_url = self.urls['base_url'] + result.find_all('td')[3].find('a')['href']
                            seeders = int(result.find('td', attrs={'class': 'ac t_seeders'}).text)
                            leechers = int(result.find('td', attrs={'class': 'ac t_leechers'}).text)
                            torrent_size = result.find_all('td')[5].text
                            size = convert_size(torrent_size) or -1
                        except (AttributeError, TypeError, KeyError):
                            # malformed row: skip it rather than abort the page
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            continue

                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                        items.append(item)

            except Exception as e:
                logger.log(u"Failed parsing provider. Error: %r" % ex(e), logger.ERROR)

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda tup: tup[3], reverse=True)
        results += items

    return results
#or find a way to kill the access from memory if curfile in ('unrar.dll', 'unrar64.dll'): try: os.chmod(new_path, stat.S_IWRITE) os.remove(new_path) os.renames(old_path, new_path) except Exception, e: logger.log(u"Unable to update " + new_path + ': ' + ex(e), logger.DEBUG) os.remove(old_path) # Trash the updated file without moving in new path continue if os.path.isfile(new_path): os.remove(new_path) os.renames(old_path, new_path) sickbeard.CUR_COMMIT_HASH = self._newest_commit_hash sickbeard.CUR_COMMIT_BRANCH = self.branch except Exception, e: logger.log(u"Error while trying to update: " + ex(e), logger.ERROR) logger.log(u"Traceback: " + traceback.format_exc(), logger.DEBUG) return False # Notify update successful notifiers.notify_git_update(sickbeard.NEWEST_VERSION_STRING) return True def list_remote_branches(self): return [x.name for x in sickbeard.gh.get_branches() if x]
def retrieveShowMetadata(self, folder):
    """
    Used only when mass adding Existing Shows, using previously generated Show metadata to reduce the need to query TVDB.

    :param folder: unicode path of the show directory to look in
    :return: (indexer_id, name, indexer) tuple, or (None, None, None) when the
             metadata file is missing, invalid or refers to TVRage
    """
    empty_return = (None, None, None)
    assert isinstance(folder, six.text_type)

    metadata_path = ek(os.path.join, folder, self._show_metadata_filename)

    # nothing to parse when the folder or the nfo file does not exist
    if not ek(os.path.isdir, folder) or not ek(os.path.isfile, metadata_path):
        logger.log("Can't load the metadata file from " + metadata_path + ", it doesn't exist", logger.DEBUG)
        return empty_return

    logger.log("Loading show info from metadata file in " + metadata_path, logger.DEBUG)

    try:
        with io.open(metadata_path, 'rb') as xmlFileObj:
            showXML = etree.ElementTree(file=xmlFileObj)

        # a usable nfo needs a <title> plus one of <tvdbid>/<id>
        if showXML.findtext('title') is None or (showXML.findtext('tvdbid') is None and showXML.findtext('id') is None):
            logger.log("Invalid info in tvshow.nfo (missing name or id): {0} {1} {2}".format(showXML.findtext('title'), showXML.findtext('tvdbid'), showXML.findtext('id')))
            return empty_return

        name = showXML.findtext('title')

        indexer_id_text = showXML.findtext('tvdbid') or showXML.findtext('id')
        if indexer_id_text:
            indexer_id = try_int(indexer_id_text, None)
            if indexer_id is None or indexer_id < 1:
                logger.log("Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file", logger.DEBUG)
                return empty_return
        else:
            logger.log("Empty <id> or <tvdbid> field in NFO, unable to find a ID, not using metadata file", logger.DEBUG)
            return empty_return

        # indexer hard-coded to 1 (theTVDB); TVRage metadata is rejected below
        indexer = 1
        epg_url_text = showXML.findtext('episodeguide/url')
        if epg_url_text:
            epg_url = epg_url_text.lower()
            if str(indexer_id) in epg_url and 'tvrage' in epg_url:
                logger.log("Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file because it has TVRage info", logger.WARNING)
                return empty_return

    except Exception as e:
        logger.log(
            "There was an error parsing your existing metadata file: '" + metadata_path + "' error: " + ex(e),
            logger.WARNING)
        return empty_return

    return indexer_id, name, indexer
def _retrieve_show_image(self, image_type, show_obj, which=None):
    """
    Gets an image URL from theTVDB.com and TMDB.com, downloads it and returns the data.

    image_type: type of image to retrieve (currently supported: fanart, poster, banner)
    show_obj: a TVShow object to use when searching for the image
    which: optional, a specific numbered poster to look for

    Returns: the binary image data if available, or else None
    """
    image_url = None
    indexer_lang = show_obj.lang

    try:
        # There's gotta be a better way of doing this but we don't wanna
        # change the language value elsewhere
        lINDEXER_API_PARMS = sickbeard.indexerApi(show_obj.indexer).api_params.copy()

        lINDEXER_API_PARMS['banners'] = True
        lINDEXER_API_PARMS['language'] = indexer_lang or sickbeard.INDEXER_DEFAULT_LANGUAGE

        if show_obj.dvdorder:
            lINDEXER_API_PARMS['dvdorder'] = True

        t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS)
        indexer_show_obj = t[show_obj.indexerid]
    except (sickbeard.indexer_error, IOError) as e:
        logger.log("Unable to look up show on " + sickbeard.indexerApi(
            show_obj.indexer).name + ", not downloading images: " + ex(e), logger.WARNING)
        logger.log("{0} may be experiencing some problems. Try again later.".format(sickbeard.indexerApi(show_obj.indexer).name), logger.DEBUG)
        return None

    if image_type not in ('fanart', 'poster', 'banner', 'poster_thumb', 'banner_thumb'):
        logger.log("Invalid image type " + str(image_type) + ", couldn't find it in the " + sickbeard.indexerApi(
            show_obj.indexer).name + " object", logger.ERROR)
        return None

    if image_type == 'poster_thumb':
        if getattr(indexer_show_obj, 'poster', None):
            # thumbnails are served from the indexer's _cache mirror of the full-size path
            image_url = re.sub('posters', '_cache/posters', indexer_show_obj['poster'])
        if not image_url:
            # Try and get images from Fanart.TV
            image_url = self._retrieve_show_images_from_fanart(show_obj, image_type)
        if not image_url:
            # Try and get images from TMDB
            image_url = self._retrieve_show_images_from_tmdb(show_obj, image_type)
    elif image_type == 'banner_thumb':
        if getattr(indexer_show_obj, 'banner', None):
            image_url = re.sub('graphical', '_cache/graphical', indexer_show_obj['banner'])
        if not image_url:
            # Try and get images from Fanart.TV
            image_url = self._retrieve_show_images_from_fanart(show_obj, image_type)
    else:
        if getattr(indexer_show_obj, image_type, None):
            image_url = indexer_show_obj[image_type]
        if not image_url:
            # Try and get images from Fanart.TV
            image_url = self._retrieve_show_images_from_fanart(show_obj, image_type)
        if not image_url:
            # Try and get images from TMDB
            image_url = self._retrieve_show_images_from_tmdb(show_obj, image_type)

    if image_url:
        image_data = metadata_helpers.getShowImage(image_url, which)
        return image_data

    return None
def _write_image(self, image_data, image_path, obj=None):
    """Persist binary ``image_data`` at ``image_path``.

    The write is skipped when the file already exists or no data was supplied;
    the parent directory is created (with parent permissions) when missing.

    :param image_data: binary image data to write to file
    :param image_path: file location to save the image to
    :return: True on success, False otherwise
    """
    assert isinstance(image_path, six.text_type)

    # don't bother overwriting it
    if ek(os.path.isfile, image_path):
        logger.log("Image already exists, not downloading", logger.DEBUG)
        return False

    if not image_data:
        logger.log("Unable to retrieve image to save in {0}, skipping".format(image_path), logger.DEBUG)
        return False

    target_dir = ek(os.path.dirname, image_path)
    try:
        if not ek(os.path.isdir, target_dir):
            logger.log("Metadata dir didn't exist, creating it at " + target_dir, logger.DEBUG)
            ek(os.makedirs, target_dir)
            helpers.chmodAsParent(target_dir)

        with io.open(image_path, 'wb') as img_handle:
            img_handle.write(image_data)
        helpers.chmodAsParent(image_path)
    except IOError as write_err:
        logger.log(
            "Unable to write image to " + image_path + " - are you sure the show folder is writable? " + ex(write_err),
            logger.ERROR)
        return False

    return True
def _getProperList(self):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
    """
    Walk providers for propers

    Searches every active provider for PROPER/REPACK/REAL releases from the last
    two days, parses and filters them, and returns the list of propers that
    should actually be snatched.
    """
    propers = {}

    # look back two days for new proper releases
    search_date = datetime.datetime.today() - datetime.timedelta(days=2)

    # for each provider get a list of the
    origThreadName = threading.currentThread().name
    providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.is_active()]
    for curProvider in providers:
        # tag the thread name so log lines show which provider is being searched
        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"

        logger.log("Searching for any new PROPER releases from " + curProvider.name)

        try:
            curPropers = curProvider.find_propers(search_date)
        except AuthException as e:
            logger.log("Authentication error: " + ex(e), logger.WARNING)
            continue
        except Exception as e:
            logger.log("Exception while searching propers in " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
            logger.log(traceback.format_exc(), logger.DEBUG)
            continue

        # if they haven't been added by a different provider than add the proper to the list
        for x in curPropers:
            if not re.search(r'\b(proper|repack|real)\b', x.name, re.I):
                logger.log('find_propers returned a non-proper, we have caught and skipped it.', logger.DEBUG)
                continue

            name = self._genericName(x.name)
            if name not in propers:
                logger.log("Found new proper: " + x.name, logger.DEBUG)
                x.provider = curProvider
                propers[name] = x

        threading.currentThread().name = origThreadName

    # take the list of unique propers and get it sorted by
    sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
    finalPropers = []

    for curProper in sortedPropers:
        try:
            parse_result = NameParser(False).parse(curProper.name)
        except (InvalidNameException, InvalidShowException) as error:
            logger.log("{0}".format(error), logger.DEBUG)
            continue

        if not parse_result.series_name:
            continue

        if not parse_result.episode_numbers:
            logger.log("Ignoring " + curProper.name + " because it's for a full season rather than specific episode", logger.DEBUG)
            continue

        logger.log("Successful match! Result " + parse_result.original_name + " matched to show " + parse_result.show.name, logger.DEBUG)

        # set the indexerid in the db to the show's indexerid
        curProper.indexerid = parse_result.show.indexerid

        # set the indexer in the db to the show's indexer
        curProper.indexer = parse_result.show.indexer

        # populate our Proper instance
        curProper.show = parse_result.show
        curProper.season = parse_result.season_number if parse_result.season_number is not None else 1
        curProper.episode = parse_result.episode_numbers[0]
        curProper.release_group = parse_result.release_group
        curProper.version = parse_result.version
        curProper.quality = Quality.nameQuality(curProper.name, parse_result.is_anime)
        curProper.content = None

        # filter release
        bestResult = pickBestResult(curProper, parse_result.show)
        if not bestResult:
            logger.log("Proper " + curProper.name + " were rejected by our release filters.", logger.DEBUG)
            continue

        # only get anime proper if it has release group and version
        if bestResult.show.is_anime and not bestResult.release_group and bestResult.version == -1:
            logger.log("Proper " + bestResult.name + " doesn't have a release group and version, ignoring it", logger.DEBUG)
            continue

        # check if we actually want this proper (if it's the right quality)
        main_db_con = db.DBConnection()
        sql_results = main_db_con.select(
            "SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
            [bestResult.indexerid, bestResult.season, bestResult.episode])
        if not sql_results:
            continue

        # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
        oldStatus, oldQuality = Quality.splitCompositeStatus(int(sql_results[0][b"status"]))
        if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != bestResult.quality:
            continue

        # check if we actually want this proper (if it's the right release group and a higher version)
        if bestResult.show.is_anime:
            main_db_con = db.DBConnection()
            sql_results = main_db_con.select(
                "SELECT release_group, version FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
                [bestResult.indexerid, bestResult.season, bestResult.episode])

            oldVersion = int(sql_results[0][b"version"])
            oldRelease_group = (sql_results[0][b"release_group"])

            if -1 < oldVersion < bestResult.version:
                logger.log("Found new anime v" + str(bestResult.version) + " to replace existing v" + str(oldVersion))
            else:
                continue

            if oldRelease_group != bestResult.release_group:
                logger.log("Skipping proper from release group: " + bestResult.release_group + ", does not match existing release group: " + oldRelease_group)
                continue

        # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
        if bestResult.indexerid != -1 and (bestResult.indexerid, bestResult.season, bestResult.episode) not in {(p.indexerid, p.season, p.episode) for p in finalPropers}:
            logger.log("Found a proper that we need: " + str(bestResult.name))
            finalPropers.append(bestResult)

    return finalPropers
def update_show_indexer_metadata(self, show_obj):
    """Ensure the show's nfo file contains the correct indexer <id>.

    Reads the existing metadata file, updates (or inserts) the <id> element
    when it differs from show_obj.indexerid, and rewrites the file.
    Returns True on success; returns None when there is nothing to update
    or the write fails.
    """
    if self.show_metadata and show_obj and self._has_show_metadata(show_obj):
        logger.log(
            "Metadata provider " + self.name + " updating show indexer info metadata file for " + show_obj.name,
            logger.DEBUG)

        nfo_file_path = self.get_show_file_path(show_obj)
        assert isinstance(nfo_file_path, six.text_type)

        try:
            with io.open(nfo_file_path, 'rb') as xmlFileObj:
                showXML = etree.ElementTree(file=xmlFileObj)

            indexerid = showXML.find('id')

            root = showXML.getroot()
            if indexerid is not None:
                # id already correct: nothing to rewrite
                if indexerid.text == str(show_obj.indexerid):
                    return True
                indexerid.text = str(show_obj.indexerid)
            else:
                # no <id> element yet: add one
                etree.SubElement(root, "id").text = str(show_obj.indexerid)

            # Make it purdy
            helpers.indentXML(root)

            showXML.write(nfo_file_path, encoding='UTF-8')
            helpers.chmodAsParent(nfo_file_path)

            return True
        except IOError as e:
            logger.log(
                "Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e),
                logger.ERROR)
def update_library(self, ep_obj=None, host=None, username=None, password=None,
                   plex_server_token=None, force=True):
    """Handles updating the Plex Media Server host via HTTP API

    Plex Media Server currently only supports updating the whole video library
    and not a specific path.

    Returns:
        Returns None for no issue, else a string of host with connection issues

    """
    if sickbeard.USE_PLEX and sickbeard.PLEX_UPDATE_LIBRARY:

        if not sickbeard.PLEX_SERVER_HOST:
            logger.log(u'PLEX: No Plex Media Server host specified, check your settings', logger.DEBUG)
            return False

        # fall back on configured values for any parameter not passed in
        if not host:
            host = sickbeard.PLEX_SERVER_HOST
        if not username:
            username = sickbeard.PLEX_USERNAME
        if not password:
            password = sickbeard.PLEX_PASSWORD
        if not plex_server_token:
            plex_server_token = sickbeard.PLEX_SERVER_TOKEN

        # if username and password were provided, fetch the auth token from plex.tv
        token_arg = ''
        if plex_server_token:
            token_arg = '?X-Plex-Token=' + plex_server_token
        elif username and password:
            # NOTE(review): this section was garbled in the source (a '******'
            # redaction swallowed the log-call tail and the Request creation);
            # reconstructed from the upstream implementation -- confirm.
            logger.log(u'PLEX: fetching plex.tv credentials for user: ' + username, logger.DEBUG)
            req = urllib2.Request('https://plex.tv/users/sign_in.xml', data='')
            authheader = 'Basic %s' % base64.encodestring('%s:%s' % (username, password))[:-1]
            req.add_header('Authorization', authheader)
            req.add_header('X-Plex-Device-Name', 'SickRage')
            req.add_header('X-Plex-Product', 'SickRage Notifier')
            req.add_header('X-Plex-Client-Identifier', sickbeard.common.USER_AGENT)
            req.add_header('X-Plex-Version', '1.0')

            try:
                response = urllib2.urlopen(req)
                auth_tree = etree.parse(response)
                token = auth_tree.findall('.//authentication-token')[0].text
                token_arg = '?X-Plex-Token=' + token
            except urllib2.URLError as e:
                logger.log(u'PLEX: Error fetching credentials from from plex.tv for user %s: %s' % (username, ex(e)), logger.DEBUG)
            except (ValueError, IndexError) as e:
                logger.log(u'PLEX: Error parsing plex.tv response: ' + ex(e), logger.DEBUG)

        file_location = '' if None is ep_obj else ep_obj.location
        host_list = [x.strip() for x in host.split(',')]
        hosts_all = {}
        hosts_match = {}
        hosts_failed = []

        for cur_host in host_list:
            url = 'http://%s/library/sections%s' % (cur_host, token_arg)
            try:
                xml_tree = etree.parse(urllib.urlopen(url))
                media_container = xml_tree.getroot()
            # FIX: replace deprecated Py2-only `except IOError, e` comma syntax
            except IOError as e:
                logger.log(u'PLEX: Error while trying to contact Plex Media Server: ' + ex(e), logger.WARNING)
                hosts_failed.append(cur_host)
                continue
            except Exception as e:
                if 'invalid token' in str(e):
                    logger.log(u'PLEX: Please set TOKEN in Plex settings: ', logger.ERROR)
                else:
                    logger.log(u'PLEX: Error while trying to contact Plex Media Server: ' + ex(e), logger.ERROR)
                continue

            sections = media_container.findall('.//Directory')
            if not sections:
                logger.log(u'PLEX: Plex Media Server not running on: ' + cur_host, logger.DEBUG)
                hosts_failed.append(cur_host)
                continue

            for section in sections:
                if 'show' == section.attrib['type']:
                    # map section key -> host for the refresh step
                    keyed_host = [(str(section.attrib['key']), cur_host)]
                    hosts_all.update(keyed_host)
                    if not file_location:
                        continue

                    for section_location in section.findall('.//Location'):
                        # normalise slashes/case and strip a drive prefix before comparing
                        section_path = re.sub(r'[/\\]+', '/', section_location.attrib['path'].lower())
                        section_path = re.sub(r'^(.{,2})[/\\]', '', section_path)
                        location_path = re.sub(r'[/\\]+', '/', file_location.lower())
                        location_path = re.sub(r'^(.{,2})[/\\]', '', location_path)

                        if section_path in location_path:
                            hosts_match.update(keyed_host)
def _send_to_plex(self, command, host, username=None, password=None):
    """Handles communication to Plex hosts via HTTP API

    Args:
        command: Dictionary of field/data pairs, encoded via urllib and passed to the legacy xbmcCmds HTTP API
        host: Plex host:port
        username: Plex API username
        password: Plex API password

    Returns:
        Returns 'OK' for successful commands or False if there was an error

    """
    # fill in omitted parameters
    if not username:
        username = sickbeard.PLEX_CLIENT_USERNAME
    if not password:
        password = sickbeard.PLEX_CLIENT_PASSWORD

    if not host:
        logger.log(u'PLEX: No host specified, check your settings', logger.WARNING)
        return False

    for key in command:
        # FIX: isinstance() instead of exact type comparison, so unicode
        # subclasses get encoded too
        if isinstance(command[key], unicode):
            command[key] = command[key].encode('utf-8')

    enc_command = urllib.urlencode(command)
    logger.log(u'PLEX: Encoded API command: ' + enc_command, logger.DEBUG)

    url = u'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, enc_command)
    try:
        req = urllib2.Request(url)
        # if we have a password, use authentication
        if password:
            base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
            authheader = 'Basic %s' % base64string
            req.add_header('Authorization', authheader)
            logger.log(u'PLEX: Contacting (with auth header) via url: ' + url, logger.DEBUG)
        else:
            logger.log(u'PLEX: Contacting via url: ' + url, logger.DEBUG)

        response = urllib2.urlopen(req)
        result = response.read().decode(sickbeard.SYS_ENCODING)
        response.close()

        logger.log(u'PLEX: HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
        # could return the parsed body instead:
        #   re.compile('<html><li>(.+\w)</html>').findall(result)
        # (FIX: the original computed that regex result into an unused local)
        return 'OK'
    # FIX: replace deprecated Py2-only `except (...), e` comma syntax
    except (urllib2.URLError, IOError) as e:
        logger.log(u'PLEX: Warning: Couldn\'t contact Plex at ' + url + ' ' + ex(e), logger.WARNING)
        return False
def write_show_file(self, show_obj):
    """Render and write the show-level metadata file for ``show_obj``.

    The destination comes from get_show_file_path(); the parent directory is
    created (with parent permissions) when missing. Expects _show_data() to
    return an ElementTree object.

    :param show_obj: TVShow object for which to create the metadata
    :return: True on success, False when there is no data or the write fails
    """
    show_tree = self._show_data(show_obj)
    if not show_tree:
        return False

    target_path = self.get_show_file_path(show_obj)
    assert isinstance(target_path, six.text_type)
    target_dir = ek(os.path.dirname, target_path)

    try:
        if not ek(os.path.isdir, target_dir):
            logger.log("Metadata dir didn't exist, creating it at " + target_dir, logger.DEBUG)
            ek(os.makedirs, target_dir)
            helpers.chmodAsParent(target_dir)
        logger.log("Writing show nfo file to " + target_path, logger.DEBUG)

        out_handle = io.open(target_path, 'wb')
        show_tree.write(out_handle, encoding='UTF-8')
        out_handle.close()
        helpers.chmodAsParent(target_path)
    except IOError as write_err:
        logger.log("Unable to write file to " + target_path + " - are you sure the folder is writable? " + ex(write_err), logger.ERROR)
        return False

    return True
def unrar(path, rar_files, force, result):  # pylint: disable=too-many-branches,too-many-statements
    """
    Extracts RAR files

    :param path: Path to look for files in
    :param rar_files: Names of RAR files
    :param force: process currently processing items
    :param result: Previous results
    :return: List of unpacked file names
    """

    unpacked_dirs = []

    # sickbeard.UNPACK == 1 presumably means "unpack enabled" — other values
    # (e.g. treat-as-video) skip extraction entirely; TODO confirm against config
    if sickbeard.UNPACK == 1 and rar_files:
        result.output += log_helper(
            "Packed Releases detected: {0}".format(rar_files), logger.DEBUG)
        for archive in rar_files:
            # failure is (short reason, long reason) set by the except clauses
            # and reported after the finally block
            failure = None
            rar_handle = None
            try:
                archive_path = ek(os.path.join, path, archive)
                if already_processed(path, archive, force, result):
                    result.output += log_helper(
                        "Archive file already post-processed, extraction skipped: {0}".format(archive_path),
                        logger.DEBUG)
                    continue

                if not helpers.is_rar_file(archive_path):
                    continue

                result.output += log_helper(
                    "Checking if archive is valid and contains a video: {0}".format(archive_path),
                    logger.DEBUG)
                rar_handle = RarFile(archive_path)
                if rar_handle.needs_password():
                    # TODO: Add support in settings for a list of passwords to try here with rar_handle.set_password(x)
                    result.output += log_helper(
                        'Archive needs a password, skipping: {0}'.format(archive_path))
                    continue

                # rar_handle.testrar()

                # If there are no video files in the rar, don't extract it
                rar_media_files = filter(helpers.is_media_file, rar_handle.namelist())
                if not rar_media_files:
                    continue

                # release name is the archive filename without its extension
                rar_release_name = archive.rpartition('.')[0]

                # Choose the directory we'll unpack to:
                if sickbeard.UNPACK_DIR and os.path.isdir(sickbeard.UNPACK_DIR):  # verify the unpack dir exists
                    unpack_base_dir = sickbeard.UNPACK_DIR
                else:
                    unpack_base_dir = path
                    if sickbeard.UNPACK_DIR:  # Let user know if we can't unpack there
                        result.output += log_helper(
                            'Unpack directory cannot be verified. Using {0}'.format(path),
                            logger.DEBUG)

                # Fix up the list for checking if already processed
                rar_media_files = [
                    os.path.join(unpack_base_dir, rar_release_name, rar_media_file)
                    for rar_media_file in rar_media_files
                ]

                # skip the whole archive if any contained media file was
                # already handled by a previous run
                skip_rar = False
                for rar_media_file in rar_media_files:
                    check_path, check_file = os.path.split(rar_media_file)
                    if already_processed(check_path, check_file, force, result):
                        result.output += log_helper(
                            "Archive file already post-processed, extraction skipped: {0}".format(rar_media_file),
                            logger.DEBUG)
                        skip_rar = True
                        break

                if skip_rar:
                    continue

                rar_extract_path = ek(os.path.join, unpack_base_dir, rar_release_name)
                result.output += log_helper(
                    "Unpacking archive: {0}".format(archive), logger.DEBUG)
                rar_handle.extractall(path=rar_extract_path)
                unpacked_dirs.append(rar_extract_path)

            # map each rarfile error class to a (summary, detail) pair
            except RarCRCError:
                failure = ('Archive Broken',
                           'Unpacking failed because of a CRC error')
            except RarWrongPassword:
                failure = ('Incorrect RAR Password',
                           'Unpacking failed because of an Incorrect Rar Password')
            except PasswordRequired:
                failure = ('Rar is password protected',
                           'Unpacking failed because it needs a password')
            except RarOpenError:
                failure = ('Rar Open Error, check the parent folder and destination file permissions.',
                           'Unpacking failed with a File Open Error (file permissions?)')
            except RarExecError:
                failure = ('Invalid Rar Archive Usage',
                           'Unpacking Failed with Invalid Rar Archive Usage. Is unrar installed and on the system PATH?')
            except BadRarFile:
                failure = ('Invalid Rar Archive',
                           'Unpacking Failed with an Invalid Rar Archive Error')
            except NeedFirstVolume:
                # multi-part archive: only the first volume is extractable;
                # silently move on to the next archive
                continue
            except (Exception, Error) as e:
                failure = (ex(e), 'Unpacking failed')
            finally:
                # release the handle (and any open file descriptors) whether
                # extraction succeeded or not
                if rar_handle:
                    del rar_handle

            if failure:
                result.output += log_helper(
                    'Failed to extract the archive {0}: {1}'.format(archive, failure[0]),
                    logger.WARNING)
                result.missed_files.append(
                    '{0} : Unpacking failed: {1}'.format(archive, failure[1]))
                result.result = False
                continue

    return unpacked_dirs
try: logger.log(u"Deleting folder (if it's empty): " + folder) os.rmdir(folder) except (OSError, IOError), e: logger.log( u"Warning: unable to delete folder: " + folder + ": " + ex(e), logger.WARNING) return False else: try: logger.log(u"Deleting folder: " + folder) shutil.rmtree(folder) except (OSError, IOError), e: logger.log( u"Warning: unable to delete folder: " + folder + ": " + ex(e), logger.WARNING) return False return True def delete_files(processPath, notwantedFiles, result, force=False): """ Remove files from filesystem :param processPath: path to process :param notwantedFiles: files we do not want :param result: Processor results :param force: Boolean, force deletion, defaults to false """
def unRAR(path, rarFiles, force, result): """ Extracts RAR files :param path: Path to look for files in :param rarFiles: Names of RAR files :param force: process currently processing items :param result: Previous results :return: List of unpacked file names """ unpacked_files = [] if sickbeard.UNPACK and rarFiles: result.output += logHelper( u"Packed Releases detected: " + str(rarFiles), logger.DEBUG) for archive in rarFiles: result.output += logHelper(u"Unpacking archive: " + archive, logger.DEBUG) try: rar_handle = RarFile(os.path.join(path, archive)) # Skip extraction if any file in archive has previously been extracted skip_file = False for file_in_archive in [ os.path.basename(x.filename) for x in rar_handle.infolist() if not x.isdir ]: if already_postprocessed(path, file_in_archive, force, result): result.output += logHelper( u"Archive file already post-processed, extraction skipped: " + file_in_archive, logger.DEBUG) skip_file = True break if skip_file: continue rar_handle.extract(path=path, withSubpath=False, overwrite=False) for x in rar_handle.infolist(): if not x.isdir: basename = os.path.basename(x.filename) if basename not in unpacked_files: unpacked_files.append(basename) del rar_handle except ArchiveHeaderBroken as e: result.output += logHelper( u"Failed Unrar archive {0}: Unrar: Archive Header Broken". format(archive), logger.ERROR) result.result = False result.missedfiles.append( archive + " : Unpacking failed because the Archive Header is Broken") continue except IncorrectRARPassword: result.output += logHelper( u"Failed Unrar archive {0}: Unrar: Incorrect Rar Password". format(archive), logger.ERROR) result.result = False result.missedfiles.append( archive + " : Unpacking failed because of an Incorrect Rar Password") continue except FileOpenError: result.output += logHelper( u"Failed Unrar archive {0}: Unrar: File Open Error, check the parent folder and destination file permissions." 
.format(archive), logger.ERROR) result.result = False result.missedfiles.append( archive + " : Unpacking failed with a File Open Error (file permissions?)" ) continue except InvalidRARArchiveUsage: result.output += logHelper( u"Failed Unrar archive {0}: Unrar: Invalid Rar Archive Usage" .format(archive), logger.ERROR) result.result = False result.missedfiles.append( archive + " : Unpacking Failed with Invalid Rar Archive Usage") continue except InvalidRARArchive: result.output += logHelper( u"Failed Unrar archive {0}: Unrar: Invalid Rar Archive". format(archive), logger.ERROR) result.result = False result.missedfiles.append( archive + " : Unpacking Failed with an Invalid Rar Archive Error") continue except Exception, e: result.output += logHelper( u"Failed Unrar archive " + archive + ': ' + ex(e), logger.ERROR) result.result = False result.missedfiles.append( archive + " : Unpacking failed for an unknown reason") continue result.output += logHelper(u"UnRar content: " + str(unpacked_files), logger.DEBUG)
hosts_match.update(keyed_host) hosts_try = (hosts_all.copy(), hosts_match.copy())[bool(hosts_match)] host_list = [] for section_key, cur_host in hosts_try.iteritems(): url = 'http://%s/library/sections/%s/refresh%s' % ( cur_host, section_key, token_arg) try: force and urllib.urlopen(url) host_list.append(cur_host) except Exception, e: logger.log( u'PLEX: Error updating library section for Plex Media Server: ' + ex(e), logger.WARNING) hosts_failed.append(cur_host) if hosts_match: logger.log( u'PLEX: Updating hosts where TV section paths match the downloaded show: ' + ', '.join(set(host_list)), logger.DEBUG) else: logger.log( u'PLEX: Updating all hosts with TV sections: ' + ', '.join(set(host_list)), logger.DEBUG) return (', '.join(set(hosts_failed)), None)[not len(hosts_failed)] notifier = PLEXNotifier
def searchForNeededEpisodes():
    """
    Check providers for details on wanted episodes

    :return: episodes we have a search hit for
    """
    # maps episode -> best search result found so far
    foundResults = {}

    didSearch = False

    show_list = sickbeard.showList
    # ordinal 1 == earliest representable date, i.e. "no lower bound"
    fromDate = datetime.date.fromordinal(1)
    episodes = []

    # collect wanted episodes from every unpaused show
    for curShow in show_list:
        if not curShow.paused:
            sickbeard.name_cache.buildNameCache(curShow)
            episodes.extend(wantedEpisodes(curShow, fromDate))

    if not episodes:
        # nothing wanted so early out, ie: avoid whatever abritrarily
        # complex thing a provider cache update entails, for example,
        # reading rss feeds
        logger.log(u"No episodes needed.", logger.INFO)
        return foundResults.values()

    # the thread name is temporarily suffixed with the provider name so log
    # lines can be attributed; restored at the end
    origThreadName = threading.currentThread().name

    providers = [
        x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS)
        if x.is_active() and x.enable_daily
    ]
    # first pass: refresh every provider's cache
    for curProvider in providers:
        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
        curProvider.cache.updateCache()

    # second pass: run the RSS search against the refreshed caches
    for curProvider in providers:
        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
        curFoundResults = {}
        try:
            curFoundResults = curProvider.search_rss(episodes)
        except AuthException as e:
            logger.log(u"Authentication error: " + ex(e), logger.ERROR)
            continue
        except Exception as e:
            logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
            logger.log(traceback.format_exc(), logger.DEBUG)
            continue

        didSearch = True

        # pick a single result for each episode, respecting existing results
        for curEp in curFoundResults:
            if not curEp.show or curEp.show.paused:
                logger.log(u"Skipping {0} because the show is paused ".format(curEp.prettyName()), logger.DEBUG)
                continue

            bestResult = pickBestResult(curFoundResults[curEp], curEp.show)

            # if all results were rejected move on to the next episode
            if not bestResult:
                logger.log(u"All found results for " + curEp.prettyName() + " were rejected.", logger.DEBUG)
                continue

            # if it's already in the list (from another provider) and the
            # newly found quality is no better then skip it
            if curEp in foundResults and bestResult.quality <= foundResults[curEp].quality:
                continue

            foundResults[curEp] = bestResult

    threading.currentThread().name = origThreadName

    if not didSearch:
        logger.log(
            u"No NZB/Torrent providers found or enabled in the sickrage config for daily searches. Please check your settings.",
            logger.WARNING)

    return foundResults.values()
def retrieveShowMetadata(self, folder): """ Used only when mass adding Existing Shows, using previously generated Show metadata to reduce the need to query TVDB. """ empty_return = (None, None, None) metadata_path = ek(os.path.join, folder, self._show_metadata_filename) if not ek(os.path.isdir, folder) or not ek(os.path.isfile, metadata_path): logger.log(u"Can't load the metadata file from " + repr(metadata_path) + ", it doesn't exist", logger.DEBUG) return empty_return logger.log(u"Loading show info from metadata file in " + folder, logger.DEBUG) try: with ek(open, metadata_path, 'r') as xmlFileObj: showXML = etree.ElementTree(file=xmlFileObj) if showXML.findtext('title') == None \ or (showXML.findtext('tvdbid') == None and showXML.findtext('id') == None): logger.log(u"Invalid info in tvshow.nfo (missing name or id):" \ + str(showXML.findtext('title')) + " " \ + str(showXML.findtext('tvdbid')) + " " \ + str(showXML.findtext('id'))) return empty_return name = showXML.findtext('title') if showXML.findtext('tvdbid') != None: indexer_id = int(showXML.findtext('tvdbid')) elif showXML.findtext('id') != None: indexer_id = int(showXML.findtext('id')) else: logger.log(u"Empty <id> or <tvdbid> field in NFO, unable to find a ID", logger.WARNING) return empty_return if indexer_id is None: logger.log(u"Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file", logger.WARNING) return empty_return indexer = None if showXML.find('episodeguide/url') != None: epg_url = showXML.findtext('episodeguide/url').lower() if str(indexer_id) in epg_url: if 'thetvdb.com' in epg_url: indexer = 1 elif 'tvrage' in epg_url: logger.log(u"Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file because it has TVRage info", logger.WARNING) return empty_return except Exception, e: logger.log( u"There was an error parsing your existing metadata file: '" + metadata_path + "' error: " + ex(e), logger.WARNING) return empty_return
if check_files: logger.log(u"Not deleting folder " + folder + " found the following files: " + str(check_files), logger.INFO) return False try: logger.log(u"Deleting folder (if it's empty): " + folder) os.rmdir(folder) except (OSError, IOError), e: logger.log(u"Warning: unable to delete folder: " + folder + ": " + ex(e), logger.WARNING) return False else: try: logger.log(u"Deleting folder: " + folder) shutil.rmtree(folder) except (OSError, IOError), e: logger.log(u"Warning: unable to delete folder: " + folder + ": " + ex(e), logger.WARNING) return False return True def delete_files(processPath, notwantedFiles, result, force=False): """ Remove files from filesystem :param processPath: path to process :param notwantedFiles: files we do not want :param result: Processor results :param force: Boolean, force deletion, defaults to false """
def _sendPushover(self, msg, title, sound=None, userKey=None, apiKey=None): """ Sends a pushover notification to the address provided msg: The message to send (unicode) title: The title of the message sound: The notification sound to use userKey: The pushover user id to send the message to (or to subscribe with) apiKey: The pushover api key to use returns: True if the message succeeded, False otherwise """ if userKey is None: userKey = sickbeard.PUSHOVER_USERKEY if apiKey is None: apiKey = sickbeard.PUSHOVER_APIKEY if sound is None: sound = sickbeard.PUSHOVER_SOUND logger.log(u"Pushover API KEY in use: " + apiKey, logger.DEBUG) # build up the URL and parameters msg = msg.strip() # send the request to pushover try: if sickbeard.PUSHOVER_SOUND != "default": args = { "token": apiKey, "user": userKey, "title": title.encode('utf-8'), "message": msg.encode('utf-8'), "timestamp": int(time.time()), "retry": 60, "expire": 3600, "sound": sound, } else: # sound is default, so don't send it args = { "token": apiKey, "user": userKey, "title": title.encode('utf-8'), "message": msg.encode('utf-8'), "timestamp": int(time.time()), "retry": 60, "expire": 3600, } if sickbeard.PUSHOVER_DEVICE: args["device"] = sickbeard.PUSHOVER_DEVICE conn = httplib.HTTPSConnection("api.pushover.net:443") conn.request("POST", "/1/messages.json", urllib.urlencode(args), {"Content-type": "application/x-www-form-urlencoded"}) except urllib2.HTTPError, e: # if we get an error back that doesn't have an error code then who knows what's really happening if not hasattr(e, 'code'): logger.log(u"Pushover notification failed." + ex(e), logger.ERROR) return False else: logger.log( u"Pushover notification failed. Error code: " + str(e.code), logger.ERROR) # HTTP status 404 if the provided email address isn't a Pushover user. if e.code == 404: logger.log( u"Username is wrong/not a pushover email. 
Pushover will send an email to it", logger.WARNING) return False # For HTTP status code 401's, it is because you are passing in either an invalid token, or the user has not added your service. elif e.code == 401: # HTTP status 401 if the user doesn't have the service added subscribeNote = self._sendPushover(msg, title, sound=sound, userKey=userKey, apiKey=apiKey) if subscribeNote: logger.log(u"Subscription sent", logger.DEBUG) return True else: logger.log(u"Subscription could not be sent", logger.ERROR) return False # If you receive an HTTP status code of 400, it is because you failed to send the proper parameters elif e.code == 400: logger.log(u"Wrong data sent to pushover", logger.ERROR) return False # If you receive a HTTP status code of 429, it is because the message limit has been reached (free limit is 7,500) elif e.code == 429: logger.log( u"Pushover API message limit reached - try a different API key", logger.ERROR) return False
def run(self):
    """Refresh this show's data from the indexer and IMDb, then reconcile
    the episode list in the database with what the indexer returns."""

    ShowQueueItem.run(self)

    logger.log(u"Beginning update of " + self.show.name, logger.DEBUG)

    logger.log(u"Retrieving show info from " + sickbeard.indexerApi(self.show.indexer).name + "", logger.DEBUG)
    try:
        # self.force bypasses the indexer cache for a fresh fetch
        self.show.loadFromIndexer(cache=not self.force)
    except sickbeard.indexer_error as e:
        logger.log(u"Unable to contact " + sickbeard.indexerApi(self.show.indexer).name + ", aborting: " + ex(e),
                   logger.WARNING)
        return
    except sickbeard.indexer_attributenotfound as e:
        logger.log(u"Data retrieved from " + sickbeard.indexerApi(self.show.indexer).name +
                   " was incomplete, aborting: " + ex(e), logger.ERROR)
        return

    logger.log(u"Retrieving show info from IMDb", logger.DEBUG)
    try:
        self.show.loadIMDbInfo()
    except imdb_exceptions.IMDbError as e:
        # IMDb data is supplementary; failures are logged but non-fatal
        logger.log(u" Something wrong on IMDb api: " + ex(e), logger.WARNING)
    except Exception as e:
        logger.log(u"Error loading IMDb info: " + ex(e), logger.ERROR)
        logger.log(traceback.format_exc(), logger.DEBUG)

    # have to save show before reading episodes from db
    try:
        self.show.saveToDB()
    except Exception as e:
        logger.log(u"Error saving show info to the database: " + ex(e), logger.ERROR)
        logger.log(traceback.format_exc(), logger.DEBUG)

    # get episode list from DB
    logger.log(u"Loading all episodes from the database", logger.DEBUG)
    DBEpList = self.show.loadEpisodesFromDB()

    # get episode list from TVDB
    logger.log(u"Loading all episodes from " + sickbeard.indexerApi(self.show.indexer).name + "", logger.DEBUG)
    try:
        IndexerEpList = self.show.loadEpisodesFromIndexer(cache=not self.force)
    except sickbeard.indexer_exception as e:
        logger.log(u"Unable to get info from " + sickbeard.indexerApi(self.show.indexer).name +
                   ", the show info will not be refreshed: " + ex(e), logger.ERROR)
        IndexerEpList = None

    if IndexerEpList is None:
        logger.log(u"No data returned from " + sickbeard.indexerApi(self.show.indexer).name +
                   ", unable to update this show", logger.ERROR)
    else:
        # for each ep we found on the Indexer delete it from the DB list
        # (what remains in DBEpList afterwards exists only locally)
        for curSeason in IndexerEpList:
            for curEpisode in IndexerEpList[curSeason]:
                curEp = self.show.getEpisode(curSeason, curEpisode)
                curEp.saveToDB()

                if curSeason in DBEpList and curEpisode in DBEpList[curSeason]:
                    del DBEpList[curSeason][curEpisode]

        # remaining episodes in the DB list are not on the indexer, just delete them from the DB
        for curSeason in DBEpList:
            for curEpisode in DBEpList[curSeason]:
                logger.log(u"Permanently deleting episode " + str(curSeason) + "x" + str(curEpisode) +
                           " from the database", logger.INFO)
                curEp = self.show.getEpisode(curSeason, curEpisode)
                try:
                    curEp.deleteEpisode()
                except EpisodeDeletedException:
                    # already gone; nothing to do
                    pass

    # save show again, in case episodes have changed
    try:
        self.show.saveToDB()
    except Exception as e:
        logger.log(u"Error saving show info to the database: " + ex(e), logger.ERROR)
        logger.log(traceback.format_exc(), logger.DEBUG)

    logger.log(u"Finished update of " + self.show.name, logger.DEBUG)

    # queue a refresh so on-disk state is re-scanned after the update
    sickbeard.showQueueScheduler.action.refreshShow(self.show, self.force)

    self.finish()
def delete_files(processPath, notwantedFiles, result, force=False):
    """
    Remove files from filesystem

    :param processPath: path to process
    :param notwantedFiles: files we do not want
    :param result: Processor results
    :param force: Boolean, force deletion, defaults to false
    """
    # only delete when the previous processing step succeeded, unless forced
    if not result.result and force:
        result.output += logHelper(u"Forcing deletion of files, even though last result was not success", logger.DEBUG)
    elif not result.result:
        return

    # Delete all file not needed
    for cur_file in notwantedFiles:
        cur_file_path = ek(os.path.join, processPath, cur_file)

        if not ek(os.path.isfile, cur_file_path):
            continue  # Prevent error when a notwantedfiles is an associated files

        result.output += logHelper(u"Deleting file " + cur_file, logger.DEBUG)

        # check first the read-only attribute
        file_attribute = ek(os.stat, cur_file_path)[0]
        if not file_attribute & stat.S_IWRITE:
            # File is read-only, so make it writeable
            result.output += logHelper(u"Changing ReadOnly Flag for file " + cur_file, logger.DEBUG)
            try:
                ek(os.chmod, cur_file_path, stat.S_IWRITE)
            except OSError as e:
                # best-effort: deletion below may still fail, which is logged
                result.output += logHelper(u"Cannot change permissions of " + cur_file_path + ': ' + ex(e),
                                           logger.DEBUG)
        try:
            ek(os.remove, cur_file_path)
        except OSError as e:
            result.output += logHelper(u"Unable to delete file " + cur_file + ': ' + str(e.strerror), logger.DEBUG)
def update(self): """ Downloads the latest source tarball from github and installs it over the existing version. """ base_url = 'http://github.com/' + self.github_org + '/' + self.github_repo tar_download_url = base_url + '/tarball/' + self.branch try: # prepare the update dir sr_update_dir = ek(os.path.join, sickbeard.PROG_DIR, u'sr-update') if os.path.isdir(sr_update_dir): logger.log(u"Clearing out update folder " + sr_update_dir + " before extracting") shutil.rmtree(sr_update_dir) logger.log(u"Creating update folder " + sr_update_dir + " before extracting") os.makedirs(sr_update_dir) # retrieve file logger.log(u"Downloading update from " + repr(tar_download_url)) tar_download_path = os.path.join(sr_update_dir, u'sr-update.tar') helpers.download_file(tar_download_url, tar_download_path, session=self.session) if not ek(os.path.isfile, tar_download_path): logger.log(u"Unable to retrieve new version from " + tar_download_url + ", can't update", logger.WARNING) return False if not ek(tarfile.is_tarfile, tar_download_path): logger.log(u"Retrieved version from " + tar_download_url + " is corrupt, can't update", logger.ERROR) return False # extract to sr-update dir logger.log(u"Extracting file " + tar_download_path) tar = tarfile.open(tar_download_path) tar.extractall(sr_update_dir) tar.close() # delete .tar.gz logger.log(u"Deleting file " + tar_download_path) os.remove(tar_download_path) # find update dir name update_dir_contents = [x for x in os.listdir(sr_update_dir) if os.path.isdir(os.path.join(sr_update_dir, x))] if len(update_dir_contents) != 1: logger.log(u"Invalid update data, update failed: " + str(update_dir_contents), logger.ERROR) return False content_dir = os.path.join(sr_update_dir, update_dir_contents[0]) # walk temp folder and move files to main folder logger.log(u"Moving files from " + content_dir + " to " + sickbeard.PROG_DIR) for dirname, dirnames, filenames in os.walk(content_dir): # @UnusedVariable dirname = dirname[len(content_dir) + 1:] for 
curfile in filenames: old_path = os.path.join(content_dir, dirname, curfile) new_path = os.path.join(sickbeard.PROG_DIR, dirname, curfile) # Avoid DLL access problem on WIN32/64 # These files needing to be updated manually #or find a way to kill the access from memory if curfile in ('unrar.dll', 'unrar64.dll'): try: os.chmod(new_path, stat.S_IWRITE) os.remove(new_path) os.renames(old_path, new_path) except Exception, e: logger.log(u"Unable to update " + new_path + ': ' + ex(e), logger.DEBUG) os.remove(old_path) # Trash the updated file without moving in new path continue if os.path.isfile(new_path): os.remove(new_path) os.renames(old_path, new_path) sickbeard.CUR_COMMIT_HASH = self._newest_commit_hash sickbeard.CUR_COMMIT_BRANCH = self.branch
def run(self):
    """Delete this show from the library; optionally purge it from the
    user's Trakt library when Trakt integration is enabled."""
    ShowQueueItem.run(self)

    show = self.show
    logger.log(u"Removing %s" % show.name)
    show.deleteShow(full=self.full)

    if sickbeard.USE_TRAKT:
        # best-effort: a Trakt failure must not abort the local removal
        try:
            sickbeard.traktCheckerScheduler.action.removeShowFromTraktLibrary(show)
        except Exception as e:
            logger.log(u"Unable to delete show from Trakt: %s. Error: %s" % (show.name, ex(e)), logger.WARNING)

    self.finish()
def _make_url(self, result): if not result: return '', '' urls = [] filename = u'' if result.url.startswith('magnet'): try: torrent_hash = re.findall(r'urn:btih:([\w]{32,40})', result.url)[0].upper() try: torrent_name = re.findall('dn=([^&]+)', result.url)[0] except Exception: torrent_name = 'NO_DOWNLOAD_NAME' if len(torrent_hash) == 32: torrent_hash = b16encode(b32decode(torrent_hash)).upper() if not torrent_hash: logger.log(u'Unable to extract torrent hash from magnet: {0!s}'.format(ex(result.url)), logger.ERROR) return urls, filename urls = [x.format(torrent_hash=torrent_hash, torrent_name=torrent_name) for x in self.bt_cache_urls] except Exception: logger.log(u'Unable to extract torrent hash or name from magnet: {0!s}'.format(ex(result.url)), logger.ERROR) return urls, filename else: urls = [result.url] filename = ek(join, self._get_storage_dir(), sanitize_filename(result.name) + '.' + self.provider_type) return urls, filename
def run(self):
    """Add a new show: validate it against the indexer, create the TVShow
    object with configured defaults, load its episodes, write metadata,
    and kick off Trakt/XEM/backlog follow-up work."""

    ShowQueueItem.run(self)

    logger.log(u"Starting to add show {0}".format("by ShowDir: {0}".format(self.showDir) if self.showDir else "by Indexer Id: {0}".format(self.indexer_id)))

    # make sure the Indexer IDs are valid
    try:
        lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
        if self.lang:
            lINDEXER_API_PARMS['language'] = self.lang

        logger.log(u"" + str(sickbeard.indexerApi(self.indexer).name) + ": " + repr(lINDEXER_API_PARMS))

        t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
        s = t[self.indexer_id]

        # Let's try to create the show Dir if it's not provided. This way we force the show dir to build build using the
        # Indexers provided series name
        if not self.showDir and self.root_dir:
            show_name = get_showname_from_indexer(self.indexer, self.indexer_id, self.lang)
            if show_name:
                self.showDir = ek(os.path.join, self.root_dir, sanitize_filename(show_name))
                dir_exists = makeDir(self.showDir)
                if not dir_exists:
                    logger.log(u"Unable to create the folder {0}, can't add the show".format(self.showDir))
                    return

                chmodAsParent(self.showDir)
            else:
                logger.log(u"Unable to get a show {0}, can't add the show".format(self.showDir))
                return

        # this usually only happens if they have an NFO in their show dir which gave us a Indexer ID that has no proper english version of the show
        if getattr(s, 'seriesname', None) is None:
            logger.log(u"Show in {} has no name on {}, probably searched with the wrong language.".format(
                self.showDir, sickbeard.indexerApi(self.indexer).name), logger.ERROR)

            ui.notifications.error("Unable to add show",
                                   "Show in " + self.showDir + " has no name on " + str(sickbeard.indexerApi(
                                       self.indexer).name) + ", probably the wrong language. Delete .nfo and add manually in the correct language.")
            self._finishEarly()
            return
        # if the show has no episodes/seasons
        if not s:
            logger.log(u"Show " + str(s['seriesname']) + " is on " + str(
                sickbeard.indexerApi(self.indexer).name) + " but contains no season/episode data.")
            ui.notifications.error("Unable to add show",
                                   "Show " + str(s['seriesname']) + " is on " + str(sickbeard.indexerApi(
                                       self.indexer).name) + " but contains no season/episode data.")
            self._finishEarly()
            return
    except Exception as e:
        logger.log(u"%s Error while loading information from indexer %s. Error: %r" % (self.indexer_id, sickbeard.indexerApi(self.indexer).name, ex(e)), logger.ERROR)
        # logger.log(u"Show name with ID %s doesn't exist on %s anymore. If you are using trakt, it will be removed from your TRAKT watchlist. If you are adding manually, try removing the nfo and adding again" %
        #            (self.indexer_id, sickbeard.indexerApi(self.indexer).name), logger.WARNING)

        ui.notifications.error(
            "Unable to add show",
            "Unable to look up the show in %s on %s using ID %s, not using the NFO. Delete .nfo and try adding manually again." %
            (self.showDir, sickbeard.indexerApi(self.indexer).name, self.indexer_id)
        )

        # the show could not be looked up, so also drop it from the Trakt
        # watchlist to stop it being re-suggested
        if sickbeard.USE_TRAKT:
            trakt_id = sickbeard.indexerApi(self.indexer).config['trakt_id']
            trakt_api = TraktAPI(sickbeard.SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)

            title = self.showDir.split("/")[-1]
            data = {
                'shows': [
                    {
                        'title': title,
                        'ids': {}
                    }
                ]
            }
            if trakt_id == 'tvdb_id':
                data['shows'][0]['ids']['tvdb'] = self.indexer_id
            else:
                data['shows'][0]['ids']['tvrage'] = self.indexer_id

            trakt_api.traktRequest("sync/watchlist/remove", data, method='POST')

        self._finishEarly()
        return

    try:
        newShow = TVShow(self.indexer, self.indexer_id, self.lang)
        newShow.loadFromIndexer()

        self.show = newShow

        # set up initial values (explicit queue-item settings win over
        # the configured global defaults)
        self.show.location = self.showDir
        self.show.subtitles = self.subtitles if self.subtitles is not None else sickbeard.SUBTITLES_DEFAULT
        self.show.quality = self.quality if self.quality else sickbeard.QUALITY_DEFAULT
        self.show.flatten_folders = self.flatten_folders if self.flatten_folders is not None else sickbeard.FLATTEN_FOLDERS_DEFAULT
        self.show.anime = self.anime if self.anime is not None else sickbeard.ANIME_DEFAULT
        self.show.scene = self.scene if self.scene is not None else sickbeard.SCENE_DEFAULT
        self.show.paused = self.paused if self.paused is not None else False

        # set up default new/missing episode status
        logger.log(u"Setting all episodes to the specified default status: " + str(self.show.default_ep_status))
        self.show.default_ep_status = self.default_status

        if self.show.anime:
            self.show.release_groups = BlackAndWhiteList(self.show.indexerid)
            if self.blacklist:
                self.show.release_groups.set_black_keywords(self.blacklist)
            if self.whitelist:
                self.show.release_groups.set_white_keywords(self.whitelist)

        # # be smartish about this
        # if self.show.genre and "talk show" in self.show.genre.lower():
        #     self.show.air_by_date = 1
        # if self.show.genre and "documentary" in self.show.genre.lower():
        #     self.show.air_by_date = 0
        # if self.show.classification and "sports" in self.show.classification.lower():
        #     self.show.sports = 1

    except sickbeard.indexer_exception as e:
        logger.log(
            u"Unable to add show due to an error with " + sickbeard.indexerApi(self.indexer).name + ": " + ex(e),
            logger.ERROR)
        if self.show:
            ui.notifications.error(
                "Unable to add " + str(self.show.name) + " due to an error with " + sickbeard.indexerApi(
                    self.indexer).name + "")
        else:
            ui.notifications.error(
                "Unable to add show due to an error with " + sickbeard.indexerApi(self.indexer).name + "")
        self._finishEarly()
        return

    except MultipleShowObjectsException:
        logger.log(u"The show in " + self.showDir + " is already in your show list, skipping", logger.WARNING)
        ui.notifications.error('Show skipped',
                               "The show in " + self.showDir + " is already in your show list")
        self._finishEarly()
        return

    except Exception as e:
        # unexpected failure: re-raise after cleanup so the queue surfaces it
        logger.log(u"Error trying to add show: " + ex(e), logger.ERROR)
        logger.log(traceback.format_exc(), logger.DEBUG)
        self._finishEarly()
        raise

    logger.log(u"Retrieving show info from IMDb", logger.DEBUG)
    try:
        self.show.loadIMDbInfo()
    except imdb_exceptions.IMDbError as e:
        # IMDb info is supplementary; failures are non-fatal
        logger.log(u" Something wrong on IMDb api: " + ex(e), logger.WARNING)
    except Exception as e:
        logger.log(u"Error loading IMDb info: " + ex(e), logger.ERROR)

    try:
        self.show.saveToDB()
    except Exception as e:
        logger.log(u"Error saving the show to the database: " + ex(e), logger.ERROR)
        logger.log(traceback.format_exc(), logger.DEBUG)
        self._finishEarly()
        raise

    # add it to the show list
    sickbeard.showList.append(self.show)

    try:
        self.show.loadEpisodesFromIndexer()
    except Exception as e:
        logger.log(
            u"Error with " + sickbeard.indexerApi(self.show.indexer).name + ", not creating episode list: " + ex(e),
            logger.ERROR)
        logger.log(traceback.format_exc(), logger.DEBUG)

    # update internal name cache
    name_cache.buildNameCache(self.show)

    try:
        self.show.loadEpisodesFromDir()
    except Exception as e:
        logger.log(u"Error searching dir for episodes: " + ex(e), logger.ERROR)
        logger.log(traceback.format_exc(), logger.DEBUG)

    # if they set default ep status to WANTED then run the backlog to search for episodes
    # FIXME: This needs to be a backlog queue item!!!
    if self.show.default_ep_status == WANTED:
        logger.log(u"Launching backlog for this show since its episodes are WANTED")
        sickbeard.backlogSearchScheduler.action.searchBacklog([self.show])

    self.show.writeMetadata()
    self.show.updateMetadata()
    self.show.populateCache()

    self.show.flushEpisodes()

    if sickbeard.USE_TRAKT:
        # if there are specific episodes that need to be added by trakt
        sickbeard.traktCheckerScheduler.action.manageNewShow(self.show)

        # add show to trakt.tv library
        if sickbeard.TRAKT_SYNC:
            sickbeard.traktCheckerScheduler.action.addShowToTraktLibrary(self.show)

        if sickbeard.TRAKT_SYNC_WATCHLIST:
            logger.log(u"update watchlist")
            notifiers.trakt_notifier.update_watchlist(show_obj=self.show)

    # Load XEM data to DB for show
    sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer, force=True)

    # check if show has XEM mapping so we can determin if searches should go by scene numbering or indexer numbering.
    if not self.scene and sickbeard.scene_numbering.get_xem_numbering_for_show(self.show.indexerid,
                                                                               self.show.indexer):
        self.show.scene = 1

    # After initial add, set to default_status_after.
    self.show.default_ep_status = self.default_status_after

    self.finish()
def _ep_data(self, ep_obj): """ Creates an elementTree XML structure for an KODI-style episode.nfo and returns the resulting data object. show_obj: a TVEpisode instance to create the NFO for """ eps_to_write = [ep_obj] + ep_obj.relatedEps indexer_lang = ep_obj.show.lang lINDEXER_API_PARMS = sickbeard.indexerApi( ep_obj.show.indexer).api_params.copy() lINDEXER_API_PARMS['actors'] = True lINDEXER_API_PARMS[ 'language'] = indexer_lang or sickbeard.INDEXER_DEFAULT_LANGUAGE if ep_obj.show.dvdorder: lINDEXER_API_PARMS['dvdorder'] = True try: t = sickbeard.indexerApi( ep_obj.show.indexer).indexer(**lINDEXER_API_PARMS) myShow = t[ep_obj.show.indexerid] except sickbeard.indexer_shownotfound as e: raise ShowNotFoundException(e.message) except sickbeard.indexer_error as e: logger.log( "Unable to connect to " + sickbeard.indexerApi(ep_obj.show.indexer).name + " while creating meta files - skipping - " + ex(e), logger.ERROR) return if len(eps_to_write) > 1: rootNode = etree.Element("kodimultiepisode") else: rootNode = etree.Element("episodedetails") # write an NFO containing info for all matching episodes for curEpToWrite in eps_to_write: try: myEp = myShow[curEpToWrite.season][curEpToWrite.episode] except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound): logger.log( "Metadata writer is unable to find episode {0:d}x{1:d} of {2} on {3}..." "has it been removed? 
Should I delete from db?".format( curEpToWrite.season, curEpToWrite.episode, curEpToWrite.show.name, sickbeard.indexerApi(ep_obj.show.indexer).name)) return None if not getattr(myEp, 'firstaired', None): myEp["firstaired"] = str(datetime.date.fromordinal(1)) if not getattr(myEp, 'episodename', None): logger.log("Not generating nfo because the ep has no title", logger.DEBUG) return None logger.log( "Creating metadata for episode " + str(ep_obj.season) + "x" + str(ep_obj.episode), logger.DEBUG) if len(eps_to_write) > 1: episode = etree.SubElement(rootNode, "episodedetails") else: episode = rootNode if getattr(myEp, 'episodename', None): title = etree.SubElement(episode, "title") title.text = myEp['episodename'] if getattr(myShow, 'seriesname', None): showtitle = etree.SubElement(episode, "showtitle") showtitle.text = myShow['seriesname'] season = etree.SubElement(episode, "season") season.text = str(curEpToWrite.season) episodenum = etree.SubElement(episode, "episode") episodenum.text = str(curEpToWrite.episode) uniqueid = etree.SubElement(episode, "uniqueid") uniqueid.text = str(curEpToWrite.indexerid) if curEpToWrite.airdate != datetime.date.fromordinal(1): aired = etree.SubElement(episode, "aired") aired.text = str(curEpToWrite.airdate) if getattr(myEp, 'overview', None): plot = etree.SubElement(episode, "plot") plot.text = myEp['overview'] if curEpToWrite.season and getattr(myShow, 'runtime', None): runtime = etree.SubElement(episode, "runtime") runtime.text = myShow["runtime"] if getattr(myEp, 'airsbefore_season', None): displayseason = etree.SubElement(episode, "displayseason") displayseason.text = myEp['airsbefore_season'] if getattr(myEp, 'airsbefore_episode', None): displayepisode = etree.SubElement(episode, "displayepisode") displayepisode.text = myEp['airsbefore_episode'] if getattr(myEp, 'filename', None): thumb = etree.SubElement(episode, "thumb") thumb.text = myEp['filename'].strip() # watched = etree.SubElement(episode, "watched") # watched.text = 
'false' if getattr(myEp, 'rating', None): rating = etree.SubElement(episode, "rating") rating.text = myEp['rating'] if getattr(myEp, 'writer', None) and isinstance( myEp['writer'], six.string_types): for writer in self._split_info(myEp['writer']): cur_writer = etree.SubElement(episode, "credits") cur_writer.text = writer if getattr(myEp, 'director', None) and isinstance( myEp['director'], six.string_types): for director in self._split_info(myEp['director']): cur_director = etree.SubElement(episode, "director") cur_director.text = director if getattr(myEp, 'gueststars', None) and isinstance( myEp['gueststars'], six.string_types): for actor in self._split_info(myEp['gueststars']): cur_actor = etree.SubElement(episode, "actor") cur_actor_name = etree.SubElement(cur_actor, "name") cur_actor_name.text = actor if getattr(myShow, '_actors', None): for actor in myShow['_actors']: cur_actor = etree.SubElement(episode, "actor") if 'name' in actor and actor['name'].strip(): cur_actor_name = etree.SubElement(cur_actor, "name") cur_actor_name.text = actor['name'].strip() else: continue if 'role' in actor and actor['role'].strip(): cur_actor_role = etree.SubElement(cur_actor, "role") cur_actor_role.text = actor['role'].strip() if 'image' in actor and actor['image'].strip(): cur_actor_thumb = etree.SubElement(cur_actor, "thumb") cur_actor_thumb.text = actor['image'].strip() # Make it purdy helpers.indentXML(rootNode) data = etree.ElementTree(rootNode) return data
class MediaBrowserMetadata(generic.GenericMetadata):
    """
    Metadata generation class for Media Browser 2.x/3.x - Standard Mode.

    The following file structure is used:

    show_root/series.xml                       (show metadata)
    show_root/folder.jpg                       (poster)
    show_root/backdrop.jpg                     (fanart)
    show_root/Season ##/folder.jpg             (season thumb)
    show_root/Season ##/filename.ext           (*)
    show_root/Season ##/metadata/filename.xml  (episode metadata)
    show_root/Season ##/metadata/filename.jpg  (episode thumb)
    """

    def __init__(self,
                 show_metadata=False,
                 episode_metadata=False,
                 fanart=False,
                 poster=False,
                 banner=False,
                 episode_thumbnails=False,
                 season_posters=False,
                 season_banners=False,
                 season_all_poster=False,
                 season_all_banner=False):

        generic.GenericMetadata.__init__(self,
                                         show_metadata,
                                         episode_metadata,
                                         fanart,
                                         poster,
                                         banner,
                                         episode_thumbnails,
                                         season_posters,
                                         season_banners,
                                         season_all_poster,
                                         season_all_banner)

        self.name = 'MediaBrowser'

        self._ep_nfo_extension = 'xml'
        self._show_metadata_filename = 'series.xml'

        self.fanart_name = "backdrop.jpg"
        self.poster_name = "folder.jpg"

        # web-ui metadata template
        self.eg_show_metadata = "series.xml"
        self.eg_episode_metadata = "Season##\\metadata\\<i>filename</i>.xml"
        self.eg_fanart = "backdrop.jpg"
        self.eg_poster = "folder.jpg"
        self.eg_banner = "banner.jpg"
        self.eg_episode_thumbnails = "Season##\\metadata\\<i>filename</i>.jpg"
        self.eg_season_posters = "Season##\\folder.jpg"
        self.eg_season_banners = "Season##\\banner.jpg"
        self.eg_season_all_poster = "<i>not supported</i>"
        self.eg_season_all_banner = "<i>not supported</i>"

    # Override with empty methods for unsupported features
    def retrieveShowMetadata(self, folder):
        # while show metadata is generated, it is not supported for our lookup
        return None, None, None

    def create_season_all_poster(self, show_obj):
        pass

    def create_season_all_banner(self, show_obj):
        pass

    def get_episode_file_path(self, ep_obj):
        """
        Returns a full show dir/metadata/episode.xml path for MediaBrowser
        episode metadata files.

        ep_obj: a TVEpisode object to get the path for
        """
        # The xml lives in a 'metadata' subdir next to the video file, so the
        # episode must exist on disk before a path can be built.
        if not ek(os.path.isfile, ep_obj.location):
            logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG)
            return ''

        nfo_name = replace_extension(ek(os.path.basename, ep_obj.location), self._ep_nfo_extension)
        nfo_dir = ek(os.path.join, ek(os.path.dirname, ep_obj.location), 'metadata')
        return ek(os.path.join, nfo_dir, nfo_name)

    @staticmethod
    def get_episode_thumb_path(ep_obj):
        """
        Returns a full show dir/metadata/episode.jpg path for MediaBrowser
        episode thumbs.

        ep_obj: a TVEpisode object to get the path from
        """
        if not ek(os.path.isfile, ep_obj.location):
            return None

        thumb_name = replace_extension(ek(os.path.basename, ep_obj.location), 'jpg')
        thumb_dir = ek(os.path.join, ek(os.path.dirname, ep_obj.location), 'metadata')
        return ek(os.path.join, thumb_dir, thumb_name)

    @staticmethod
    def get_season_poster_path(show_obj, season):
        """
        Season thumbs for MediaBrowser go in Show Dir/Season X/folder.jpg

        If no season folder exists, None is returned
        """

        # only look at directories directly under the show root
        subdirs = [candidate for candidate in ek(os.listdir, show_obj.location)
                   if ek(os.path.isdir, ek(os.path.join, show_obj.location, candidate))]

        season_dir_regex = r'^Season\s+(\d+)$'

        season_dir = None

        for candidate in subdirs:
            # MediaBrowser 1.x only supports 'Specials'
            # MediaBrowser 2.x looks to only support 'Season 0'
            # MediaBrowser 3.x looks to mimic KODI/Plex support
            if season == 0 and candidate == "Specials":
                season_dir = candidate
                break

            match = re.match(season_dir_regex, candidate, re.I)
            if not match:
                continue

            if int(match.group(1)) == season:
                season_dir = candidate
                break

        if not season_dir:
            logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG)
            return None

        logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG)

        return ek(os.path.join, show_obj.location, season_dir, 'folder.jpg')
@staticmethod def get_season_banner_path(show_obj, season): """ Season thumbs for MediaBrowser go in Show Dir/Season X/banner.jpg If no season folder exists, None is returned """ dir_list = [x for x in ek(os.listdir, show_obj.location) if ek(os.path.isdir, ek(os.path.join, show_obj.location, x))] season_dir_regex = r'^Season\s+(\d+)$' season_dir = None for cur_dir in dir_list: # MediaBrowser 1.x only supports 'Specials' # MediaBrowser 2.x looks to only support 'Season 0' # MediaBrowser 3.x looks to mimic KODI/Plex support if season == 0 and cur_dir == "Specials": season_dir = cur_dir break match = re.match(season_dir_regex, cur_dir, re.I) if not match: continue cur_season = int(match.group(1)) if cur_season == season: season_dir = cur_dir break if not season_dir: logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG) return None logger.log(u"Using " + str(season_dir) + "/banner.jpg as season dir for season " + str(season), logger.DEBUG) return ek(os.path.join, show_obj.location, season_dir, 'banner.jpg') def _show_data(self, show_obj): """ Creates an elementTree XML structure for a MediaBrowser-style series.xml returns the resulting data object. 
show_obj: a TVShow instance to create the NFO for """ indexer_lang = show_obj.lang # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere lINDEXER_API_PARMS = sickbeard.indexerApi(show_obj.indexer).api_params.copy() lINDEXER_API_PARMS['actors'] = True if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE: lINDEXER_API_PARMS['language'] = indexer_lang if show_obj.dvdorder != 0: lINDEXER_API_PARMS['dvdorder'] = True t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS) tv_node = etree.Element("Series") try: myShow = t[int(show_obj.indexerid)] except sickbeard.indexer_shownotfound: logger.log(u"Unable to find show with id " + str(show_obj.indexerid) + " on " + sickbeard.indexerApi( show_obj.indexer).name + ", skipping it", logger.ERROR) raise except sickbeard.indexer_error: logger.log( u"" + sickbeard.indexerApi(show_obj.indexer).name + " is down, can't use its data to make the NFO", logger.ERROR) raise # check for title and id if not (getattr(myShow, 'seriesname', None) and getattr(myShow, 'id', None)): logger.log(u"Incomplete info for show with id " + str(show_obj.indexerid) + " on " + sickbeard.indexerApi( show_obj.indexer).name + ", skipping it") return False if getattr(myShow, 'id', None): indexerid = etree.SubElement(tv_node, "id") indexerid.text = str(myShow['id']) if getattr(myShow, 'seriesname', None): SeriesName = etree.SubElement(tv_node, "SeriesName") SeriesName.text = myShow['seriesname'] if getattr(myShow, 'status', None): Status = etree.SubElement(tv_node, "Status") Status.text = myShow['status'] if getattr(myShow, 'network', None): Network = etree.SubElement(tv_node, "Network") Network.text = myShow['network'] if getattr(myShow, 'airs_time', None): Airs_Time = etree.SubElement(tv_node, "Airs_Time") Airs_Time.text = myShow['airs_time'] if getattr(myShow, 'airs_dayofweek', None): Airs_DayOfWeek = etree.SubElement(tv_node, "Airs_DayOfWeek") Airs_DayOfWeek.text = 
myShow['airs_dayofweek'] FirstAired = etree.SubElement(tv_node, "FirstAired") if getattr(myShow, 'firstaired', None): FirstAired.text = myShow['firstaired'] if getattr(myShow, 'contentrating', None): ContentRating = etree.SubElement(tv_node, "ContentRating") ContentRating.text = myShow['contentrating'] MPAARating = etree.SubElement(tv_node, "MPAARating") MPAARating.text = myShow['contentrating'] certification = etree.SubElement(tv_node, "certification") certification.text = myShow['contentrating'] MetadataType = etree.SubElement(tv_node, "Type") MetadataType.text = "Series" if getattr(myShow, 'overview', None): Overview = etree.SubElement(tv_node, "Overview") Overview.text = myShow['overview'] if getattr(myShow, 'firstaired', None): PremiereDate = etree.SubElement(tv_node, "PremiereDate") PremiereDate.text = myShow['firstaired'] if getattr(myShow, 'rating', None): Rating = etree.SubElement(tv_node, "Rating") Rating.text = myShow['rating'] if getattr(myShow, 'firstaired', None): try: year_text = str(datetime.datetime.strptime(myShow['firstaired'], dateFormat).year) if year_text: ProductionYear = etree.SubElement(tv_node, "ProductionYear") ProductionYear.text = year_text except Exception: pass if getattr(myShow, 'runtime', None): RunningTime = etree.SubElement(tv_node, "RunningTime") RunningTime.text = myShow['runtime'] Runtime = etree.SubElement(tv_node, "Runtime") Runtime.text = myShow['runtime'] if getattr(myShow, 'imdb_id', None): imdb_id = etree.SubElement(tv_node, "IMDB_ID") imdb_id.text = myShow['imdb_id'] imdb_id = etree.SubElement(tv_node, "IMDB") imdb_id.text = myShow['imdb_id'] imdb_id = etree.SubElement(tv_node, "IMDbId") imdb_id.text = myShow['imdb_id'] if getattr(myShow, 'zap2it_id', None): Zap2ItId = etree.SubElement(tv_node, "Zap2ItId") Zap2ItId.text = myShow['zap2it_id'] if getattr(myShow, 'genre', None) and isinstance(myShow["genre"], basestring): Genres = etree.SubElement(tv_node, "Genres") for genre in myShow['genre'].split('|'): if genre.strip(): 
cur_genre = etree.SubElement(Genres, "Genre") cur_genre.text = genre.strip() Genre = etree.SubElement(tv_node, "Genre") Genre.text = "|".join([x.strip() for x in myShow["genre"].split('|') if x.strip()]) if getattr(myShow, 'network', None): Studios = etree.SubElement(tv_node, "Studios") Studio = etree.SubElement(Studios, "Studio") Studio.text = myShow['network'] if getattr(myShow, '_actors', None): Persons = etree.SubElement(tv_node, "Persons") for actor in myShow['_actors']: if not ('name' in actor and actor['name'].strip()): continue cur_actor = etree.SubElement(Persons, "Person") cur_actor_name = etree.SubElement(cur_actor, "Name") cur_actor_name.text = actor['name'].strip() cur_actor_type = etree.SubElement(cur_actor, "Type") cur_actor_type.text = "Actor" if 'role' in actor and actor['role'].strip(): cur_actor_role = etree.SubElement(cur_actor, "Role") cur_actor_role.text = actor['role'].strip() helpers.indentXML(tv_node) data = etree.ElementTree(tv_node) return data def _ep_data(self, ep_obj): """ Creates an elementTree XML structure for a MediaBrowser style episode.xml and returns the resulting data object. 
show_obj: a TVShow instance to create the NFO for """ eps_to_write = [ep_obj] + ep_obj.relatedEps persons_dict = { 'Director': [], 'GuestStar': [], 'Writer': [] } indexer_lang = ep_obj.show.lang try: lINDEXER_API_PARMS = sickbeard.indexerApi(ep_obj.show.indexer).api_params.copy() lINDEXER_API_PARMS['actors'] = True if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE: lINDEXER_API_PARMS['language'] = indexer_lang if ep_obj.show.dvdorder != 0: lINDEXER_API_PARMS['dvdorder'] = True t = sickbeard.indexerApi(ep_obj.show.indexer).indexer(**lINDEXER_API_PARMS) myShow = t[ep_obj.show.indexerid] except sickbeard.indexer_shownotfound, e: raise ShowNotFoundException(e.message) except sickbeard.indexer_error, e: logger.log(u"Unable to connect to " + sickbeard.indexerApi( ep_obj.show.indexer).name + " while creating meta files - skipping - " + ex(e), logger.ERROR) return False