def send_growl(title , msg, gtype): """ Send Growl message """ global _GROWL, _GROWL_REG for n in (0, 1): if not _GROWL_REG: _GROWL = None if not _GROWL: _GROWL, error = register_growl() if _GROWL: assert isinstance(_GROWL, GrowlNotifier) _GROWL_REG = True if not isinstance(msg, str) and not isinstance(msg, unicode): msg = str(msg) logging.debug('Send to Growl: %s %s %s', gtype, latin1(title), latin1(msg)) try: ret = _GROWL.notify( noteType = Tx(NOTIFICATION.get(gtype, 'other')), title = title, description = unicoder(msg), ) if ret is None or isinstance(ret, bool): return None elif ret[0] == '401': _GROWL = False else: logging.debug('Growl error %s', ret) return 'Growl error %s', ret except socket.error, err: error = 'Growl error %s' % err logging.debug(error) return error except:
def repair_job(self, folder, new_nzb=None):
    """ Reconstruct the admin of a single job folder; a new NZB may be supplied """
    def _sets_ok(admin_path):
        """ Return True when every set in this job verified successfully """
        # A missing/empty verification file counts as "not verified"
        data = sabnzbd.load_data(VERIFIED_FILE, admin_path, remove=False) or {'x': False}
        return all(data[key] for key in data)

    job_name = os.path.basename(folder)
    admin_path = os.path.join(folder, JOB_ADMIN)
    filename = getattr(new_nzb, 'filename', '')

    if filename:
        # Fresh NZB supplied: drop any stored NZBs and parse the new one
        remove_all(admin_path, '*.gz')
        logging.debug('Repair job %s with new NZB (%s)', latin1(job_name), latin1(filename))
        sabnzbd.add_nzbfile(new_nzb, pp=None, script=None, cat=None, priority=None,
                            nzbname=job_name, reuse=True)
    else:
        # Only fall back to a stored NZB when verification is incomplete
        stored = ''
        if not _sets_ok(admin_path):
            stored = globber(admin_path, '*.gz')
        if len(stored) > 0:
            logging.debug('Repair job %s by reparsing stored NZB', latin1(job_name))
            sabnzbd.add_nzbfile(stored[0], pp=None, script=None, cat=None, priority=None,
                                nzbname=job_name, reuse=True)
        else:
            # No stored NZB available: rebuild the queue entry from the folder alone
            logging.debug('Repair job %s without stored NZB', latin1(job_name))
            nzo = NzbObject(job_name, 0, pp=None, script=None, nzb='', cat=None,
                            priority=None, nzbname=job_name, reuse=True)
            self.add(nzo)
def repair_job(self, folder, new_nzb=None):
    """ Reconstruct admin for a single job folder, optionally with new NZB
        Returns the nzo_id of the (re)created job.
    """
    def all_verified(path):
        """ Return True when all sets have been successfully verified """
        # Missing verification data defaults to one unverified entry,
        # so an absent file counts as "not verified"
        verified = sabnzbd.load_data(VERIFIED_FILE, path, remove=False) or { 'x': False }
        return not bool([True for x in verified if not verified[x]])

    nzo_id = None
    name = os.path.basename(folder)
    path = os.path.join(folder, JOB_ADMIN)
    if hasattr(new_nzb, 'filename'):
        filename = new_nzb.filename
    else:
        filename = ''
    if not filename:
        # No new NZB supplied: reuse a stored NZB only when verification is incomplete
        if not all_verified(path):
            filename = globber(path, '*.gz')
        if len(filename) > 0:
            logging.debug('Repair job %s by reparsing stored NZB', latin1(name))
            # add_nzbfile() returns a tuple; element 1 is the nzo_id
            nzo_id = sabnzbd.add_nzbfile(filename[0], pp=None, script=None, cat=None, priority=None, nzbname=name, reuse=True)[1]
        else:
            # Nothing stored: rebuild the queue entry from the folder alone
            logging.debug('Repair job %s without stored NZB', latin1(name))
            nzo = NzbObject(name, 0, pp=None, script=None, nzb='', cat=None, priority=None, nzbname=name, reuse=True)
            self.add(nzo)
            nzo_id = nzo.nzo_id
    else:
        # New NZB supplied: discard stored NZBs and parse the new one
        remove_all(path, '*.gz')
        logging.debug('Repair job %s with new NZB (%s)', latin1(name), latin1(filename))
        nzo_id = sabnzbd.add_nzbfile(new_nzb, pp=None, script=None, cat=None, priority=None, nzbname=name, reuse=True)[1]
    return nzo_id
def run(self):
    """ Assembler main loop: take (nzo, nzf) jobs from the queue and write
        the decoded article data to disk; a falsy job shuts the thread down.
    """
    import sabnzbd.nzbqueue
    while 1:
        job = self.queue.get()
        if not job:
            logging.info("Shutting down")
            break
        nzo, nzf = job
        if nzf:
            sabnzbd.CheckFreeSpace()
            # Sanitize before writing; the sanitized name is stored back on the nzf
            filename = sanitize_filename(nzf.filename)
            nzf.filename = filename
            dupe = nzo.check_for_dupe(nzf)
            filepath = get_filepath(cfg.download_dir.get_path(), nzo, filename)
            if filepath:
                logging.info('Decoding %s %s', filepath, nzf.type)
                try:
                    filepath = _assemble(nzf, filepath, dupe)
                except IOError, (errno, strerror):
                    # NOTE(review): unlike the later variant of this loop, disk
                    # errors are reported even when the job was already deleted
                    # 28 == disk full => pause downloader
                    if errno == 28:
                        logging.error(Ta('Disk full! Forcing Pause'))
                    else:
                        logging.error(Ta('Disk error on creating file %s'), latin1(filepath))
                    # Pause without saving
                    sabnzbd.downloader.Downloader.do.pause(save=False)
                except:
                    # Any other failure is fatal for the assembler thread
                    logging.error('Fatal error in Assembler', exc_info = True)
                    break
def repair_job(self, folder, new_nzb=None):
    """ Reconstruct admin for a single job folder, optionally with new NZB

        folder  : job folder to repair
        new_nzb : optional uploaded NZB (object with a `filename` attribute)
    """
    name = os.path.basename(folder)
    path = os.path.join(folder, JOB_ADMIN)
    if new_nzb is None or not new_nzb.filename:
        # No usable new NZB: fall back to an NZB stored in the job's admin folder
        filename = globber(path, '*.gz')
        if len(filename) > 0:
            logging.debug('Repair job %s by reparsing stored NZB', latin1(name))
            sabnzbd.add_nzbfile(filename[0], pp=None, script=None, cat=None, priority=None, nzbname=name, reuse=True)
        else:
            # Nothing stored: rebuild the queue entry from the folder alone
            logging.debug('Repair job %s without stored NZB', latin1(name))
            nzo = NzbObject(name, 0, pp=None, script=None, nzb='', cat=None, priority=None, nzbname=name, reuse=True)
            self.add(nzo)
    else:
        # A new NZB was supplied: discard stored NZBs and parse the new one
        remove_all(path, '*.gz')
        # BUG FIX: message previously said 'without new NZB' although this
        # branch handles the case where a new NZB IS supplied
        logging.debug('Repair job %s with new NZB (%s)', latin1(name), latin1(new_nzb.filename))
        sabnzbd.add_nzbfile(new_nzb, pp=None, script=None, cat=None, priority=None, nzbname=name, reuse=True)
def send_notify_osd(title, message):
    """ Send a message to NotifyOSD """
    global _NTFOSD
    error = 'NotifyOSD not working'
    if not sabnzbd.cfg.ntfosd_enable():
        return 'Not enabled'

    icon = os.path.join(sabnzbd.DIR_PROG, 'sabnzbd.ico')
    # Initialise the library once and keep the handle in the module global
    if not _NTFOSD:
        _NTFOSD = pynotify.init('icon-summary-body')
    if not _NTFOSD:
        return error

    logging.info('Send to NotifyOSD: %s / %s', latin1(title), latin1(message))
    try:
        pynotify.Notification(title, message, icon).show()
    except:
        # Apparently not implemented on this system
        logging.info(error)
        return error
    return None
def get_serv_parms(service):
    """ Get the service command line parameters from Registry

        Returns the list of parameter strings (passed through latin1),
        or an empty list when the key or the parameter value is missing.
    """
    import _winreg
    value = []
    try:
        key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, _SERVICE_KEY + service)
        for n in xrange(_winreg.QueryInfoKey(key)[1]):
            name, data, val_type = _winreg.EnumValue(key, n)
            if name == _SERVICE_PARM:
                # BUG FIX: previously the loop target itself was `value`, so when
                # _SERVICE_PARM was never matched, the LAST enumerated registry
                # value (possibly a plain string) leaked out as the result
                value = data
                break
        _winreg.CloseKey(key)
    except WindowsError:
        # No such service key: return the empty default
        pass
    for n in xrange(len(value)):
        value[n] = latin1(value[n])
    return value
def linux_shutdown(): """ Make Linux system shutdown, never returns """ if not HAVE_DBUS: os._exit(0) proxy, interface = _get_sessionproxy() if proxy: if proxy.CanShutdown(): proxy.Shutdown(dbus_interface=interface) else: proxy, interface, pinterface = _get_systemproxy('ConsoleKit') if proxy: if proxy.CanStop(dbus_interface=interface): try: proxy.Stop(dbus_interface=interface) except dbus.exceptions.DBusException, msg: logging.info('Received a DBus exception %s', latin1(msg)) else:
def run(self):
    """ Assembler main loop: take (nzo, nzf) jobs from the queue and write
        the decoded article data to disk; a falsy job shuts the thread down.
    """
    import sabnzbd.nzbqueue
    while 1:
        job = self.queue.get()
        if not job:
            logging.info("Shutting down")
            break
        nzo, nzf = job
        if nzf:
            sabnzbd.CheckFreeSpace()
            # Sanitize before writing; the sanitized name is stored back on the nzf
            filename = sanitize_filename(nzf.filename)
            nzf.filename = filename
            dupe = nzo.check_for_dupe(nzf)
            filepath = get_filepath(cfg.download_dir.get_path(), nzo, filename)
            if filepath:
                logging.info('Decoding %s %s', filepath, nzf.type)
                try:
                    filepath = _assemble(nzf, filepath, dupe)
                except IOError, (errno, strerror):
                    if nzo.deleted:
                        # Job was deleted, ignore error
                        pass
                    else:
                        # 28 == disk full => pause downloader
                        if errno == 28:
                            logging.error(Ta('Disk full! Forcing Pause'))
                        else:
                            logging.error(
                                Ta('Disk error on creating file %s'),
                                latin1(filepath))
                        # Pause without saving
                        sabnzbd.downloader.Downloader.do.pause(save=False)
                except:
                    # Any other failure is fatal for the assembler thread
                    logging.error('Fatal error in Assembler', exc_info=True)
                    break
def linux_standby(): """ Make Linux system go into standby, returns after wakeup """ if not HAVE_DBUS: return proxy, interface = _get_sessionproxy() if proxy: if proxy.CanSuspend(): proxy.Suspend(dbus_interface=interface) else: proxy, interface, pinterface = _get_systemproxy('UPower') if not proxy: proxy, interface, pinterface = _get_systemproxy('DeviceKit') if proxy: if proxy.Get(interface, 'can-suspend', dbus_interface=pinterface): try: proxy.Suspend(dbus_interface=interface) except dbus.exceptions.DBusException, msg: logging.info('Received a DBus exception %s', latin1(msg)) else:
def get_descriptions(nzo, match, name):
    """ Derive an episode description for the job.

        Prefer an explicit 'episodename' from the NZB metadata; otherwise use
        the part of `name` after the matched item, separated either like
        ' - Description' or '_-_Description'.
        Returns the (spaced, dotted, underscored) variants of the description.
    """
    description = ''
    if nzo:
        description = latin1(nzo.nzo_info.get('episodename') or nzo.meta.get('episodename', (None,))[0])
    if not description:
        # Fall back to the text after the match (needs work for multi-ep support)
        description = name[match.end():] if match else name

    description = description.strip(' _.')
    if description.startswith('-'):
        description = description.strip('- _.')
    # A fully dotted name is really a spaced name in disguise
    if '.' in description and ' ' not in description:
        description = description.replace('.', ' ')
    description = description.replace('_', ' ')

    dotted = description.replace(" - ", "-").replace(" ", ".")
    underscored = description.replace(" ", "_")
    return description, dotted, underscored
def errormsg(msg):
    """ Log a message at error level (latin-1 safe) and return it unchanged """
    text = latin1(msg)
    logging.error(text)
    return msg
def run_feed(self, feed=None, download=False, ignoreFirst=False, force=False, readout=True):
    """ Run the query for one URI and apply filters

        feed        : name of the feed as defined in the configuration
        download    : when True, matching entries are actually queued
        ignoreFirst : mark entries of a never-seen feed instead of downloading
        force       : download even when the entry was handled before
        readout     : when False, re-evaluate the stored entries without fetching

        Returns '' on success, an error message string otherwise
        (or None when shut down mid-scan).
    """
    self.shutdown = False

    def dup_title(title):
        """ Check if this title was in this or other feeds
            Return matching feed name
        """
        title = title.lower()
        for fd in self.jobs:
            for lk in self.jobs[fd]:
                item = self.jobs[fd][lk]
                # 'D' status means the entry was downloaded before
                if item.get('status', ' ')[0] == 'D' and \
                   item.get('title', '').lower() == title:
                    return fd
        return ''

    if not feed:
        return 'No such feed'

    newlinks = []
    new_downloads = []

    # Preparations, get options
    try:
        feeds = config.get_rss()[feed]
    except KeyError:
        logging.error(Ta('Incorrect RSS feed description "%s"'), feed)
        logging.info("Traceback: ", exc_info=True)
        return T('Incorrect RSS feed description "%s"') % feed

    uri = feeds.uri()
    # Normalize "default" settings to None so filters can override them
    defCat = feeds.cat()
    if not notdefault(defCat):
        defCat = None
    defPP = feeds.pp()
    if not notdefault(defPP):
        defPP = None
    defScript = feeds.script()
    if not notdefault(defScript):
        defScript = None
    defPrio = feeds.priority()
    if not notdefault(defPrio):
        defPrio = None

    # Preparations, convert filters to regex's
    regexes = []
    reTypes = []
    reCats = []
    rePPs = []
    rePrios = []
    reScripts = []
    reEnabled = []
    for filter in feeds.filters():
        reCat = filter[0]
        if defCat in ('', '*'):
            reCat = None
        reCats.append(reCat)
        rePPs.append(filter[1])
        reScripts.append(filter[2])
        reTypes.append(filter[3])
        regexes.append(convert_filter(filter[4]))
        rePrios.append(filter[5])
        reEnabled.append(filter[6] != '0')
    regcount = len(regexes)

    # Set first if this is the very first scan of this URI
    first = (feed not in self.jobs) and ignoreFirst

    # Add sabnzbd's custom User Agent
    feedparser.USER_AGENT = 'SABnzbd+/%s' % sabnzbd.version.__version__

    # Check for nzbs.org
    if 'nzbs.org/' in uri and not ('&dl=1' in uri):
        uri += '&dl=1'

    # Read the RSS feed
    msg = None
    entries = None
    if readout:
        uri = uri.replace(' ', '%20')
        logging.debug("Running feedparser on %s", uri)
        d = feedparser.parse(uri.replace('feed://', 'http://'))
        logging.debug("Done parsing %s", uri)
        if not d:
            msg = Ta('Failed to retrieve RSS from %s: %s') % (uri, '?')
            logging.info(msg)
            return unicoder(msg)
        status = d.get('status', 999)
        if status in (401, 402, 403):
            msg = Ta('Do not have valid authentication for feed %s') % feed
            logging.info(msg)
            return unicoder(msg)
        entries = d.get('entries')
        if 'bozo_exception' in d and not entries:
            msg = Ta('Failed to retrieve RSS from %s: %s') % (
                uri, xml_name(str(d['bozo_exception'])))
            logging.info(msg)
            return unicoder(msg)
        if not entries:
            msg = Ta('RSS Feed %s was empty') % uri
            logging.info(msg)
    if feed not in self.jobs:
        self.jobs[feed] = {}
    jobs = self.jobs[feed]
    if readout:
        if not entries:
            return unicoder(msg)
    else:
        # Re-evaluate the already stored links instead of fetching
        entries = jobs.keys()
    order = 0
    # Filter out valid new links
    for entry in entries:
        if self.shutdown:
            return
        if readout:
            try:
                link, category = _get_link(uri, entry)
            except (AttributeError, IndexError):
                link = None
                category = ''
                logging.info(Ta('Incompatible feed') + ' ' + uri)
                logging.info("Traceback: ", exc_info=True)
                return T('Incompatible feed')
            category = latin1(category)
            # Make sure only latin-1 encodable characters occur
            atitle = latin1(entry.title)
            title = unicoder(atitle)
        else:
            link = entry
            category = jobs[link].get('orgcat', '')
            if category in ('', '*'):
                category = None
            atitle = latin1(jobs[link].get('title', ''))
            title = unicoder(atitle)
        if link:
            # Make sure spaces are quoted in the URL
            if 'nzbclub.com' in link:
                link, path = os.path.split(link.strip())
                link = '%s/%s' % (link, urllib.quote(path))
            else:
                link = link.strip().replace(' ', '%20')
            newlinks.append(link)
            if link in jobs:
                jobstat = jobs[link].get('status', ' ')[0]
            else:
                jobstat = 'N'
            if jobstat in 'NGB' or (jobstat == 'X' and readout):
                # Match this title against all filters
                logging.debug('Trying title %s', atitle)
                result = False
                myCat = defCat
                myPP = defPP
                myScript = defScript
                myPrio = defPrio
                n = 0
                # Match against all filters until a positive or negative match
                for n in xrange(regcount):
                    if reEnabled[n]:
                        if category and reTypes[n] == 'C':
                            # Category ('C') filter: reject when the regex does not match
                            found = re.search(regexes[n], category)
                            if not found:
                                logging.debug("Filter rejected on rule %d", n)
                                result = False
                                break
                        else:
                            if regexes[n]:
                                found = re.search(regexes[n], title)
                            else:
                                found = False
                            # 'M' = must match, 'A' = accept, 'R' = reject
                            if reTypes[n] == 'M' and not found:
                                logging.debug("Filter rejected on rule %d", n)
                                result = False
                                break
                            if found and reTypes[n] == 'A':
                                logging.debug("Filter matched on rule %d", n)
                                result = True
                                break
                            if found and reTypes[n] == 'R':
                                logging.debug("Filter rejected on rule %d", n)
                                result = False
                                break
                if len(reCats):
                    # Resolve category/pp/script/priority from the deciding rule `n`
                    if notdefault(reCats[n]):
                        myCat = reCats[n]
                    elif category and not defCat:
                        myCat = cat_convert(category)
                    if myCat:
                        myCat, catPP, catScript, catPrio = cat_to_opts(
                            myCat)
                    else:
                        myCat = catPP = catScript = catPrio = None
                    if notdefault(rePPs[n]):
                        myPP = rePPs[n]
                    elif not (reCats[n] or category):
                        myPP = catPP
                    if notdefault(reScripts[n]):
                        myScript = reScripts[n]
                    elif not (notdefault(reCats[n]) or category):
                        myScript = catScript
                    if rePrios[n] not in (str(DEFAULT_PRIORITY), ''):
                        myPrio = rePrios[n]
                    elif not ((rePrios[n] != str(DEFAULT_PRIORITY)) or category):
                        myPrio = catPrio
                # Duplicate handling: mode 1 skips, otherwise demote priority
                if cfg.no_dupes() and dup_title(title):
                    if cfg.no_dupes() == 1:
                        logging.info("Ignoring duplicate job %s", atitle)
                        continue
                    else:
                        myPrio = DUP_PRIORITY
                act = download and not first
                if link in jobs:
                    # A trailing '*' in the status means "marked, not downloaded"
                    act = act and not jobs[link].get('status', '').endswith('*')
                    act = act or force
                    star = first or jobs[link].get('status', '').endswith('*')
                else:
                    star = first
                if result:
                    _HandleLink(jobs, link, title, 'G', category, myCat, myPP,
                                myScript, act, star, order, priority=myPrio, rule=str(n))
                    if act:
                        new_downloads.append(title)
                else:
                    _HandleLink(jobs, link, title, 'B', category, myCat, myPP,
                                myScript, False, star, order, priority=myPrio, rule=str(n))
            order += 1
    # Send email if wanted and not "forced"
    if new_downloads and cfg.email_rss() and not force:
        emailer.rss_mail(feed, new_downloads)
    remove_obsolete(jobs, newlinks)
    return ''
class Assembler(Thread): do = None # Link to the instance of this method def __init__(self, queue=None): Thread.__init__(self) if queue: self.queue = queue else: self.queue = Queue.Queue() Assembler.do = self def stop(self): self.process(None) def process(self, job): self.queue.put(job) def run(self): import sabnzbd.nzbqueue while 1: job = self.queue.get() if not job: logging.info("Shutting down") break nzo, nzf = job if nzf: sabnzbd.CheckFreeSpace() filename = sanitize_filename(nzf.filename) nzf.filename = filename dupe = nzo.check_for_dupe(nzf) filepath = get_filepath(cfg.download_dir.get_path(), nzo, filename) if filepath: logging.info('Decoding %s %s', filepath, nzf.type) try: filepath = _assemble(nzf, filepath, dupe) except IOError, (errno, strerror): if nzo.deleted: # Job was deleted, ignore error pass else: # 28 == disk full => pause downloader if errno == 28: logging.error(Ta('Disk full! Forcing Pause')) else: logging.error( Ta('Disk error on creating file %s'), latin1(filepath)) # Pause without saving sabnzbd.downloader.Downloader.do.pause(save=False) except: logging.error('Fatal error in Assembler', exc_info=True) break nzf.remove_admin() setname = nzf.setname if nzf.is_par2 and (nzo.md5packs.get(setname) is None): pack = GetMD5Hashes(filepath)[0] if pack: nzo.md5packs[setname] = pack logging.debug('Got md5pack for set %s', setname) if check_encrypted_rar(nzo, filepath): if cfg.pause_on_pwrar() == 1: logging.warning( Ta('WARNING: Paused job "%s" because of encrypted RAR file' ), latin1(nzo.final_name)) nzo.pause() else: logging.warning( Ta('WARNING: Aborted job "%s" because of encrypted RAR file' ), latin1(nzo.final_name)) nzo.fail_msg = T('Aborted, encryption detected') import sabnzbd.nzbqueue sabnzbd.nzbqueue.NzbQueue.do.end_job(nzo) nzf.completed = True
def run_feed(self, feed=None, download=False, ignoreFirst=False, force=False, readout=True):
    """ Run the query for one URI and apply filters

        feed        : name of the feed as defined in the configuration
        download    : when True, matching entries are actually queued
        ignoreFirst : mark entries of a never-seen feed instead of downloading
        force       : download even when the entry was handled before
        readout     : when False, re-evaluate the stored entries without fetching

        Returns '' on success, an error message string otherwise
        (or None when shut down mid-scan).
    """
    self.shutdown = False

    def dup_title(title):
        """ Check if this title was in this or other feeds
            Return matching feed name
        """
        title = title.lower()
        for fd in self.jobs:
            for lk in self.jobs[fd]:
                item = self.jobs[fd][lk]
                # 'D' status means the entry was downloaded before
                if item.get('status', ' ')[0] == 'D' and \
                   item.get('title', '').lower() == title:
                    return fd
        return ''

    if not feed:
        return 'No such feed'

    newlinks = []
    new_downloads = []

    # Preparations, get options
    try:
        feeds = config.get_rss()[feed]
    except KeyError:
        logging.error(Ta('Incorrect RSS feed description "%s"'), feed)
        logging.info("Traceback: ", exc_info = True)
        return T('Incorrect RSS feed description "%s"') % feed

    uri = feeds.uri()
    # Normalize "default" settings to None so filters can override them
    defCat = feeds.cat()
    if not notdefault(defCat):
        defCat = None
    defPP = feeds.pp()
    if not notdefault(defPP):
        defPP = None
    defScript = feeds.script()
    if not notdefault(defScript):
        defScript = None
    defPrio = feeds.priority()
    if not notdefault(defPrio):
        defPrio = None

    # Preparations, convert filters to regex's
    regexes = []
    reTypes = []
    reCats = []
    rePPs = []
    rePrios = []
    reScripts = []
    reEnabled = []
    for filter in feeds.filters():
        reCat = filter[0]
        if defCat in ('', '*'):
            reCat = None
        reCats.append(reCat)
        rePPs.append(filter[1])
        reScripts.append(filter[2])
        reTypes.append(filter[3])
        regexes.append(convert_filter(filter[4]))
        rePrios.append(filter[5])
        reEnabled.append(filter[6] != '0')
    regcount = len(regexes)

    # Set first if this is the very first scan of this URI
    first = (feed not in self.jobs) and ignoreFirst

    # Add sabnzbd's custom User Agent
    feedparser.USER_AGENT = 'SABnzbd+/%s' % sabnzbd.version.__version__

    # Check for nzbs.org
    if 'nzbs.org/' in uri and not ('&dl=1' in uri):
        uri += '&dl=1'

    # Read the RSS feed
    msg = None
    entries = None
    if readout:
        uri = uri.replace(' ', '%20')
        logging.debug("Running feedparser on %s", uri)
        d = feedparser.parse(uri.replace('feed://', 'http://'))
        logging.debug("Done parsing %s", uri)
        if not d:
            msg = Ta('Failed to retrieve RSS from %s: %s') % (uri, '?')
            logging.info(msg)
            return unicoder(msg)
        status = d.get('status', 999)
        if status in (401, 402, 403):
            msg = Ta('Do not have valid authentication for feed %s') % feed
            logging.info(msg)
            return unicoder(msg)
        entries = d.get('entries')
        if 'bozo_exception' in d and not entries:
            msg = Ta('Failed to retrieve RSS from %s: %s') % (uri, xml_name(str(d['bozo_exception'])))
            logging.info(msg)
            return unicoder(msg)
        if not entries:
            msg = Ta('RSS Feed %s was empty') % uri
            logging.info(msg)
    if feed not in self.jobs:
        self.jobs[feed] = {}
    jobs = self.jobs[feed]
    if readout:
        if not entries:
            return unicoder(msg)
    else:
        # Re-evaluate the already stored links instead of fetching
        entries = jobs.keys()
    order = 0
    # Filter out valid new links
    for entry in entries:
        if self.shutdown:
            return
        if readout:
            try:
                link, category = _get_link(uri, entry)
            except (AttributeError, IndexError):
                link = None
                category = ''
                logging.info(Ta('Incompatible feed') + ' ' + uri)
                logging.info("Traceback: ", exc_info = True)
                return T('Incompatible feed')
            category = latin1(category)
            # Make sure only latin-1 encodable characters occur
            atitle = latin1(entry.title)
            title = unicoder(atitle)
        else:
            link = entry
            category = jobs[link].get('orgcat', '')
            if category in ('', '*'):
                category = None
            atitle = latin1(jobs[link].get('title', ''))
            title = unicoder(atitle)
        if link:
            # Make sure spaces are quoted in the URL
            if 'nzbclub.com' in link:
                link, path = os.path.split(link.strip())
                link = '%s/%s' % (link, urllib.quote(path))
            else:
                link = link.strip().replace(' ','%20')
            newlinks.append(link)
            if link in jobs:
                jobstat = jobs[link].get('status', ' ')[0]
            else:
                jobstat = 'N'
            if jobstat in 'NGB' or (jobstat == 'X' and readout):
                # Match this title against all filters
                logging.debug('Trying title %s', atitle)
                result = False
                myCat = defCat
                myPP = defPP
                myScript = defScript
                myPrio = defPrio
                n = 0
                # Match against all filters until a positive or negative match
                for n in xrange(regcount):
                    if reEnabled[n]:
                        if category and reTypes[n] == 'C':
                            # Category ('C') filter: reject when the regex does not match
                            found = re.search(regexes[n], category)
                            if not found:
                                logging.debug("Filter rejected on rule %d", n)
                                result = False
                                break
                        else:
                            if regexes[n]:
                                found = re.search(regexes[n], title)
                            else:
                                found = False
                            # 'M' = must match, 'A' = accept, 'R' = reject
                            if reTypes[n] == 'M' and not found:
                                logging.debug("Filter rejected on rule %d", n)
                                result = False
                                break
                            if found and reTypes[n] == 'A':
                                logging.debug("Filter matched on rule %d", n)
                                result = True
                                break
                            if found and reTypes[n] == 'R':
                                logging.debug("Filter rejected on rule %d", n)
                                result = False
                                break
                if len(reCats):
                    # Resolve category/pp/script/priority from the deciding rule `n`
                    if notdefault(reCats[n]):
                        myCat = reCats[n]
                    elif category and not defCat:
                        myCat = cat_convert(category)
                    if myCat:
                        myCat, catPP, catScript, catPrio = cat_to_opts(myCat)
                    else:
                        myCat = catPP = catScript = catPrio = None
                    if notdefault(rePPs[n]):
                        myPP = rePPs[n]
                    elif not (reCats[n] or category):
                        myPP = catPP
                    if notdefault(reScripts[n]):
                        myScript = reScripts[n]
                    elif not (notdefault(reCats[n]) or category):
                        myScript = catScript
                    if rePrios[n] not in (str(DEFAULT_PRIORITY), ''):
                        myPrio = rePrios[n]
                    elif not ((rePrios[n] != str(DEFAULT_PRIORITY)) or category):
                        myPrio = catPrio
                # Duplicate handling: mode 1 skips, otherwise demote priority
                if cfg.no_dupes() and dup_title(title):
                    if cfg.no_dupes() == 1:
                        logging.info("Ignoring duplicate job %s", atitle)
                        continue
                    else:
                        myPrio = DUP_PRIORITY
                act = download and not first
                if link in jobs:
                    # A trailing '*' in the status means "marked, not downloaded"
                    act = act and not jobs[link].get('status', '').endswith('*')
                    act = act or force
                    star = first or jobs[link].get('status', '').endswith('*')
                else:
                    star = first
                if result:
                    _HandleLink(jobs, link, title, 'G', category, myCat, myPP,
                                myScript, act, star, order, priority=myPrio, rule=str(n))
                    if act:
                        new_downloads.append(title)
                else:
                    _HandleLink(jobs, link, title, 'B', category, myCat, myPP,
                                myScript, False, star, order, priority=myPrio, rule=str(n))
            order += 1
    # Send email if wanted and not "forced"
    if new_downloads and cfg.email_rss() and not force:
        emailer.rss_mail(feed, new_downloads)
    remove_obsolete(jobs, newlinks)
    return ''
def get_titles(nzo, match, name, titleing=False):
    ''' The title will be the part before the match
        Clean it up and title() it

        ''.title() isn't very good under python so this contains
        a lot of little hacks to make it better and for more control

        Returns (title, dotted, underscored) variants of the show title.
    '''
    # Prefer an explicit 'propername' from the NZB metadata when available
    if nzo:
        title = latin1(nzo.nzo_info.get('propername') or nzo.meta.get('propername', (None,))[0])
    else:
        title = ''
    if not title:
        if match:
            name = name[:match.start()]

        # Replace .US. with (US)
        if cfg.tv_sort_countries() == 1:
            for rep in COUNTRY_REP:
                # (us) > (US)
                name = replace_word(name, rep.lower(), rep)
                # (Us) > (US)
                name = replace_word(name, titler(rep), rep)
                # .US. > (US)
                dotted_country = '.%s.' % (rep.strip('()'))
                name = replace_word(name, dotted_country, rep)
        # Remove .US. and (US)
        elif cfg.tv_sort_countries() == 2:
            for rep in COUNTRY_REP:
                # Remove (US)
                name = replace_word(name, rep, '')
                dotted_country = '.%s.' % (rep.strip('()'))
                # Remove .US.
                name = replace_word(name, dotted_country, '.')

        title = name.replace('.', ' ').replace('_', ' ')
        title = title.strip().strip('(').strip('_').strip('-').strip().strip('_')

        if titleing:
            title = titler(title)  # title the show name so it is in a consistent letter case
            # titler() applied uppercase to 's -- Python bug?
            title = title.replace("'S", "'s")

            # Replace titled country names, (Us) with (US) and so on
            if cfg.tv_sort_countries() == 1:
                for rep in COUNTRY_REP:
                    title = title.replace(titler(rep), rep)
            # Remove country names, ie (Us)
            elif cfg.tv_sort_countries() == 2:
                for rep in COUNTRY_REP:
                    title = title.replace(titler(rep), '').strip()

            # Make sure some words such as 'and' or 'of' stay lowercased.
            for x in LOWERCASE:
                xtitled = titler(x)
                title = replace_word(title, xtitled, x)

            # Make sure some words such as 'III' or 'IV' stay uppercased.
            for x in UPPERCASE:
                xtitled = titler(x)
                title = replace_word(title, xtitled, x)

            # Make sure the first letter of the title is always uppercase
            if title:
                title = titler(title[0]) + title[1:]

    # The title with spaces replaced by dots
    dots = title.replace(" - ", "-").replace(' ','.').replace('_','.')
    dots = dots.replace('(', '.').replace(')','.').replace('..','.').rstrip('.')

    # The title with spaces replaced by underscores
    underscores = title.replace(' ','_').replace('.','_').replace('__','_').rstrip('_')

    return title, dots, underscores