def Process(self):
    module = '[FAILED-DOWNLOAD]'
    myDB = db.DBConnection()

    if self.nzb_name and self.nzb_folder:
        self._log('Failed download has been detected: ' + self.nzb_name + ' in ' + self.nzb_folder)

        #since this has already been passed through the search module, which holds the IssueID in the nzblog,
        #let's find the matching nzbname and pass it the IssueID in order to mark it as Failed and then return
        #to the search module and continue trucking along.

        nzbname = self.nzb_name
        #remove extensions from nzb_name if they somehow got through (Experimental most likely)
        extensions = ('.cbr', '.cbz')

        if nzbname.lower().endswith(extensions):
            fd, ext = os.path.splitext(nzbname)
            self._log("Removed extension from nzb: " + ext)
            nzbname = re.sub(str(ext), '', str(nzbname))

        #replace spaces
        nzbname = re.sub(' ', '.', str(nzbname))
        nzbname = re.sub('[\,\:\?\'\(\)]', '', str(nzbname))
        nzbname = re.sub('[\&]', 'and', str(nzbname))
        nzbname = re.sub('_', '.', str(nzbname))

        logger.fdebug(module + ' After conversions, nzbname is : ' + str(nzbname))
        self._log("nzbname: " + str(nzbname))

        nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()

        if nzbiss is None:
            self._log("Failure - could not initially locate nzbfile in my database to rename.")
            logger.fdebug(module + ' Failure - could not locate nzbfile initially')
            # if failed on spaces, change it all to decimals and try again.
            nzbname = re.sub('_', '.', str(nzbname))
            self._log("trying again with this nzbname: " + str(nzbname))
            logger.fdebug(module + ' Trying to locate nzbfile again with nzbname of : ' + str(nzbname))
            nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
            if nzbiss is None:
                logger.error(module + ' Unable to locate downloaded file to rename. PostProcessing aborted.')
                self._log('Unable to locate downloaded file to rename. PostProcessing aborted.')
                self.valreturn.append({"self.log": self.log,
                                       "mode": 'stop'})
                return self.queue.put(self.valreturn)
            else:
                self._log("I corrected and found the nzb as : " + str(nzbname))
                logger.fdebug(module + ' Auto-corrected and found the nzb as : ' + str(nzbname))
                issueid = nzbiss['IssueID']
        else:
            issueid = nzbiss['IssueID']
            logger.fdebug(module + ' Issueid: ' + str(issueid))
            sarc = nzbiss['SARC']
            #use issueid to get publisher, series, year, issue number

    else:
        issueid = self.issueid
        nzbiss = myDB.selectone("SELECT * from nzblog WHERE IssueID=?", [issueid]).fetchone()
        if nzbiss is None:
            logger.info(module + ' Cannot locate corresponding record in download history. This will be implemented soon.')
            self.valreturn.append({"self.log": self.log,
                                   "mode": 'stop'})
            return self.queue.put(self.valreturn)

        nzbname = nzbiss['NZBName']

    # find the provider.
    self.prov = nzbiss['PROVIDER']
    logger.info(module + ' Provider: ' + self.prov)

    # grab the id.
    self.id = nzbiss['ID']
    logger.info(module + ' ID: ' + self.id)
    annchk = "no"

    if 'annual' in nzbname.lower():
        logger.info(module + ' Annual detected.')
        annchk = "yes"
        issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
    else:
        issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()

    if issuenzb is not None:
        logger.info(module + ' issuenzb found.')
        if helpers.is_number(issueid):
            sandwich = int(issuenzb['IssueID'])
    else:
        logger.info(module + ' issuenzb not found.')
        #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
        #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
        if 'S' in issueid:
            sandwich = issueid
        elif 'G' in issueid or '-' in issueid:
            sandwich = 1

    if helpers.is_number(sandwich):
        if sandwich < 900000:
            # if sandwich is less than 900000 it's a normal watchlist download. Bypass.
            pass
    else:
        logger.info('Failed download handling for story-arcs and one-off\'s are not supported yet. Be patient!')
        self._log(' Unable to locate downloaded file to rename. PostProcessing aborted.')
        self.valreturn.append({"self.log": self.log,
                               "mode": 'stop'})
        return self.queue.put(self.valreturn)

    comicid = issuenzb['ComicID']
    issuenumOG = issuenzb['Issue_Number']
    logger.info(module + ' Successfully detected as : ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']) + ' that was downloaded using ' + self.prov)
    self._log('Successfully detected as : ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']) + ' downloaded using ' + self.prov)

    logger.info(module + ' Marking as a Failed Download.')
    self._log('Marking as a Failed Download.')

    ctrlVal = {"IssueID": issueid}
    Vals = {"Status": 'Failed'}
    myDB.upsert("issues", Vals, ctrlVal)

    ctrlVal = {"ID":       self.id,
               "Provider": self.prov,
               "NZBName":  nzbname}
    Vals = {"Status":       'Failed',
            "ComicName":    issuenzb['ComicName'],
            "Issue_Number": issuenzb['Issue_Number'],
            "IssueID":      issueid,
            "ComicID":      comicid,
            "DateFailed":   helpers.now()}
    myDB.upsert("failed", Vals, ctrlVal)

    logger.info(module + ' Successfully marked as Failed.')
    self._log('Successfully marked as Failed.')

    if mylar.CONFIG.FAILED_AUTO:
        logger.info(module + ' Sending back to search to see if we can find something that will not fail.')
        self._log('Sending back to search to see if we can find something better that will not fail.')
        self.valreturn.append({"self.log":     self.log,
                               "mode":         'retry',
                               "issueid":      issueid,
                               "comicid":      comicid,
                               "comicname":    issuenzb['ComicName'],
                               "issuenumber":  issuenzb['Issue_Number'],
                               "annchk":       annchk})
        return self.queue.put(self.valreturn)
    else:
        logger.info(module + ' Stopping search here as automatic handling of failed downloads is not enabled *hint*')
        self._log('Stopping search here as automatic handling of failed downloads is not enabled *hint*')
        self.valreturn.append({"self.log": self.log,
                               "mode": 'stop'})
        return self.queue.put(self.valreturn)
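
# A minimal sketch (not Mylar's actual caller) of how the valreturn payload that
# Process() places on self.queue might be interpreted downstream. The keys used
# ('mode', 'issueid', 'comicid', 'annchk', 'self.log') come from the method above;
# the function name and the retry hook are assumptions for illustration only.
def handle_failed_result(valreturn, retry_search=None):
    """valreturn is the list of dicts appended by FailedProcessor.Process()."""
    for item in valreturn:
        mode = item.get('mode')
        if mode == 'retry' and retry_search is not None:
            # hand the failed issue back to a search routine (hypothetical hook)
            retry_search(issueid=item['issueid'], comicid=item['comicid'],
                         annchk=item.get('annchk', 'no'))
        elif mode == 'stop':
            # nothing further to do; the accumulated log is carried in item['self.log']
            pass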
def configure(self, update=False):

    try:
        if not any([self.SAB_HOST is None, self.SAB_HOST == '', 'http://' in self.SAB_HOST[:7], 'https://' in self.SAB_HOST[:8]]):
            self.SAB_HOST = 'http://' + self.SAB_HOST
        if self.SAB_HOST.endswith('/'):
            logger.fdebug("Auto-correcting trailing slash in SABnzbd url (not required)")
            self.SAB_HOST = self.SAB_HOST[:-1]
    except:
        pass

    if any([self.HTTP_ROOT is None, self.HTTP_ROOT == '/']):
        self.HTTP_ROOT = '/'
    else:
        if not self.HTTP_ROOT.endswith('/'):
            self.HTTP_ROOT += '/'

    if not update:
        logger.fdebug('Log dir: %s' % self.LOG_DIR)

    if self.LOG_DIR is None:
        self.LOG_DIR = os.path.join(mylar.DATA_DIR, 'logs')

    if not os.path.exists(self.LOG_DIR):
        try:
            os.makedirs(self.LOG_DIR)
        except OSError:
            if not mylar.QUIET:
                logger.warn('Unable to create the log directory. Logging to screen only.')

    # if not update:
    #     logger.fdebug('[Cache Check] Cache directory currently set to : ' + self.CACHE_DIR)

    # Put the cache dir in the data dir for now
    if not self.CACHE_DIR:
        self.CACHE_DIR = os.path.join(str(mylar.DATA_DIR), 'cache')
        if not update:
            logger.fdebug('[Cache Check] Cache directory not found in configuration. Defaulting location to : ' + self.CACHE_DIR)

    if not os.path.exists(self.CACHE_DIR):
        try:
            os.makedirs(self.CACHE_DIR)
        except OSError:
            logger.error('[Cache Check] Could not create cache dir. Check permissions of datadir: ' + mylar.DATA_DIR)

    if not self.SECURE_DIR:
        self.SECURE_DIR = os.path.join(mylar.DATA_DIR, '.secure')

    if not os.path.exists(self.SECURE_DIR):
        try:
            os.makedirs(self.SECURE_DIR)
        except OSError:
            logger.error('[Secure DIR Check] Could not create secure directory. Check permissions of datadir: ' + mylar.DATA_DIR)

    #make sure the cookies.dat file is not in cache
    for f in glob.glob(os.path.join(self.CACHE_DIR, '.32p_cookies.dat')):
        try:
            if os.path.isfile(f):
                shutil.move(f, os.path.join(self.SECURE_DIR, '.32p_cookies.dat'))
        except Exception as e:
            logger.error('[SECURE-DIR-MOVE] Unable to move cookies file into secure location. This is a fatal error.')
            sys.exit()

    if self.CLEANUP_CACHE is True:
        logger.fdebug('[Cache Cleanup] Cache Cleanup initiated. Will delete items from cache that are no longer needed.')
        cache_types = ['*.nzb', '*.torrent', '*.zip', '*.html', 'mylar_*']
        cntr = 0
        for x in cache_types:
            for f in glob.glob(os.path.join(self.CACHE_DIR, x)):
                try:
                    if os.path.isdir(f):
                        shutil.rmtree(f)
                    else:
                        os.remove(f)
                except Exception as e:
                    logger.warn('[ERROR] Unable to remove %s from cache. Could be a possible permissions issue ?' % f)
                cntr += 1

        if cntr > 1:
            logger.fdebug('[Cache Cleanup] Cache Cleanup finished. Cleaned %s items' % cntr)
        else:
            logger.fdebug('[Cache Cleanup] Cache Cleanup finished. Nothing to clean!')

    if all([self.GRABBAG_DIR is None, self.DESTINATION_DIR is not None]):
        self.GRABBAG_DIR = os.path.join(self.DESTINATION_DIR, 'Grabbag')
        logger.fdebug('[Grabbag Directory] Setting One-Off directory to default location: %s' % self.GRABBAG_DIR)

    ## Sanity checking
    if any([self.COMICVINE_API is None, self.COMICVINE_API == 'None', self.COMICVINE_API == '']):
        logger.error('No User Comicvine API key specified. I will not work very well due to api limits - http://api.comicvine.com/ and get your own free key.')
        self.COMICVINE_API = None

    if self.SEARCH_INTERVAL < 360:
        logger.fdebug('Search interval too low. Resetting to 6 hour minimum')
        self.SEARCH_INTERVAL = 360

    if self.SEARCH_DELAY < 1:
        logger.fdebug("Minimum search delay set for 1 minute to avoid hammering.")
        self.SEARCH_DELAY = 1

    if self.RSS_CHECKINTERVAL < 20:
        logger.fdebug("Minimum RSS Interval Check delay set for 20 minutes to avoid hammering.")
        self.RSS_CHECKINTERVAL = 20

    if self.ENABLE_RSS is True and mylar.RSS_STATUS == 'Paused':
        mylar.RSS_STATUS = 'Waiting'
    elif self.ENABLE_RSS is False and mylar.RSS_STATUS == 'Waiting':
        mylar.RSS_STATUS = 'Paused'

    if not helpers.is_number(self.CHMOD_DIR):
        logger.fdebug("CHMOD Directory value is not a valid numeric - please correct. Defaulting to 0777")
        self.CHMOD_DIR = '0777'

    if not helpers.is_number(self.CHMOD_FILE):
        logger.fdebug("CHMOD File value is not a valid numeric - please correct. Defaulting to 0660")
        self.CHMOD_FILE = '0660'

    if self.FILE_OPTS is None:
        self.FILE_OPTS = 'move'

    if any([self.FILE_OPTS == 'hardlink', self.FILE_OPTS == 'softlink']):
        #we can't have metatagging enabled with hard/soft linking. Forcibly disable it here just in case it's set on load.
        self.ENABLE_META = False

    if self.BLACKLISTED_PUBLISHERS is not None and type(self.BLACKLISTED_PUBLISHERS) == unicode:
        setattr(self, 'BLACKLISTED_PUBLISHERS', self.BLACKLISTED_PUBLISHERS.split(', '))

    if all([self.AUTHENTICATION == 0, self.HTTP_USERNAME is not None, self.HTTP_PASSWORD is not None]):
        #set it to the default login prompt if nothing selected.
        self.AUTHENTICATION = 1
    elif all([self.HTTP_USERNAME is None, self.HTTP_PASSWORD is None]):
        self.AUTHENTICATION = 0

    if all([self.IGNORE_TOTAL is True, self.IGNORE_HAVETOTAL is True]):
        self.IGNORE_TOTAL = False
        self.IGNORE_HAVETOTAL = False
        logger.warn('You cannot have both ignore_total and ignore_havetotal enabled in the config.ini at the same time. Set only ONE to true - disabling both until this is resolved.')

    #comictagger - force to use included version if option is enabled.
    if self.ENABLE_META:
        mylar.CMTAGGER_PATH = mylar.PROG_DIR

        #we need to make sure the default folder setting for the comictagger settings exists so things don't error out
        mylar.CT_SETTINGSPATH = os.path.join(mylar.PROG_DIR, 'lib', 'comictaggerlib', 'ct_settings')

        if not update:
            logger.fdebug('Setting ComicTagger settings default path to : ' + mylar.CT_SETTINGSPATH)

        if not os.path.exists(mylar.CT_SETTINGSPATH):
            try:
                os.mkdir(mylar.CT_SETTINGSPATH)
            except OSError, e:
                if e.errno != errno.EEXIST:
                    logger.error('Unable to create setting directory for ComicTagger. This WILL cause problems when tagging.')
            else:
                logger.fdebug('Successfully created ComicTagger Settings location.')
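
# A minimal sketch of the clamping pattern configure() applies to
# SEARCH_INTERVAL / SEARCH_DELAY / RSS_CHECKINTERVAL above (raise a setting to a
# safe minimum and log the correction). The helper name is an assumption, not
# part of Mylar.
def clamp_minimum(value, minimum, label, log=None):
    """Return value, raised to minimum if it falls below it."""
    if value < minimum:
        if log is not None:
            log('%s too low. Resetting to minimum of %s' % (label, minimum))
        return minimum
    return value

# e.g. self.SEARCH_INTERVAL = clamp_minimum(self.SEARCH_INTERVAL, 360,
#                                           'Search interval', logger.fdebug)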
def Process(self):
    module = '[FAILED-DOWNLOAD]'
    myDB = db.DBConnection()

    if self.nzb_name and self.nzb_folder:
        self._log('Failed download has been detected: ' + self.nzb_name + ' in ' + self.nzb_folder)

        #since this has already been passed through the search module, which holds the IssueID in the nzblog,
        #let's find the matching nzbname and pass it the IssueID in order to mark it as Failed and then return
        #to the search module and continue trucking along.

        nzbname = self.nzb_name
        #remove extensions from nzb_name if they somehow got through (Experimental most likely)
        extensions = ('.cbr', '.cbz')

        if nzbname.lower().endswith(extensions):
            fd, ext = os.path.splitext(nzbname)
            self._log("Removed extension from nzb: " + ext)
            nzbname = re.sub(str(ext), '', str(nzbname))

        #replace spaces
        nzbname = re.sub(' ', '.', str(nzbname))
        nzbname = re.sub('[\,\:\?\'\(\)]', '', str(nzbname))
        nzbname = re.sub('[\&]', 'and', str(nzbname))
        nzbname = re.sub('_', '.', str(nzbname))

        logger.fdebug(module + ' After conversions, nzbname is : ' + str(nzbname))
        self._log("nzbname: " + str(nzbname))

        nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()

        if nzbiss is None:
            self._log("Failure - could not initially locate nzbfile in my database to rename.")
            logger.fdebug(module + ' Failure - could not locate nzbfile initially')
            # if failed on spaces, change it all to decimals and try again.
            nzbname = re.sub('_', '.', str(nzbname))
            self._log("trying again with this nzbname: " + str(nzbname))
            logger.fdebug(module + ' Trying to locate nzbfile again with nzbname of : ' + str(nzbname))
            nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
            if nzbiss is None:
                logger.error(module + ' Unable to locate downloaded file to rename. PostProcessing aborted.')
                self._log('Unable to locate downloaded file to rename. PostProcessing aborted.')
                self.valreturn.append({"self.log": self.log,
                                       "mode": 'stop'})
                return self.queue.put(self.valreturn)
            else:
                self._log("I corrected and found the nzb as : " + str(nzbname))
                logger.fdebug(module + ' Auto-corrected and found the nzb as : ' + str(nzbname))
                issueid = nzbiss['IssueID']
        else:
            issueid = nzbiss['IssueID']
            logger.fdebug(module + ' Issueid: ' + str(issueid))
            sarc = nzbiss['SARC']
            #use issueid to get publisher, series, year, issue number

    else:
        issueid = self.issueid
        nzbiss = myDB.selectone("SELECT * from nzblog WHERE IssueID=?", [issueid]).fetchone()
        if nzbiss is None:
            logger.info(module + ' Cannot locate corresponding record in download history. This will be implemented soon.')
            self.valreturn.append({"self.log": self.log,
                                   "mode": 'stop'})
            return self.queue.put(self.valreturn)

        nzbname = nzbiss['NZBName']

    # find the provider.
    self.prov = nzbiss['PROVIDER']
    logger.info(module + ' Provider: ' + self.prov)

    # grab the id.
    self.id = nzbiss['ID']
    logger.info(module + ' ID: ' + self.id)
    annchk = "no"

    if 'annual' in nzbname.lower():
        logger.info(module + ' Annual detected.')
        annchk = "yes"
        issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
    else:
        issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()

    if issuenzb is not None:
        logger.info(module + ' issuenzb found.')
        if helpers.is_number(issueid):
            sandwich = int(issuenzb['IssueID'])
    else:
        logger.info(module + ' issuenzb not found.')
        #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
        #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
        if 'S' in issueid:
            sandwich = issueid
        elif 'G' in issueid or '-' in issueid:
            sandwich = 1

    if helpers.is_number(sandwich):
        if sandwich < 900000:
            # if sandwich is less than 900000 it's a normal watchlist download. Bypass.
            pass
    else:
        logger.info('Failed download handling for story-arcs and one-off\'s are not supported yet. Be patient!')
        self._log(' Unable to locate downloaded file to rename. PostProcessing aborted.')
        self.valreturn.append({"self.log": self.log,
                               "mode": 'stop'})
        return self.queue.put(self.valreturn)

    comicid = issuenzb['ComicID']
    issuenumOG = issuenzb['Issue_Number']
    logger.info(module + ' Successfully detected as : ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']) + ' that was downloaded using ' + self.prov)
    self._log('Successfully detected as : ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']) + ' downloaded using ' + self.prov)

    logger.info(module + ' Marking as a Failed Download.')
    self._log('Marking as a Failed Download.')

    ctrlVal = {"IssueID": issueid}
    Vals = {"Status": 'Failed'}
    myDB.upsert("issues", Vals, ctrlVal)

    ctrlVal = {"ID":       self.id,
               "Provider": self.prov,
               "NZBName":  nzbname}
    Vals = {"Status":       'Failed',
            "ComicName":    issuenzb['ComicName'],
            "Issue_Number": issuenzb['Issue_Number']}
    myDB.upsert("failed", Vals, ctrlVal)

    logger.info(module + ' Successfully marked as Failed.')
    self._log('Successfully marked as Failed.')

    if mylar.FAILED_AUTO:
        logger.info(module + ' Sending back to search to see if we can find something that will not fail.')
        self._log('Sending back to search to see if we can find something better that will not fail.')
        self.valreturn.append({"self.log":     self.log,
                               "mode":         'retry',
                               "issueid":      issueid,
                               "comicid":      comicid,
                               "comicname":    issuenzb['ComicName'],
                               "issuenumber":  issuenzb['Issue_Number'],
                               "annchk":       annchk})
        return self.queue.put(self.valreturn)
    else:
        logger.info(module + ' Stopping search here as automatic handling of failed downloads is not enabled *hint*')
        self._log('Stopping search here as automatic handling of failed downloads is not enabled *hint*')
        self.valreturn.append({"self.log": self.log,
                               "mode": 'stop'})
        return self.queue.put(self.valreturn)
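
# A rough sketch of what myDB.upsert(table, valDict, keyDict) is doing in the
# calls above (update the row matching keyDict, insert it if missing). The table
# and column names come from the call sites; the SQL-building helper below is an
# illustrative approximation of Mylar's db helper, not a copy of it, and is only
# meant for trusted, hard-coded table/column names.
def upsert(conn, table, val_dict, key_dict):
    where = ' AND '.join('%s=?' % k for k in key_dict)
    sets = ', '.join('%s=?' % k for k in val_dict)
    cur = conn.execute('UPDATE %s SET %s WHERE %s' % (table, sets, where),
                       list(val_dict.values()) + list(key_dict.values()))
    if cur.rowcount == 0:
        # no existing row matched the control values - insert a new one
        cols = list(val_dict) + list(key_dict)
        marks = ', '.join('?' * len(cols))
        conn.execute('INSERT INTO %s (%s) VALUES (%s)' % (table, ', '.join(cols), marks),
                     list(val_dict.values()) + list(key_dict.values()))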
issueid = nzbiss['IssueID']
logger.fdebug("issueid:" + str(issueid))
sarc = nzbiss['SARC']
#use issueid to get publisher, series, year, issue number

annchk = "no"
if 'annual' in nzbname.lower():
    logger.info("annual detected.")
    annchk = "yes"
    issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
else:
    issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()

if issuenzb is not None:
    logger.info("issuenzb found.")
    if helpers.is_number(issueid):
        sandwich = int(issuenzb['IssueID'])
else:
    logger.info("issuenzb not found.")
    #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
    #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
    if 'S' in issueid:
        sandwich = issueid
    elif 'G' in issueid or '-' in issueid:
        sandwich = 1

if helpers.is_number(sandwich):
    if sandwich < 900000:
        # if sandwich is less than 900000 it's a normal watchlist download. Bypass.
        pass
else:
    if issuenzb is None or 'S' in sandwich or int(sandwich) >= 900000:
def configure(self, update=False):

    try:
        if not any([self.SAB_HOST is None, self.SAB_HOST == '', 'http://' in self.SAB_HOST[:7], 'https://' in self.SAB_HOST[:8]]):
            self.SAB_HOST = 'http://' + self.SAB_HOST
        if self.SAB_HOST.endswith('/'):
            logger.fdebug("Auto-correcting trailing slash in SABnzbd url (not required)")
            self.SAB_HOST = self.SAB_HOST[:-1]
    except:
        pass

    if any([self.HTTP_ROOT is None, self.HTTP_ROOT == '/']):
        self.HTTP_ROOT = '/'
    else:
        if not self.HTTP_ROOT.endswith('/'):
            self.HTTP_ROOT += '/'

    if not update:
        logger.fdebug('Log dir: %s' % self.LOG_DIR)

    if self.LOG_DIR is None:
        self.LOG_DIR = os.path.join(mylar.DATA_DIR, 'logs')

    if not os.path.exists(self.LOG_DIR):
        try:
            os.makedirs(self.LOG_DIR)
        except OSError:
            if not mylar.QUIET:
                logger.warn('Unable to create the log directory. Logging to screen only.')

    # if not update:
    #     logger.fdebug('[Cache Check] Cache directory currently set to : ' + self.CACHE_DIR)

    # Put the cache dir in the data dir for now
    if not self.CACHE_DIR:
        self.CACHE_DIR = os.path.join(str(mylar.DATA_DIR), 'cache')
        if not update:
            logger.fdebug('[Cache Check] Cache directory not found in configuration. Defaulting location to : ' + self.CACHE_DIR)

    if not os.path.exists(self.CACHE_DIR):
        try:
            os.makedirs(self.CACHE_DIR)
        except OSError:
            logger.error('[Cache Check] Could not create cache dir. Check permissions of datadir: ' + mylar.DATA_DIR)

    ## Sanity checking
    if any([self.COMICVINE_API is None, self.COMICVINE_API == 'None', self.COMICVINE_API == '']):
        logger.error('No User Comicvine API key specified. I will not work very well due to api limits - http://api.comicvine.com/ and get your own free key.')
        self.COMICVINE_API = None

    if self.SEARCH_INTERVAL < 360:
        logger.fdebug('Search interval too low. Resetting to 6 hour minimum')
        self.SEARCH_INTERVAL = 360

    if self.SEARCH_DELAY < 1:
        logger.fdebug("Minimum search delay set for 1 minute to avoid hammering.")
        self.SEARCH_DELAY = 1

    if self.RSS_CHECKINTERVAL < 20:
        logger.fdebug("Minimum RSS Interval Check delay set for 20 minutes to avoid hammering.")
        self.RSS_CHECKINTERVAL = 20

    if not helpers.is_number(self.CHMOD_DIR):
        logger.fdebug("CHMOD Directory value is not a valid numeric - please correct. Defaulting to 0777")
        self.CHMOD_DIR = '0777'

    if not helpers.is_number(self.CHMOD_FILE):
        logger.fdebug("CHMOD File value is not a valid numeric - please correct. Defaulting to 0660")
        self.CHMOD_FILE = '0660'

    if self.FILE_OPTS is None:
        self.FILE_OPTS = 'move'

    if any([self.FILE_OPTS == 'hardlink', self.FILE_OPTS == 'softlink']):
        #we can't have metatagging enabled with hard/soft linking. Forcibly disable it here just in case it's set on load.
        self.ENABLE_META = False

    if self.BLACKLISTED_PUBLISHERS is not None and type(self.BLACKLISTED_PUBLISHERS) == unicode:
        setattr(self, 'BLACKLISTED_PUBLISHERS', self.BLACKLISTED_PUBLISHERS.split(', '))

    if all([self.AUTHENTICATION == 0, self.HTTP_USERNAME is not None, self.HTTP_PASSWORD is not None]):
        #set it to the default login prompt if nothing selected.
        self.AUTHENTICATION = 1
    elif all([self.HTTP_USERNAME is None, self.HTTP_PASSWORD is None]):
        self.AUTHENTICATION = 0

    #comictagger - force to use included version if option is enabled.
    if self.ENABLE_META:
        mylar.CMTAGGER_PATH = mylar.PROG_DIR

        #we need to make sure the default folder setting for the comictagger settings exists so things don't error out
        mylar.CT_SETTINGSPATH = os.path.join(mylar.PROG_DIR, 'lib', 'comictaggerlib', 'ct_settings')

        if not update:
            logger.fdebug('Setting ComicTagger settings default path to : ' + mylar.CT_SETTINGSPATH)

        if not os.path.exists(mylar.CT_SETTINGSPATH):
            try:
                os.mkdir(mylar.CT_SETTINGSPATH)
            except OSError, e:
                if e.errno != errno.EEXIST:
                    logger.error('Unable to create setting directory for ComicTagger. This WILL cause problems when tagging.')
            else:
                logger.fdebug('Successfully created ComicTagger Settings location.')
def Process(self):
    self._log("nzb name: " + str(self.nzb_name), logger.DEBUG)
    self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG)
    logger.fdebug("nzb name: " + str(self.nzb_name))
    logger.fdebug("nzb folder: " + str(self.nzb_folder))
    if mylar.USE_SABNZBD == 0:
        logger.fdebug("Not using SABNzbd")
    else:
        # if the SAB Directory option is enabled, let's use that folder name and append the jobname.
        if mylar.SAB_DIRECTORY is not None and mylar.SAB_DIRECTORY is not 'None' and len(mylar.SAB_DIRECTORY) > 4:
            self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)

        #lookup nzb_name in nzblog table to get issueid

        #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
        #http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
        querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
        #logger.info("querysab_string:" + str(querysab))
        file = urllib2.urlopen(querysab)
        data = file.read()
        file.close()
        dom = parseString(data)

        sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
        sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
        logger.fdebug("SAB Replace Spaces: " + str(sabreps))
        logger.fdebug("SAB Replace Dots: " + str(sabrepd))
    if mylar.USE_NZBGET == 1:
        logger.fdebug("Using NZBGET")
        logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
    myDB = db.DBConnection()

    nzbname = self.nzb_name
    #remove extensions from nzb_name if they somehow got through (Experimental most likely)
    extensions = ('.cbr', '.cbz')

    if nzbname.lower().endswith(extensions):
        fd, ext = os.path.splitext(nzbname)
        self._log("Removed extension from nzb: " + ext, logger.DEBUG)
        nzbname = re.sub(str(ext), '', str(nzbname))

    #replace spaces
    nzbname = re.sub(' ', '.', str(nzbname))
    nzbname = re.sub('[\,\:\?]', '', str(nzbname))
    nzbname = re.sub('[\&]', 'and', str(nzbname))

    logger.fdebug("After conversions, nzbname is : " + str(nzbname))
    # if mylar.USE_NZBGET==1:
    #     nzbname=self.nzb_name
    self._log("nzbname: " + str(nzbname), logger.DEBUG)

    nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()

    if nzbiss is None:
        self._log("Failure - could not initially locate nzbfile in my database to rename.", logger.DEBUG)
        logger.fdebug("Failure - could not locate nzbfile initially.")
        # if failed on spaces, change it all to decimals and try again.
        nzbname = re.sub('_', '.', str(nzbname))
        self._log("trying again with this nzbname: " + str(nzbname), logger.DEBUG)
        logger.fdebug("trying again with nzbname of : " + str(nzbname))
        nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
        if nzbiss is None:
            logger.error(u"Unable to locate downloaded file to rename. PostProcessing aborted.")
            return
        else:
            self._log("I corrected and found the nzb as : " + str(nzbname))
            logger.fdebug("auto-corrected and found the nzb as : " + str(nzbname))
            issueid = nzbiss['IssueID']
    else:
        issueid = nzbiss['IssueID']
        print "issueid:" + str(issueid)

    #use issueid to get publisher, series, year, issue number
    issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone()
    if helpers.is_number(issueid):
        sandwich = int(issuenzb['IssueID'])
    else:
        #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
        #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
        sandwich = 1
    if issuenzb is None or sandwich >= 900000:
        # this has no issueID, therefore it's a one-off or a manual post-proc.
        # At this point, let's just drop it into the Comic Location folder and forget about it..
        self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.", logger.DEBUG)
        logger.info("One-off mode enabled for Post-Processing. Will move into Grab-bag directory.")
        self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)
        for root, dirnames, filenames in os.walk(self.nzb_folder):
            for filename in filenames:
                if filename.lower().endswith(extensions):
                    ofilename = filename
                    path, ext = os.path.splitext(ofilename)

        if mylar.GRABBAG_DIR:
            grdst = mylar.GRABBAG_DIR
        else:
            grdst = mylar.DESTINATION_DIR

        grab_dst = os.path.join(grdst, ofilename)
        self._log("Destination Path : " + grab_dst, logger.DEBUG)
        grab_src = os.path.join(self.nzb_folder, ofilename)
        self._log("Source Path : " + grab_src, logger.DEBUG)
        logger.info("Moving " + str(ofilename) + " into grab-bag directory : " + str(grdst))

        try:
            shutil.move(grab_src, grab_dst)
        except (OSError, IOError):
            self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
            logger.debug("Failed to move directory - check directories and manually re-run.")
            return self.log

        #tidyup old path
        try:
            shutil.rmtree(self.nzb_folder)
        except (OSError, IOError):
            self._log("Failed to remove temporary directory.", logger.DEBUG)
            logger.debug("Failed to remove temporary directory - check directory and manually re-run.")
            return self.log

        logger.debug("Removed temporary directory : " + str(self.nzb_folder))
        self._log("Removed temporary directory : " + self.nzb_folder, logger.DEBUG)
        #delete entry from nzblog table
        myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
        return self.log

    comicid = issuenzb['ComicID']
    issuenumOG = issuenzb['Issue_Number']
    #issueno = str(issuenum).split('.')[0]
    #new CV API - removed all decimals...here we go AGAIN!
    issuenum = issuenumOG
    issue_except = 'None'
    if 'au' in issuenum.lower():
        issuenum = re.sub("[^0-9]", "", issuenum)
        issue_except = ' AU'
    if '.' in issuenum:
        iss_find = issuenum.find('.')
        iss_b4dec = issuenum[:iss_find]
        iss_decval = issuenum[iss_find+1:]
        if int(iss_decval) == 0:
            iss = iss_b4dec
            issdec = int(iss_decval)
            issueno = str(iss)
            self._log("Issue Number: " + str(issueno), logger.DEBUG)
            logger.fdebug("Issue Number: " + str(issueno))
        else:
            if len(iss_decval) == 1:
                iss = iss_b4dec + "." + iss_decval
                issdec = int(iss_decval) * 10
            else:
                iss = iss_b4dec + "." + iss_decval.rstrip('0')
                issdec = int(iss_decval.rstrip('0')) * 10
            issueno = iss_b4dec
            self._log("Issue Number: " + str(iss), logger.DEBUG)
            logger.fdebug("Issue Number: " + str(iss))
    else:
        iss = issuenum
        issueno = str(iss)

    # issue zero-suppression here
    if mylar.ZERO_LEVEL == "0":
        zeroadd = ""
    else:
        if mylar.ZERO_LEVEL_N == "none":
            zeroadd = ""
        elif mylar.ZERO_LEVEL_N == "0x":
            zeroadd = "0"
        elif mylar.ZERO_LEVEL_N == "00x":
            zeroadd = "00"

    logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))

    if str(len(issueno)) > 1:
        if int(issueno) < 10:
            self._log("issue detected less than 10", logger.DEBUG)
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
                    prettycomiss = str(zeroadd) + str(iss)
                else:
                    prettycomiss = str(zeroadd) + str(int(issueno))
            else:
                prettycomiss = str(zeroadd) + str(iss)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
        elif int(issueno) >= 10 and int(issueno) < 100:
            self._log("issue detected greater than 10, but less than 100", logger.DEBUG)
            if mylar.ZERO_LEVEL_N == "none":
                zeroadd = ""
            else:
                zeroadd = "0"
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
                    prettycomiss = str(zeroadd) + str(iss)
                else:
                    prettycomiss = str(zeroadd) + str(int(issueno))
            else:
                prettycomiss = str(zeroadd) + str(iss)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
        else:
            self._log("issue detected greater than 100", logger.DEBUG)
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
            prettycomiss = str(issueno)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
    else:
        prettycomiss = str(issueno)
        self._log("issue length error - cannot determine length. Defaulting to None: " + str(prettycomiss), logger.DEBUG)

    logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
    issueyear = issuenzb['IssueDate'][:4]
    self._log("Issue Year: " + str(issueyear), logger.DEBUG)
    logger.fdebug("Issue Year : " + str(issueyear))
    comicnzb = myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
    publisher = comicnzb['ComicPublisher']
    self._log("Publisher: " + publisher, logger.DEBUG)
    logger.fdebug("Publisher: " + str(publisher))
    #we need to un-unicode this to make sure we can write the filenames properly for spec.chars
    series = comicnzb['ComicName'].encode('ascii', 'ignore').strip()
    self._log("Series: " + series, logger.DEBUG)
    logger.fdebug("Series: " + str(series))
    seriesyear = comicnzb['ComicYear']
    self._log("Year: " + seriesyear, logger.DEBUG)
    logger.fdebug("Year: " + str(seriesyear))
    comlocation = comicnzb['ComicLocation']
    self._log("Comic Location: " + comlocation, logger.DEBUG)
    logger.fdebug("Comic Location: " + str(comlocation))
    comversion = comicnzb['ComicVersion']
    self._log("Comic Version: " + str(comversion), logger.DEBUG)
    logger.fdebug("Comic Version: " + str(comversion))
    if comversion is None:
        comversion = 'None'
    #if comversion is None, remove it so it doesn't populate with 'None'
    if comversion == 'None':
        chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT)
        chunk_f = re.compile(r'\s+')
        chunk_file_format = chunk_f.sub(' ', chunk_f_f)
        self._log("No version # found for series - tag will not be available for renaming.", logger.DEBUG)
        logger.fdebug("No version # found for series, removing from filename")
        logger.fdebug("new format is now: " + str(chunk_file_format))
    else:
        chunk_file_format = mylar.FILE_FORMAT

    #Run Pre-script
    if mylar.ENABLE_PRE_SCRIPTS:
        nzbn = self.nzb_name #original nzb name
        nzbf = self.nzb_folder #original nzb folder
        #name, comicyear, comicid , issueid, issueyear, issue, publisher
        #create the dic and send it.
        seriesmeta = []
        seriesmetadata = {}
        seriesmeta.append({
                    'name':       series,
                    'comicyear':  seriesyear,
                    'comicid':    comicid,
                    'issueid':    issueid,
                    'issueyear':  issueyear,
                    'issue':      issuenum,
                    'publisher':  publisher
                    })
        seriesmetadata['seriesmeta'] = seriesmeta
        self._run_pre_scripts(nzbn, nzbf, seriesmetadata)

    #rename file and move to new path
    #nfilename = series + " " + issueno + " (" + seriesyear + ")"

    file_values = {'$Series':    series,
                   '$Issue':     prettycomiss,
                   '$Year':      issueyear,
                   '$series':    series.lower(),
                   '$Publisher': publisher,
                   '$publisher': publisher.lower(),
                   '$VolumeY':   'V' + str(seriesyear),
                   '$VolumeN':   comversion
                   }

    for root, dirnames, filenames in os.walk(self.nzb_folder):
        for filename in filenames:
            if filename.lower().endswith(extensions):
                ofilename = filename
                path, ext = os.path.splitext(ofilename)

    self._log("Original Filename: " + ofilename, logger.DEBUG)
    self._log("Original Extension: " + ext, logger.DEBUG)
    logger.fdebug("Original Filname: " + str(ofilename))
    logger.fdebug("Original Extension: " + str(ext))

    if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES:
        self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG)
        logger.fdebug("Rename Files isn't enabled - keeping original filename.")
        #check if extension is in nzb_name - will screw up otherwise
        if ofilename.lower().endswith(extensions):
            nfilename = ofilename[:-4]
        else:
            nfilename = ofilename
    else:
        nfilename = helpers.replace_all(chunk_file_format, file_values)
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
    nfilename = re.sub('[\,\:\?]', '', nfilename)
    self._log("New Filename: " + nfilename, logger.DEBUG)
    logger.fdebug("New Filename: " + str(nfilename))

    src = os.path.join(self.nzb_folder, ofilename)

    filechecker.validateAndCreateDirectory(comlocation, True)

    if mylar.LOWERCASE_FILENAMES:
        dst = (comlocation + "/" + nfilename + ext).lower()
    else:
        dst = comlocation + "/" + nfilename + ext.lower()
    self._log("Source:" + src, logger.DEBUG)
    self._log("Destination:" + dst, logger.DEBUG)
    logger.fdebug("Source: " + str(src))
    logger.fdebug("Destination: " + str(dst))
    os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder, str(nfilename + ext)))
    src = os.path.join(self.nzb_folder, str(nfilename + ext))
    try:
        shutil.move(src, dst)
    except (OSError, IOError):
        self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
        self._log("Post-Processing ABORTED.", logger.DEBUG)
        return
    #tidyup old path
    try:
        shutil.rmtree(self.nzb_folder)
    except (OSError, IOError):
        self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
        self._log("Post-Processing ABORTED.", logger.DEBUG)
        return

    self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
    #delete entry from nzblog table
    myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
    #force rescan of files
    updater.forceRescan(comicid)
    logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenumOG))
    self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)

    if mylar.PROWL_ENABLED:
        pushmessage = series + '(' + issueyear + ') - issue #' + issuenumOG
        logger.info(u"Prowl request")
        prowl = notifiers.PROWL()
        prowl.notify(pushmessage, "Download and Postprocessing completed")

    if mylar.NMA_ENABLED:
        nma = notifiers.NMA()
        nma.notify(series, str(issueyear), str(issuenumOG))

    if mylar.PUSHOVER_ENABLED:
        pushmessage = series + ' (' + str(issueyear) + ') - issue #' + str(issuenumOG)
        logger.info(u"Pushover request")
        pushover = notifiers.PUSHOVER()
        pushover.notify(pushmessage, "Download and Post-Processing completed")

    # retrieve/create the corresponding comic objects
    if mylar.ENABLE_EXTRA_SCRIPTS:
        folderp = str(dst) #folder location after move/rename
        nzbn = self.nzb_name #original nzb name
        filen = str(nfilename + ext) #new filename
        #name, comicyear, comicid , issueid, issueyear, issue, publisher
        #create the dic and send it.
        seriesmeta = []
        seriesmetadata = {}
        seriesmeta.append({
                    'name':       series,
                    'comicyear':  seriesyear,
                    'comicid':    comicid,
                    'issueid':    issueid,
                    'issueyear':  issueyear,
                    'issue':      issuenum,
                    'publisher':  publisher
                    })
        seriesmetadata['seriesmeta'] = seriesmeta
        self._run_extra_scripts(nzbname, self.nzb_folder, filen, folderp, seriesmetadata)

    return self.log
def Process(self):
    self._log("nzb name: " + str(self.nzb_name), logger.DEBUG)
    self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG)
    logger.fdebug("nzb name: " + str(self.nzb_name))
    logger.fdebug("nzb folder: " + str(self.nzb_folder))
    if mylar.USE_SABNZBD == 0:
        logger.fdebug("Not using SABNzbd")
    else:
        # if the SAB Directory option is enabled, let's use that folder name and append the jobname.
        if mylar.SAB_DIRECTORY is not None and mylar.SAB_DIRECTORY is not 'None' and len(mylar.SAB_DIRECTORY) > 4:
            self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)

        #lookup nzb_name in nzblog table to get issueid

        #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
        #http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
        querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
        #logger.info("querysab_string:" + str(querysab))
        file = urllib2.urlopen(querysab)
        data = file.read()
        file.close()
        dom = parseString(data)

        try:
            sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
        except:
            errorm = dom.getElementsByTagName('error')[0].firstChild.wholeText
            logger.error(u"Error detected attempting to retrieve SAB data : " + errorm)
            return
        sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
        logger.fdebug("SAB Replace Spaces: " + str(sabreps))
        logger.fdebug("SAB Replace Dots: " + str(sabrepd))
    if mylar.USE_NZBGET == 1:
        logger.fdebug("Using NZBGET")
        logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
    myDB = db.DBConnection()

    nzbname = self.nzb_name
    #remove extensions from nzb_name if they somehow got through (Experimental most likely)
    extensions = ('.cbr', '.cbz')

    if nzbname.lower().endswith(extensions):
        fd, ext = os.path.splitext(nzbname)
        self._log("Removed extension from nzb: " + ext, logger.DEBUG)
        nzbname = re.sub(str(ext), '', str(nzbname))

    #replace spaces
    nzbname = re.sub(' ', '.', str(nzbname))
    nzbname = re.sub('[\,\:\?]', '', str(nzbname))
    nzbname = re.sub('[\&]', 'and', str(nzbname))

    logger.fdebug("After conversions, nzbname is : " + str(nzbname))
    # if mylar.USE_NZBGET==1:
    #     nzbname=self.nzb_name
    self._log("nzbname: " + str(nzbname), logger.DEBUG)

    nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()

    if nzbiss is None:
        self._log("Failure - could not initially locate nzbfile in my database to rename.", logger.DEBUG)
        logger.fdebug("Failure - could not locate nzbfile initially.")
        # if failed on spaces, change it all to decimals and try again.
        nzbname = re.sub('_', '.', str(nzbname))
        self._log("trying again with this nzbname: " + str(nzbname), logger.DEBUG)
        logger.fdebug("trying again with nzbname of : " + str(nzbname))
        nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
        if nzbiss is None:
            logger.error(u"Unable to locate downloaded file to rename. PostProcessing aborted.")
            return
        else:
            self._log("I corrected and found the nzb as : " + str(nzbname))
            logger.fdebug("auto-corrected and found the nzb as : " + str(nzbname))
            issueid = nzbiss['IssueID']
    else:
        issueid = nzbiss['IssueID']
        logger.fdebug("issueid:" + str(issueid))
        sarc = nzbiss['SARC']

    #use issueid to get publisher, series, year, issue number
    issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone()
    if issuenzb is not None:
        if helpers.is_number(issueid):
            sandwich = int(issuenzb['IssueID'])
    else:
        #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
        #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
        if 'S' in issueid:
            sandwich = issueid
        elif 'G' in issueid:
            sandwich = 1
    if helpers.is_number(sandwich):
        if sandwich < 900000:
            # if sandwich is less than 900000 it's a normal watchlist download. Bypass.
            pass
    else:
        if issuenzb is None or 'S' in sandwich or int(sandwich) >= 900000:
            # this has no issueID, therefore it's a one-off or a manual post-proc.
            # At this point, let's just drop it into the Comic Location folder and forget about it..
            if 'S' in sandwich:
                self._log("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
                logger.info("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
                if mylar.STORYARCDIR:
                    storyarcd = os.path.join(mylar.DESTINATION_DIR, "StoryArcs", sarc)
                    self._log("StoryArc Directory set to : " + storyarcd, logger.DEBUG)
                else:
                    self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)
            else:
                self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.", logger.DEBUG)
                logger.info("One-off mode enabled for Post-Processing. Will move into Grab-bag directory.")
                self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)
            for root, dirnames, filenames in os.walk(self.nzb_folder):
                for filename in filenames:
                    if filename.lower().endswith(extensions):
                        ofilename = filename
                        path, ext = os.path.splitext(ofilename)

            if 'S' in sandwich:
                if mylar.STORYARCDIR:
                    grdst = storyarcd
                else:
                    grdst = mylar.DESTINATION_DIR
            else:
                if mylar.GRABBAG_DIR:
                    grdst = mylar.GRABBAG_DIR
                else:
                    grdst = mylar.DESTINATION_DIR

            filechecker.validateAndCreateDirectory(grdst, True)

            grab_dst = os.path.join(grdst, ofilename)
            self._log("Destination Path : " + grab_dst, logger.DEBUG)
            logger.info("Destination Path : " + grab_dst)
            grab_src = os.path.join(self.nzb_folder, ofilename)
            self._log("Source Path : " + grab_src, logger.DEBUG)
            logger.info("Source Path : " + grab_src)

            logger.info("Moving " + str(ofilename) + " into directory : " + str(grdst))

            try:
                shutil.move(grab_src, grab_dst)
            except (OSError, IOError):
                self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
                logger.debug("Failed to move directory - check directories and manually re-run.")
                return

            #tidyup old path
            try:
                shutil.rmtree(self.nzb_folder)
            except (OSError, IOError):
                self._log("Failed to remove temporary directory.", logger.DEBUG)
                logger.debug("Failed to remove temporary directory - check directory and manually re-run.")
                return

            logger.debug("Removed temporary directory : " + str(self.nzb_folder))
            self._log("Removed temporary directory : " + self.nzb_folder, logger.DEBUG)
            #delete entry from nzblog table
            myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])

            if 'S' in issueid:
                issuearcid = re.sub('S', '', issueid)
                logger.info("IssueArcID is : " + str(issuearcid))
                ctrlVal = {"IssueArcID": issuearcid}
                newVal = {"Status":   "Downloaded",
                          "Location": grab_dst}
                myDB.upsert("readinglist", newVal, ctrlVal)
                logger.info("updated status to Downloaded")
            return self.log

    comicid = issuenzb['ComicID']
    issuenumOG = issuenzb['Issue_Number']
    #issueno = str(issuenum).split('.')[0]
    #new CV API - removed all decimals...here we go AGAIN!
    issuenum = issuenumOG
    issue_except = 'None'
    if 'au' in issuenum.lower():
        issuenum = re.sub("[^0-9]", "", issuenum)
        issue_except = ' AU'
    if '.' in issuenum:
        iss_find = issuenum.find('.')
        iss_b4dec = issuenum[:iss_find]
        iss_decval = issuenum[iss_find+1:]
        if int(iss_decval) == 0:
            iss = iss_b4dec
            issdec = int(iss_decval)
            issueno = str(iss)
            self._log("Issue Number: " + str(issueno), logger.DEBUG)
            logger.fdebug("Issue Number: " + str(issueno))
        else:
            if len(iss_decval) == 1:
                iss = iss_b4dec + "." + iss_decval
                issdec = int(iss_decval) * 10
            else:
                iss = iss_b4dec + "." + iss_decval.rstrip('0')
                issdec = int(iss_decval.rstrip('0')) * 10
            issueno = iss_b4dec
            self._log("Issue Number: " + str(iss), logger.DEBUG)
            logger.fdebug("Issue Number: " + str(iss))
    else:
        iss = issuenum
        issueno = str(iss)

    # issue zero-suppression here
    if mylar.ZERO_LEVEL == "0":
        zeroadd = ""
    else:
        if mylar.ZERO_LEVEL_N == "none":
            zeroadd = ""
        elif mylar.ZERO_LEVEL_N == "0x":
            zeroadd = "0"
        elif mylar.ZERO_LEVEL_N == "00x":
            zeroadd = "00"

    logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))

    if str(len(issueno)) > 1:
        if int(issueno) < 10:
            self._log("issue detected less than 10", logger.DEBUG)
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
                    prettycomiss = str(zeroadd) + str(iss)
                else:
                    prettycomiss = str(zeroadd) + str(int(issueno))
            else:
                prettycomiss = str(zeroadd) + str(iss)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
        elif int(issueno) >= 10 and int(issueno) < 100:
            self._log("issue detected greater than 10, but less than 100", logger.DEBUG)
            if mylar.ZERO_LEVEL_N == "none":
                zeroadd = ""
            else:
                zeroadd = "0"
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
                    prettycomiss = str(zeroadd) + str(iss)
                else:
                    prettycomiss = str(zeroadd) + str(int(issueno))
            else:
                prettycomiss = str(zeroadd) + str(iss)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
        else:
            self._log("issue detected greater than 100", logger.DEBUG)
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
            prettycomiss = str(issueno)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
    else:
        prettycomiss = str(issueno)
        self._log("issue length error - cannot determine length. Defaulting to None: " + str(prettycomiss), logger.DEBUG)

    logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
    issueyear = issuenzb['IssueDate'][:4]
    self._log("Issue Year: " + str(issueyear), logger.DEBUG)
    logger.fdebug("Issue Year : " + str(issueyear))
    comicnzb = myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
    publisher = comicnzb['ComicPublisher']
    self._log("Publisher: " + publisher, logger.DEBUG)
    logger.fdebug("Publisher: " + str(publisher))
    #we need to un-unicode this to make sure we can write the filenames properly for spec.chars
    series = comicnzb['ComicName'].encode('ascii', 'ignore').strip()
    self._log("Series: " + series, logger.DEBUG)
    logger.fdebug("Series: " + str(series))
    seriesyear = comicnzb['ComicYear']
    self._log("Year: " + seriesyear, logger.DEBUG)
    logger.fdebug("Year: " + str(seriesyear))
    comlocation = comicnzb['ComicLocation']
    self._log("Comic Location: " + comlocation, logger.DEBUG)
    logger.fdebug("Comic Location: " + str(comlocation))
    comversion = comicnzb['ComicVersion']
    self._log("Comic Version: " + str(comversion), logger.DEBUG)
    logger.fdebug("Comic Version: " + str(comversion))
    if comversion is None:
        comversion = 'None'
    #if comversion is None, remove it so it doesn't populate with 'None'
    if comversion == 'None':
        chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT)
        chunk_f = re.compile(r'\s+')
        chunk_file_format = chunk_f.sub(' ', chunk_f_f)
        self._log("No version # found for series - tag will not be available for renaming.", logger.DEBUG)
        logger.fdebug("No version # found for series, removing from filename")
        logger.fdebug("new format is now: " + str(chunk_file_format))
    else:
        chunk_file_format = mylar.FILE_FORMAT

    #Run Pre-script
    if mylar.ENABLE_PRE_SCRIPTS:
        nzbn = self.nzb_name #original nzb name
        nzbf = self.nzb_folder #original nzb folder
        #name, comicyear, comicid , issueid, issueyear, issue, publisher
        #create the dic and send it.
        seriesmeta = []
        seriesmetadata = {}
        seriesmeta.append({
                    'name':       series,
                    'comicyear':  seriesyear,
                    'comicid':    comicid,
                    'issueid':    issueid,
                    'issueyear':  issueyear,
                    'issue':      issuenum,
                    'publisher':  publisher
                    })
        seriesmetadata['seriesmeta'] = seriesmeta
        self._run_pre_scripts(nzbn, nzbf, seriesmetadata)

    #rename file and move to new path
    #nfilename = series + " " + issueno + " (" + seriesyear + ")"

    file_values = {'$Series':    series,
                   '$Issue':     prettycomiss,
                   '$Year':      issueyear,
                   '$series':    series.lower(),
                   '$Publisher': publisher,
                   '$publisher': publisher.lower(),
                   '$VolumeY':   'V' + str(seriesyear),
                   '$VolumeN':   comversion
                   }

    ofilename = None

    for root, dirnames, filenames in os.walk(self.nzb_folder):
        for filename in filenames:
            if filename.lower().endswith(extensions):
                ofilename = filename
                path, ext = os.path.splitext(ofilename)

    if ofilename is None:
        logger.error(u"Aborting PostProcessing - the filename doesn't exist in the location given. Make sure that " + str(self.nzb_folder) + " exists and is the correct location.")
        return

    self._log("Original Filename: " + ofilename, logger.DEBUG)
    self._log("Original Extension: " + ext, logger.DEBUG)
    logger.fdebug("Original Filname: " + str(ofilename))
    logger.fdebug("Original Extension: " + str(ext))

    if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES:
        self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG)
        logger.fdebug("Rename Files isn't enabled - keeping original filename.")
        #check if extension is in nzb_name - will screw up otherwise
        if ofilename.lower().endswith(extensions):
            nfilename = ofilename[:-4]
        else:
            nfilename = ofilename
    else:
        nfilename = helpers.replace_all(chunk_file_format, file_values)
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
    nfilename = re.sub('[\,\:\?]', '', nfilename)
    self._log("New Filename: " + nfilename, logger.DEBUG)
    logger.fdebug("New Filename: " + str(nfilename))

    src = os.path.join(self.nzb_folder, ofilename)

    filechecker.validateAndCreateDirectory(comlocation, True)

    if mylar.LOWERCASE_FILENAMES:
        dst = (comlocation + "/" + nfilename + ext).lower()
    else:
        dst = comlocation + "/" + nfilename + ext.lower()
    self._log("Source:" + src, logger.DEBUG)
    self._log("Destination:" + dst, logger.DEBUG)
    logger.fdebug("Source: " + str(src))
    logger.fdebug("Destination: " + str(dst))
    os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder, str(nfilename + ext)))
    src = os.path.join(self.nzb_folder, str(nfilename + ext))
    try:
        shutil.move(src, dst)
    except (OSError, IOError):
        self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
        self._log("Post-Processing ABORTED.", logger.DEBUG)
        return
    #tidyup old path
    try:
        shutil.rmtree(self.nzb_folder)
    except (OSError, IOError):
        self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
        self._log("Post-Processing ABORTED.", logger.DEBUG)
        return

    self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
    #delete entry from nzblog table
    myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
    #update snatched table to change status to Downloaded
    updater.foundsearch(comicid, issueid, down='True')
    #force rescan of files
    updater.forceRescan(comicid)
    logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenumOG))
    self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)

    if mylar.PROWL_ENABLED:
        pushmessage = series + '(' + issueyear + ') - issue #' + issuenumOG
        logger.info(u"Prowl request")
        prowl = notifiers.PROWL()
        prowl.notify(pushmessage, "Download and Postprocessing completed")

    if mylar.NMA_ENABLED:
        nma = notifiers.NMA()
        nma.notify(series, str(issueyear), str(issuenumOG))

    if mylar.PUSHOVER_ENABLED:
        pushmessage = series + ' (' + str(issueyear) + ') - issue #' + str(issuenumOG)
        logger.info(u"Pushover request")
        pushover = notifiers.PUSHOVER()
        pushover.notify(pushmessage, "Download and Post-Processing completed")

    # retrieve/create the corresponding comic objects
    if mylar.ENABLE_EXTRA_SCRIPTS:
        folderp = str(dst) #folder location after move/rename
        nzbn = self.nzb_name #original nzb name
        filen = str(nfilename + ext) #new filename
        #name, comicyear, comicid , issueid, issueyear, issue, publisher
        #create the dic and send it.
        seriesmeta = []
        seriesmetadata = {}
        seriesmeta.append({
                    'name':       series,
                    'comicyear':  seriesyear,
                    'comicid':    comicid,
                    'issueid':    issueid,
                    'issueyear':  issueyear,
                    'issue':      issuenum,
                    'publisher':  publisher
                    })
        seriesmetadata['seriesmeta'] = seriesmeta
        self._run_extra_scripts(nzbname, self.nzb_folder, filen, folderp, seriesmetadata)

    return self.log
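
# A minimal sketch of the $-token substitution that file_values and
# helpers.replace_all perform above when RENAME_FILES is enabled. Mylar's
# replace_all is essentially a loop of str.replace calls; this standalone
# version and its name are an illustration-level assumption, not the project's
# helper.
def render_filename(file_format, file_values):
    out = file_format
    for token, value in file_values.items():
        out = out.replace(token, str(value))
    return out

# e.g. render_filename('$Series $Issue ($Year)',
#                      {'$Series': 'Invincible', '$Issue': '025', '$Year': '2005'})
# -> 'Invincible 025 (2005)'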
annchk = "no" if 'annual' in nzbname.lower(): logger.info("annual detected.") annchk = "yes" issuenzb = myDB.action( "SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone() else: issuenzb = myDB.action( "SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone() if issuenzb is not None: logger.info("issuenzb found.") if helpers.is_number(issueid): sandwich = int(issuenzb['IssueID']) else: logger.info("issuenzb not found.") #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing. if 'S' in issueid: sandwich = issueid elif 'G' in issueid or '-' in issueid: sandwich = 1 if helpers.is_number(sandwich): if sandwich < 900000: # if sandwich is less than 900000 it's a normal watchlist download. Bypass. pass else: if issuenzb is None or 'S' in sandwich or int(
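
# A hedged sketch of what the 'sandwich' value above is standing in for:
# numeric IssueIDs below 900000 are normal watchlist issues, values of 900000
# and up are one-offs, 'S'-prefixed ids are story-arc one-offs, and 'G'/'-' ids
# (GCD multi-volume) are forced through as normal watchlist post-processing.
# The function name and return labels are assumptions for illustration only.
def classify_issueid(issueid):
    s = str(issueid)
    if s.isdigit():
        return 'watchlist' if int(s) < 900000 else 'one-off'
    if 'S' in s:
        return 'story-arc one-off'
    if 'G' in s or '-' in s:
        return 'watchlist'  # GCD multi-volume: bypass and post-process normally
    return 'unknown'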