def searchforissue(issueid=None, new=False):
    """Search the enabled NZB providers for a single issue, or for every
    issue currently marked "Wanted" when no issueid is supplied.

    issueid -- IssueID of the issue to search for; None triggers a full
               sweep of the Wanted list.
    new     -- pass-through flag; forced to True on a full sweep.
    """
    myDB = db.DBConnection()
    if not issueid:
        # No specific issue requested - sweep everything marked Wanted.
        results = myDB.select('SELECT * from issues WHERE Status="Wanted"')
        new = True
        for result in results:
            comic = myDB.action('SELECT * from comics WHERE ComicID=?', [result['ComicID']]).fetchone()
            foundNZB = "none"
            # Prefer the issue's own cover-date year; fall back to the series year.
            if result['IssueDate'] is None:
                ComicYear = comic['ComicYear']
            else:
                ComicYear = str(result['IssueDate'])[:4]
            # Only search when at least one provider AND SABnzbd are configured.
            if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
                foundNZB = search_init(result['ComicName'], result['Issue_Number'], str(ComicYear), comic['ComicYear'])
                if foundNZB == "yes":
                    # Record the snatch / update status for this issue.
                    updater.foundsearch(ComicID=result['ComicID'], IssueID=result['IssueID'])
    else:
        result = myDB.action('SELECT * FROM issues where IssueID=?', [issueid]).fetchone()
        ComicID = result['ComicID']
        comic = myDB.action('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone()
        # Prefer the issue's own cover-date year; fall back to the series year.
        if result['IssueDate'] is None:
            IssueYear = comic['ComicYear']
        else:
            IssueYear = str(result['IssueDate'])[:4]
        foundNZB = "none"
        if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
            foundNZB = search_init(result['ComicName'], result['Issue_Number'], str(IssueYear), comic['ComicYear'])
            if foundNZB == "yes":
                updater.foundsearch(ComicID=result['ComicID'], IssueID=result['IssueID'])
def searchforissue(issueid=None, new=False):
    """Search the enabled NZB providers for a single issue, or for every
    issue currently marked "Wanted" when no issueid is supplied.

    issueid -- IssueID of the issue to search for; None triggers a full
               sweep of the Wanted list.
    new     -- pass-through flag; forced to True on a full sweep.
    """
    myDB = db.DBConnection()
    if not issueid:
        # No specific issue requested - sweep everything marked Wanted.
        results = myDB.select('SELECT * from issues WHERE Status="Wanted"')
        new = True
        for result in results:
            comic = myDB.action('SELECT * from comics WHERE ComicID=?', [result['ComicID']]).fetchone()
            foundNZB = "none"
            AlternateSearch = comic['AlternateSearch']
            IssueDate = result['IssueDate']
            UseFuzzy = comic['UseFuzzy']
            # Prefer the issue's own cover-date year; fall back to the series year.
            if result['IssueDate'] is None:
                ComicYear = comic['ComicYear']
            else:
                ComicYear = str(result['IssueDate'])[:4]
            # Only search when at least one provider AND SABnzbd are configured.
            if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
                foundNZB = search_init(result['ComicName'], result['Issue_Number'], str(ComicYear), comic['ComicYear'], IssueDate, result['IssueID'], AlternateSearch, UseFuzzy)
                if foundNZB == "yes":
                    # Record the snatch / update status for this issue.
                    updater.foundsearch(result['ComicID'], result['IssueID'])
    else:
        result = myDB.action('SELECT * FROM issues where IssueID=?', [issueid]).fetchone()
        ComicID = result['ComicID']
        comic = myDB.action('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone()
        AlternateSearch = comic['AlternateSearch']
        IssueDate = result['IssueDate']
        UseFuzzy = comic['UseFuzzy']
        # Prefer the issue's own cover-date year; fall back to the series year.
        if result['IssueDate'] is None:
            IssueYear = comic['ComicYear']
        else:
            IssueYear = str(result['IssueDate'])[:4]
        foundNZB = "none"
        if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
            foundNZB = search_init(result['ComicName'], result['Issue_Number'], str(IssueYear), comic['ComicYear'], IssueDate, result['IssueID'], AlternateSearch, UseFuzzy)
            if foundNZB == "yes":
                updater.foundsearch(ComicID=result['ComicID'], IssueID=result['IssueID'])
def searchforissue(issueid=None, new=False):
    """Search the enabled NZB providers for a single issue, or for every
    issue currently marked "Wanted" when no issueid is supplied.

    issueid -- IssueID of the issue to search for; None triggers a full
               sweep of the Wanted list.
    new     -- pass-through flag; forced to True on a full sweep.
    """
    myDB = db.DBConnection()
    if not issueid:
        # No specific issue requested - sweep everything marked Wanted.
        results = myDB.select('SELECT * from issues WHERE Status="Wanted"')
        new = True
        for result in results:
            comic = myDB.action("SELECT * from comics WHERE ComicID=?", [result["ComicID"]]).fetchone()
            foundNZB = "none"
            # Prefer the issue's own cover-date year; fall back to the series year.
            if result["IssueDate"] is None:
                ComicYear = comic["ComicYear"]
            else:
                ComicYear = str(result["IssueDate"])[:4]
            # Only search when at least one provider AND SABnzbd are configured.
            if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
                foundNZB = search_init(result["ComicName"], result["Issue_Number"], str(ComicYear), comic["ComicYear"])
                if foundNZB == "yes":
                    # Record the snatch / update status for this issue.
                    updater.foundsearch(result["ComicID"], result["IssueID"])
    else:
        result = myDB.action("SELECT * FROM issues where IssueID=?", [issueid]).fetchone()
        ComicID = result["ComicID"]
        comic = myDB.action("SELECT * FROM comics where ComicID=?", [ComicID]).fetchone()
        # Prefer the issue's own cover-date year; fall back to the series year.
        if result["IssueDate"] is None:
            IssueYear = comic["ComicYear"]
        else:
            IssueYear = str(result["IssueDate"])[:4]
        foundNZB = "none"
        if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
            foundNZB = search_init(result["ComicName"], result["Issue_Number"], str(IssueYear), comic["ComicYear"])
            if foundNZB == "yes":
                updater.foundsearch(ComicID=result["ComicID"], IssueID=result["IssueID"])
def searchIssueIDList(issuelist):
    """Run a provider search for each IssueID in *issuelist*.

    issuelist -- iterable of IssueID values to look up and search for.
    """
    myDB = db.DBConnection()
    for issueid in issuelist:
        issue = myDB.action('SELECT * from issues WHERE IssueID=?', [issueid]).fetchone()
        comic = myDB.action('SELECT * from comics WHERE ComicID=?', [issue['ComicID']]).fetchone()
        # Use the logger instead of bare print for consistency with the rest of the module.
        logger.info(u"Checking for issue: " + str(issue['Issue_Number']))
        foundNZB = "none"
        # Prefer the issue's own cover-date year; fall back to the series year.
        if issue['IssueDate'] is None:
            ComicYear = comic['ComicYear']
        else:
            ComicYear = str(issue['IssueDate'])[:4]
        # Only search when at least one provider AND SABnzbd are configured.
        if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
            foundNZB = search_init(comic['ComicName'], issue['Issue_Number'], str(ComicYear), comic['ComicYear'], issue['IssueDate'])
            if foundNZB == "yes":
                updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'])
def queueissue(self, mode, ComicName=None, ComicID=None, ComicYear=None, ComicIssue=None, IssueID=None, new=False, redirect=None):
    """Queue a comic/issue for searching, dispatched on *mode*.

    mode:
      'series' / 'pullseries' -- ComicID unknown; redirect the browser to
                                 the interactive search page.
      'pullwant'              -- one-off grab of an issue straight off the
                                 weekly pullist (no ComicID/IssueID yet).
      'want'                  -- mark a watched issue Wanted, then search.

    Raises cherrypy.HTTPRedirect to move the browser onward; the
    'pullwant' path and a successful 'want' search return normally.
    """
    now = datetime.datetime.now()
    myDB = db.DBConnection()
    #mode dictates type of queue - either 'want' for individual comics, or 'series' for series watchlist.
    if ComicID is None and mode == 'series':
        issue = None
        raise cherrypy.HTTPRedirect("searchit?name=%s&issue=%s&mode=%s" % (ComicName, 'None', 'series'))
    elif ComicID is None and mode == 'pullseries':
        # we can limit the search by including the issue # and searching for
        # comics that have X many issues
        raise cherrypy.HTTPRedirect("searchit?name=%s&issue=%s&mode=%s" % (ComicName, 'None', 'pullseries'))
    elif ComicID is None and mode == 'pullwant':
        #this is for marking individual comics from the pullist to be downloaded.
        #because ComicID and IssueID will both be None due to pullist, it's probably
        #better to set both to some generic #, and then filter out later...
        # Year is taken from the pullist ship date (YYYY-MM-DD prefix);
        # falls back to the current year when the ship date is blank.
        cyear = myDB.action("SELECT SHIPDATE FROM weekly").fetchone()
        ComicYear = str(cyear['SHIPDATE'])[:4]
        if ComicYear == '':
            ComicYear = now.year
        logger.info(u"Marking " + ComicName + " " + ComicIssue + " as wanted...")
        foundcom = search.search_init(ComicName=ComicName, IssueNumber=ComicIssue, ComicYear=ComicYear, SeriesYear=None, IssueDate=cyear['SHIPDATE'], IssueID=IssueID)
        if foundcom == "yes":
            logger.info(u"Downloaded " + ComicName + " " + ComicIssue )
        return
    elif mode == 'want':
        cdname = myDB.action("SELECT ComicName from comics where ComicID=?", [ComicID]).fetchone()
        ComicName = cdname['ComicName']
        logger.info(u"Marking " + ComicName + " issue: " + ComicIssue + " as wanted...")
    #---
    #this should be on it's own somewhere
    # NOTE(review): from here down runs for the 'want' path (the other
    # branches redirect or return above) -- confirm against upstream layout.
    if IssueID is not None:
        # Flag the issue Wanted before kicking off the search.
        controlValueDict = {"IssueID": IssueID}
        newStatus = {"Status": "Wanted"}
        myDB.upsert("issues", newStatus, controlValueDict)
    #for future reference, the year should default to current year (.datetime)
    issues = myDB.action("SELECT IssueDate FROM issues WHERE IssueID=?", [IssueID]).fetchone()
    if ComicYear == None:
        ComicYear = str(issues['IssueDate'])[:4]
    miy = myDB.action("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
    SeriesYear = miy['ComicYear']
    AlternateSearch = miy['AlternateSearch']
    foundcom = search.search_init(ComicName, ComicIssue, ComicYear, SeriesYear, issues['IssueDate'], IssueID, AlternateSearch)
    if foundcom == "yes":
        # file check to see if issue exists and update 'have' count
        if IssueID is not None:
            return updater.foundsearch(ComicID, IssueID)
    if ComicID:
        raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
    else:
        raise cherrypy.HTTPRedirect(redirect)
def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
    """Import/refresh a series using GCD data only (no ComicVine).

    Used when volume spanning is discovered for a comic that can't be added
    via CV. Scrapes GCD for series/issue details, builds the comic's local
    directory, grabs the cover, upserts every issue row, and finally kicks
    off a search for anything marked Wanted.

    gcomicid -- GCD-prefixed id ("G" + numeric id), distinct from CV ids.
    pullupd  -- when None (manual add), also re-sorts and checks the pullist.
    imported -- non-None when called from the mass importer (triggers move/archive).
    ogcname  -- original comic name, used by the mass-import file mover.
    """
    # because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
    # CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)
    gcdcomicid = gcomicid
    myDB = db.DBConnection()

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage
    controlValueDict = {"ComicID": gcdcomicid}
    comic = myDB.action('SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation, ComicPublisher FROM comics WHERE ComicID=?', [gcomicid]).fetchone()
    ComicName = comic[0]
    ComicYear = comic[1]
    ComicIssues = comic[2]
    ComicPublished = comic[3]
    comlocation = comic[5]
    ComicPublisher = comic[6]
    #ComicImage = comic[4]
    newValueDict = {"Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now
    if not comic:
        logger.warn("Error fetching comic. ID for : " + gcdcomicid)
        # FIX: original referenced an undefined name 'dbcomic' here (NameError);
        # 'comic' is the row we just fetched, so test it directly.
        if comic is None:
            newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (gcdcomicid),
                            "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return

    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(comicorder=mylar.COMICSORT, imported=gcomicid)

    # Strip a leading "The " for the sortable name.
    if ComicName.startswith('The '):
        sortname = ComicName[4:]
    else:
        sortname = ComicName

    logger.info(u"Now adding/updating: " + ComicName)
    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    comicid = gcomicid[1:]  # drop the leading "G" to get the numeric GCD id
    resultURL = "/series/" + str(comicid) + "/"
    gcdinfo=parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=gcdcomicid, TotalIssues=ComicIssues, issvariation=None, resultPublished=None)
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")" )
        updater.no_searchresults(gcomicid)
        nomatch = "true"
        return nomatch
    logger.info(u"Sucessfully retrieved details for " + ComicName )
    #--End

    ComicImage = gcdinfo['ComicImage']

    #comic book location on machine
    # setup default location here
    if comlocation is None:
        # let's remove the non-standard characters here.
        u_comicnm = ComicName
        u_comicname = u_comicnm.encode('ascii', 'ignore').strip()
        if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname:
            comicdir = u_comicname
            if ':' in comicdir:
                comicdir = comicdir.replace(':','')
            if '/' in comicdir:
                comicdir = comicdir.replace('/','-')
            if ',' in comicdir:
                comicdir = comicdir.replace(',','')
        else:
            comicdir = u_comicname

        series = comicdir
        publisher = ComicPublisher
        year = ComicYear

        #do work to generate folder path
        values = {'$Series':    series,
                  '$Publisher': publisher,
                  '$Year':      year,
                  '$series':    series.lower(),
                  '$publisher': publisher.lower(),
                  '$Volume':    year
                  }

        if mylar.FOLDER_FORMAT == '':
            comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
        else:
            comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values)

        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)

    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
    else:
        filechecker.validateAndCreateDirectory(comlocation, True)

    comicIssues = gcdinfo['totalissues']

    #let's download the image...
    if not os.path.exists(mylar.CACHE_DIR):
        #let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
        except OSError:
            logger.error(u"Could not create cache dir : " + str(mylar.CACHE_DIR))

    coverfile = os.path.join(mylar.CACHE_DIR, str(gcomicid) + ".jpg")
    urllib.urlretrieve(str(ComicImage), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            # Web-relative path served to the browser for the cover image.
            ComicImage = os.path.join('cache',str(gcomicid) + ".jpg")
            logger.info(u"Sucessfully retrieved cover for " + ComicName)
            #if the comic cover local is checked, save a cover.jpg to the series folder.
            if mylar.COMIC_COVER_LOCAL:
                comiclocal = os.path.join(str(comlocation), "cover.jpg")
                shutil.copy(ComicImage,comiclocal)
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")

    controlValueDict = {"ComicID": gcomicid}
    newValueDict = {"ComicName": ComicName,
                    "ComicSortName": sortname,
                    "ComicYear": ComicYear,
                    "Total": comicIssues,
                    "ComicLocation": comlocation,
                    "ComicImage": ComicImage,
                    "DateAdded": helpers.today(),
                    "Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)

    #comicsort here...
    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(sequence='update')

    logger.info(u"Sucessfully retrieved issue details for " + ComicName )
    iscnt = int(comicIssues)
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    logger.info(u"Now adding/updating issues for " + ComicName)
    bb = 0
    while (bb <= iscnt):
        try:
            gcdval = gcdinfo['gcdchoice'][bb]
        except IndexError:
            #account for gcd variation here
            if gcdinfo['gcdvariation'] == 'gcd':
                issdate = '0000-00-00'
                # NOTE(review): 'issis' is never defined in this function
                # (copied from addComictoDB); this path raises NameError as-is.
                int_issnum = int ( issis / 1000 )
            break
        if 'nn' in str(gcdval['GCDIssue']):
            #no number detected - GN, TP or the like
            logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
            updater.no_searchresults(comicid)
            return
        elif '.' in str(gcdval['GCDIssue']):
            # Decimal issue (e.g. "12.1"): encode as int(number)*1000 + decimal part.
            issst = str(gcdval['GCDIssue']).find('.')
            issb4dec = str(gcdval['GCDIssue'])[:issst]
            #if the length of decimal is only 1 digit, assume it's a tenth
            decis = str(gcdval['GCDIssue'])[issst+1:]
            if len(decis) == 1:
                decisval = int(decis) * 10
                issaftdec = str(decisval)
            if len(decis) == 2:
                decisval = int(decis)
                issaftdec = str(decisval)
            if int(issaftdec) == 0:
                issaftdec = "00"
            gcd_issue = issb4dec + "." + issaftdec
            gcdis = (int(issb4dec) * 1000) + decisval
        else:
            gcdis = int(str(gcdval['GCDIssue'])) * 1000
            gcd_issue = str(gcdval['GCDIssue'])

        #get the latest issue / date using the date.
        int_issnum = int( gcdis / 1000 )
        issdate = str(gcdval['GCDDate'])
        issid = "G" + str(gcdval['IssueID'])
        if gcdval['GCDDate'] > latestdate:
            latestiss = str(gcd_issue)
            latestdate = str(gcdval['GCDDate'])

        # check if the issue already exists
        iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()

        #adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
        if "?" in str(issdate):
            issdate = "0000-00-00"

        controlValueDict = {"IssueID": issid}
        newValueDict = {"ComicID": gcomicid,
                        "ComicName": ComicName,
                        "Issue_Number": gcd_issue,
                        "IssueDate": issdate,
                        "Int_IssueNumber": int_issnum
                        }
        # FIX: DateAdded was previously assigned *before* newValueDict was
        # rebuilt above, so it was silently discarded. Only stamp it for
        # issues not already in the database.
        if iss_exists is None:
            newValueDict['DateAdded'] = helpers.today()

        if mylar.AUTOWANT_ALL:
            newValueDict['Status'] = "Wanted"
        elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
            newValueDict['Status'] = "Wanted"
        else:
            newValueDict['Status'] = "Skipped"

        if iss_exists:
            # Never clobber the status of an issue we already track.
            newValueDict['Status'] = iss_exists['Status']

        myDB.upsert("issues", newValueDict, controlValueDict)
        bb+=1

    controlValueStat = {"ComicID": gcomicid}
    newValueStat = {"Status": "Active",
                    "LatestIssue": latestiss,
                    "LatestDate": latestdate,
                    "LastUpdated": helpers.now()
                    }
    myDB.upsert("comics", newValueStat, controlValueStat)

    # Drop a cvinfo marker file into the series directory if enabled.
    if mylar.CVINFO:
        if not os.path.exists(comlocation + "/cvinfo"):
            with open(comlocation + "/cvinfo","w") as text_file:
                text_file.write("http://www.comicvine.com/volume/49-" + str(comicid))

    logger.info(u"Updating complete for: " + ComicName)

    #move the files...if imported is not empty (meaning it's not from the mass importer.)
    if imported is None or imported == 'None':
        pass
    else:
        if mylar.IMP_MOVE:
            logger.info("Mass import - Move files")
            moveit.movefiles(gcomicid,comlocation,ogcname)
        else:
            logger.info("Mass import - Moving not Enabled. Setting Archived Status for import.")
            moveit.archivefiles(gcomicid,ogcname)

    #check for existing files...
    updater.forceRescan(gcomicid)

    if pullupd is None:
        # lets' check the pullist for anyting at this time as well since we're here.
        if mylar.AUTOWANT_UPCOMING and 'Present' in ComicPublished:
            logger.info(u"Checking this week's pullist for new issues of " + ComicName)
            updater.newpullcheck(comic['ComicName'], gcomicid)

        #here we grab issues that have been marked as wanted above...
        results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid])
        if results:
            logger.info(u"Attempting to grab wanted issues for : " + ComicName)
            for result in results:
                foundNZB = "none"
                if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
                    foundNZB = search.searchforissue(result['IssueID'])
                    if foundNZB == "yes":
                        updater.foundsearch(result['ComicID'], result['IssueID'])
        else:
            logger.info(u"No issues marked as wanted for " + ComicName)

        logger.info(u"Finished grabbing what I could.")
def addComictoDB(comicid):
    """Add or refresh a comic series from ComicVine (cross-checked with GCD).

    Pulls series details from CV, scrapes GCD for publishing dates, builds
    the local series directory, upserts every issue row, then searches for
    anything marked Wanted.

    comicid -- ComicVine ComicID of the series to add/update.
    """
    # Putting this here to get around the circular import. Will try to use this to update images at later date.
    from mylar import cache

    myDB = db.DBConnection()

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage
    controlValueDict = {"ComicID": comicid}

    dbcomic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone()
    if dbcomic is None:
        newValueDict = {"ComicName": "Comic ID: %s" % (comicid),
                        "Status": "Loading"}
    else:
        newValueDict = {"Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now
    comic = cv.getComic(comicid,'comic')

    if not comic:
        logger.warn("Error fetching comic. ID for : " + comicid)
        if dbcomic is None:
            newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (comicid),
                            "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return

    # Strip a leading "The " for the sortable name.
    if comic['ComicName'].startswith('The '):
        sortname = comic['ComicName'][4:]
    else:
        sortname = comic['ComicName']

    logger.info(u"Now adding/updating: " + comic['ComicName'])
    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    gcdinfo=parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid)
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + comic['ComicName'] + " (" + comic['ComicYear'] + ")" )
        updater.no_searchresults(comicid)
        return
    logger.info(u"Sucessfully retrieved details for " + comic['ComicName'] )
    #--End

    #comic book location on machine
    # setup default location here
    if ':' in comic['ComicName']:
        comicdir = comic['ComicName'].replace(':','')
    else:
        comicdir = comic['ComicName']
    comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
    if mylar.DESTINATION_DIR == "":
        logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
        return
    if mylar.REPLACE_SPACES:
        #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
        comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)

    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
    else:
        try:
            os.makedirs(str(comlocation))
            logger.info(u"Directory successfully created at: " + str(comlocation))
        # FIX: original read "except OSError.e:" which is not a valid
        # exception clause (it raised AttributeError instead of catching).
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

    #try to account for CV not updating new issues as fast as GCD
    #seems CV doesn't update total counts
    if gcdinfo['gcdvariation'] == "cv":
        comicIssues = str(int(comic['ComicIssues']) + 1)
    else:
        comicIssues = comic['ComicIssues']

    controlValueDict = {"ComicID": comicid}
    newValueDict = {"ComicName": comic['ComicName'],
                    "ComicSortName": sortname,
                    "ComicYear": comic['ComicYear'],
                    "ComicImage": comic['ComicImage'],
                    "Total": comicIssues,
                    "ComicLocation": comlocation,
                    "ComicPublisher": comic['ComicPublisher'],
                    "ComicPublished": parseit.resultPublished,
                    "DateAdded": helpers.today(),
                    "Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)

    issued = cv.getComic(comicid,'issue')
    logger.info(u"Sucessfully retrieved issue details for " + comic['ComicName'] )
    n = 0
    iscnt = int(comicIssues)
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    # FIX: log message was missing the trailing space before the name.
    logger.info(u"Now adding/updating issues for " + comic['ComicName'])

    while (n <= iscnt):
        try:
            firstval = issued['issuechoice'][n]
        except IndexError:
            break
        cleanname = helpers.cleanName(firstval['Issue_Name'])
        issid = str(firstval['Issue_ID'])
        issnum = str(firstval['Issue_Number'])
        issname = cleanname
        if '.' in str(issnum):
            # Decimal issue (e.g. "12.1"): encode as int(number)*1000 + decimal part.
            issn_st = str(issnum).find('.')
            issn_b4dec = str(issnum)[:issn_st]
            #if the length of decimal is only 1 digit, assume it's a tenth
            dec_is = str(issnum)[issn_st + 1:]
            if len(dec_is) == 1:
                dec_nisval = int(dec_is) * 10
                iss_naftdec = str(dec_nisval)
            if len(dec_is) == 2:
                dec_nisval = int(dec_is)
                iss_naftdec = str(dec_nisval)
            iss_issue = issn_b4dec + "." + iss_naftdec
            issis = (int(issn_b4dec) * 1000) + dec_nisval
        else:
            issis = int(issnum) * 1000

        # Walk the GCD issue list to find the matching issue's publish date.
        bb = 0
        while (bb <= iscnt):
            try:
                gcdval = gcdinfo['gcdchoice'][bb]
            except IndexError:
                #account for gcd variation here
                if gcdinfo['gcdvariation'] == 'gcd':
                    logger.info("gcd-variation accounted for.")
                    issdate = '0000-00-00'
                    int_issnum = int ( issis / 1000 )
                break
            if 'nn' in str(gcdval['GCDIssue']):
                #no number detected - GN, TP or the like
                logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
                updater.no_searchresults(comicid)
                return
            elif '.' in str(gcdval['GCDIssue']):
                issst = str(gcdval['GCDIssue']).find('.')
                issb4dec = str(gcdval['GCDIssue'])[:issst]
                #if the length of decimal is only 1 digit, assume it's a tenth
                decis = str(gcdval['GCDIssue'])[issst+1:]
                if len(decis) == 1:
                    decisval = int(decis) * 10
                    issaftdec = str(decisval)
                if len(decis) == 2:
                    decisval = int(decis)
                    issaftdec = str(decisval)
                gcd_issue = issb4dec + "." + issaftdec
                gcdis = (int(issb4dec) * 1000) + decisval
            else:
                gcdis = int(str(gcdval['GCDIssue'])) * 1000
            if gcdis == issis:
                issdate = str(gcdval['GCDDate'])
                int_issnum = int( gcdis / 1000 )
                #get the latest issue / date using the date.
                if gcdval['GCDDate'] > latestdate:
                    latestiss = str(issnum)
                    latestdate = str(gcdval['GCDDate'])
                break
            bb+=1

        # check if the issue already exists
        iss_exists = myDB.select('SELECT * from issues WHERE IssueID=?', [issid])

        controlValueDict = {"IssueID": issid}
        newValueDict = {"ComicID": comicid,
                        "ComicName": comic['ComicName'],
                        "IssueName": issname,
                        "Issue_Number": issnum,
                        "IssueDate": issdate,
                        "Int_IssueNumber": int_issnum
                        }
        # FIX: DateAdded was previously assigned *before* newValueDict was
        # rebuilt above, so it was silently discarded. Only stamp it for
        # issues not already in the database.
        if not len(iss_exists):
            newValueDict['DateAdded'] = helpers.today()

        if mylar.AUTOWANT_ALL:
            newValueDict['Status'] = "Wanted"
        #elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING:
        #    newValueDict['Status'] = "Wanted"
        else:
            newValueDict['Status'] = "Skipped"

        myDB.upsert("issues", newValueDict, controlValueDict)
        n+=1

    #check for existing files...
    updater.forceRescan(comicid)

    controlValueStat = {"ComicID": comicid}
    newValueStat = {"Status": "Active",
                    "LatestIssue": latestiss,
                    "LatestDate": latestdate
                    }
    myDB.upsert("comics", newValueStat, controlValueStat)

    logger.info(u"Updating complete for: " + comic['ComicName'])

    #here we grab issues that have been marked as wanted above...
    results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid])
    if results:
        logger.info(u"Attempting to grab wanted issues for : " + comic['ComicName'])
        for result in results:
            foundNZB = "none"
            if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
                foundNZB = search.searchforissue(result['IssueID'])
                if foundNZB == "yes":
                    updater.foundsearch(result['ComicID'], result['IssueID'])
    else:
        logger.info(u"No issues marked as wanted for " + comic['ComicName'])

    logger.info(u"Finished grabbing what I could.")
def Process_next(self,comicid,issueid,issuenumOG,ml=None): annchk = "no" extensions = ('.cbr', '.cbz') snatchedtorrent = False myDB = db.DBConnection() comicnzb = myDB.selectone("SELECT * from comics WHERE comicid=?", [comicid]).fetchone() issuenzb = myDB.selectone("SELECT * from issues WHERE issueid=? AND comicid=? AND ComicName NOT NULL", [issueid,comicid]).fetchone() if ml is not None and mylar.SNATCHEDTORRENT_NOTIFY: snatchnzb = myDB.selectone("SELECT * from snatched WHERE IssueID=? AND ComicID=? AND (provider=? OR provider=?) AND Status='Snatched'", [issueid,comicid,'KAT','CBT']).fetchone() if snatchnzb is None: logger.fdebug('Was not downloaded with Mylar and the usage of torrents. Disabling torrent manual post-processing completion notification.') else: logger.fdebug('Was downloaded from ' + snatchnzb['Provider'] + '. Enabling torrent manual post-processing completion notification.') snatchedtorrent = True logger.fdebug('issueid: ' + str(issueid)) logger.fdebug('issuenumOG: ' + str(issuenumOG)) if issuenzb is None: issuenzb = myDB.selectone("SELECT * from annuals WHERE issueid=? and comicid=?", [issueid,comicid]).fetchone() annchk = "yes" #issueno = str(issuenum).split('.')[0] #new CV API - removed all decimals...here we go AGAIN! issuenum = issuenzb['Issue_Number'] issue_except = 'None' if 'au' in issuenum.lower() and issuenum[:1].isdigit(): issuenum = re.sub("[^0-9]", "", issuenum) issue_except = ' AU' elif 'ai' in issuenum.lower() and issuenum[:1].isdigit(): issuenum = re.sub("[^0-9]", "", issuenum) issue_except = ' AI' elif 'inh' in issuenum.lower() and issuenum[:1].isdigit(): issuenum = re.sub("[^0-9]", "", issuenum) issue_except = '.INH' elif 'now' in issuenum.lower() and issuenum[:1].isdigit(): if '!' in issuenum: issuenum = re.sub('\!', '', issuenum) issuenum = re.sub("[^0-9]", "", issuenum) issue_except = '.NOW' if '.' 
in issuenum: iss_find = issuenum.find('.') iss_b4dec = issuenum[:iss_find] iss_decval = issuenum[iss_find+1:] if int(iss_decval) == 0: iss = iss_b4dec issdec = int(iss_decval) issueno = str(iss) self._log("Issue Number: " + str(issueno)) logger.fdebug("Issue Number: " + str(issueno)) else: if len(iss_decval) == 1: iss = iss_b4dec + "." + iss_decval issdec = int(iss_decval) * 10 else: iss = iss_b4dec + "." + iss_decval.rstrip('0') issdec = int(iss_decval.rstrip('0')) * 10 issueno = iss_b4dec self._log("Issue Number: " + str(iss)) logger.fdebug("Issue Number: " + str(iss)) else: iss = issuenum issueno = str(iss) # issue zero-suppression here if mylar.ZERO_LEVEL == "0": zeroadd = "" else: if mylar.ZERO_LEVEL_N == "none": zeroadd = "" elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0" elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00" logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N)) if str(len(issueno)) > 1: if int(issueno) < 0: self._log("issue detected is a negative") prettycomiss = '-' + str(zeroadd) + str(abs(issueno)) elif int(issueno) < 10: self._log("issue detected less than 10") if '.' in iss: if int(iss_decval) > 0: issueno = str(iss) prettycomiss = str(zeroadd) + str(iss) else: prettycomiss = str(zeroadd) + str(int(issueno)) else: prettycomiss = str(zeroadd) + str(iss) if issue_except != 'None': prettycomiss = str(prettycomiss) + issue_except self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss)) elif int(issueno) >= 10 and int(issueno) < 100: self._log("issue detected greater than 10, but less than 100") if mylar.ZERO_LEVEL_N == "none": zeroadd = "" else: zeroadd = "0" if '.' 
in iss: if int(iss_decval) > 0: issueno = str(iss) prettycomiss = str(zeroadd) + str(iss) else: prettycomiss = str(zeroadd) + str(int(issueno)) else: prettycomiss = str(zeroadd) + str(iss) if issue_except != 'None': prettycomiss = str(prettycomiss) + issue_except self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss)) else: self._log("issue detected greater than 100") if '.' in iss: if int(iss_decval) > 0: issueno = str(iss) prettycomiss = str(issueno) if issue_except != 'None': prettycomiss = str(prettycomiss) + issue_except self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss)) else: prettycomiss = str(issueno) self._log("issue length error - cannot determine length. Defaulting to None: " + str(prettycomiss)) if annchk == "yes": self._log("Annual detected.") logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss)) issueyear = issuenzb['IssueDate'][:4] self._log("Issue Year: " + str(issueyear)) logger.fdebug("Issue Year : " + str(issueyear)) month = issuenzb['IssueDate'][5:7].replace('-','').strip() month_name = helpers.fullmonth(month) # comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone() publisher = comicnzb['ComicPublisher'] self._log("Publisher: " + publisher) logger.fdebug("Publisher: " + str(publisher)) #we need to un-unicode this to make sure we can write the filenames properly for spec.chars series = comicnzb['ComicName'].encode('ascii', 'ignore').strip() self._log("Series: " + series) logger.fdebug("Series: " + str(series)) seriesyear = comicnzb['ComicYear'] self._log("Year: " + seriesyear) logger.fdebug("Year: " + str(seriesyear)) comlocation = comicnzb['ComicLocation'] self._log("Comic Location: " + comlocation) logger.fdebug("Comic Location: " + str(comlocation)) comversion = comicnzb['ComicVersion'] self._log("Comic Version: " + str(comversion)) logger.fdebug("Comic Version: " + 
str(comversion)) if comversion is None: comversion = 'None' #if comversion is None, remove it so it doesn't populate with 'None' if comversion == 'None': chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT) chunk_f = re.compile(r'\s+') chunk_file_format = chunk_f.sub(' ', chunk_f_f) self._log("No version # found for series - tag will not be available for renaming.") logger.fdebug("No version # found for series, removing from filename") logger.fdebug("new format is now: " + str(chunk_file_format)) else: chunk_file_format = mylar.FILE_FORMAT if annchk == "no": chunk_f_f = re.sub('\$Annual','',chunk_file_format) chunk_f = re.compile(r'\s+') chunk_file_format = chunk_f.sub(' ', chunk_f_f) logger.fdebug('not an annual - removing from filename paramaters') logger.fdebug('new format: ' + str(chunk_file_format)) else: logger.fdebug('chunk_file_format is: ' + str(chunk_file_format)) if '$Annual' not in chunk_file_format: #if it's an annual, but $Annual isn't specified in file_format, we need to #force it in there, by default in the format of $Annual $Issue prettycomiss = "Annual " + str(prettycomiss) logger.fdebug('prettycomiss: ' + str(prettycomiss)) ofilename = None #if meta-tagging is not enabled, we need to declare the check as being fail #if meta-tagging is enabled, it gets changed just below to a default of pass pcheck = "fail" #tag the meta. if mylar.ENABLE_META: self._log("Metatagging enabled - proceeding...") logger.fdebug("Metatagging enabled - proceeding...") pcheck = "pass" try: import cmtagmylar if ml is None: pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid) else: pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid, manual="yes", filename=ml['ComicLocation']) except ImportError: logger.fdebug("comictaggerlib not found on system. 
Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/") logger.fdebug("continuing with PostProcessing, but I'm not using metadata.") pcheck = "fail" if pcheck == "fail": self._log("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...") logger.fdebug("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...") elif pcheck == "unrar error": self._log("This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy.") logger.error("This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy.") return self.log else: otofilename = pcheck self._log("Sucessfully wrote metadata to .cbz - Continuing..") logger.fdebug("Sucessfully wrote metadata to .cbz (" + str(otofilename) + ") - Continuing..") #Run Pre-script if mylar.ENABLE_PRE_SCRIPTS: nzbn = self.nzb_name #original nzb name nzbf = self.nzb_folder #original nzb folder #name, comicyear, comicid , issueid, issueyear, issue, publisher #create the dic and send it. seriesmeta = [] seriesmetadata = {} seriesmeta.append({ 'name': series, 'comicyear': seriesyear, 'comicid': comicid, 'issueid': issueid, 'issueyear': issueyear, 'issue': issuenum, 'publisher': publisher }) seriesmetadata['seriesmeta'] = seriesmeta self._run_pre_scripts(nzbn, nzbf, seriesmetadata ) #rename file and move to new path #nfilename = series + " " + issueno + " (" + seriesyear + ")" file_values = {'$Series': series, '$Issue': prettycomiss, '$Year': issueyear, '$series': series.lower(), '$Publisher': publisher, '$publisher': publisher.lower(), '$VolumeY': 'V' + str(seriesyear), '$VolumeN': comversion, '$monthname': month_name, '$month': month, '$Annual': 'Annual' } #if it's a Manual Run, use the ml['ComicLocation'] for the exact filename. 
if ml is None: for root, dirnames, filenames in os.walk(self.nzb_folder): for filename in filenames: if filename.lower().endswith(extensions): odir = root ofilename = filename path, ext = os.path.splitext(ofilename) if odir is None: logger.fdebug('no root folder set.') odir = self.nzb_folder logger.fdebug('odir: ' + str(odir)) logger.fdebug('ofilename: ' + str(ofilename)) else: if pcheck == "fail": otofilename = ml['ComicLocation'] logger.fdebug('otofilename:' + str(otofilename)) odir, ofilename = os.path.split(otofilename) logger.fdebug('odir: ' + str(odir)) logger.fdebug('ofilename: ' + str(ofilename)) path, ext = os.path.splitext(ofilename) logger.fdebug('path: ' + str(path)) logger.fdebug('ext:' + str(ext)) if ofilename is None: logger.error(u"Aborting PostProcessing - the filename doesn't exist in the location given. Make sure that " + str(self.nzb_folder) + " exists and is the correct location.") return self._log("Original Filename: " + ofilename) self._log("Original Extension: " + ext) logger.fdebug("Original Filname: " + str(ofilename)) logger.fdebug("Original Extension: " + str(ext)) if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES: self._log("Rename Files isn't enabled...keeping original filename.") logger.fdebug("Rename Files isn't enabled - keeping original filename.") #check if extension is in nzb_name - will screw up otherwise if ofilename.lower().endswith(extensions): nfilename = ofilename[:-4] else: nfilename = ofilename else: nfilename = helpers.replace_all(chunk_file_format, file_values) if mylar.REPLACE_SPACES: #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR) nfilename = re.sub('[\,\:\?]', '', nfilename) nfilename = re.sub('[\/]', '-', nfilename) self._log("New Filename: " + nfilename) logger.fdebug("New Filename: " + str(nfilename)) #src = os.path.join(self.nzb_folder, ofilename) src = os.path.join(odir, ofilename) 
filechecker.validateAndCreateDirectory(comlocation, True) if mylar.LOWERCASE_FILENAMES: dst = (comlocation + "/" + nfilename + ext).lower() else: dst = comlocation + "/" + nfilename + ext.lower() self._log("Source:" + src) self._log("Destination:" + dst) logger.fdebug("Source: " + str(src)) logger.fdebug("Destination: " + str(dst)) if ml is None: #downtype = for use with updater on history table to set status to 'Downloaded' downtype = 'True' #non-manual run moving/deleting... logger.fdebug('self.nzb_folder: ' + self.nzb_folder) logger.fdebug('odir: ' + str(odir)) logger.fdebug('ofilename:' + str(ofilename)) logger.fdebug('nfilename:' + str(nfilename + ext)) if mylar.RENAME_FILES: if str(ofilename) != str(nfilename + ext): logger.fdebug("Renaming " + os.path.join(odir, str(ofilename)) + " ..to.. " + os.path.join(odir,str(nfilename + ext))) os.rename(os.path.join(odir, str(ofilename)), os.path.join(odir,str(nfilename + ext))) else: logger.fdebug('filename is identical as original, not renaming.') #src = os.path.join(self.nzb_folder, str(nfilename + ext)) src = os.path.join(odir, str(nfilename + ext)) try: shutil.move(src, dst) except (OSError, IOError): self._log("Failed to move directory - check directories and manually re-run.") self._log("Post-Processing ABORTED.") return #tidyup old path try: shutil.rmtree(self.nzb_folder) except (OSError, IOError): self._log("Failed to remove temporary directory - check directory and manually re-run.") self._log("Post-Processing ABORTED.") return self._log("Removed temporary directory : " + str(self.nzb_folder)) else: #downtype = for use with updater on history table to set status to 'Post-Processed' downtype = 'PP' #Manual Run, this is the portion. if mylar.RENAME_FILES: if str(ofilename) != str(nfilename + ext): logger.fdebug("Renaming " + os.path.join(self.nzb_folder, str(ofilename)) + " ..to.. 
" + os.path.join(self.nzb_folder,str(nfilename + ext))) os.rename(os.path.join(odir, str(ofilename)), os.path.join(odir ,str(nfilename + ext))) else: logger.fdebug('filename is identical as original, not renaming.') src = os.path.join(odir, str(nfilename + ext)) logger.fdebug('odir rename: ' + os.path.join(odir, str(ofilename)) + ' TO ' + os.path.join(odir, str(nfilename + ext))) logger.fdebug('odir src : ' + os.path.join(odir, str(nfilename + ext))) logger.fdebug("Moving " + src + " ... to ... " + dst) try: shutil.move(src, dst) except (OSError, IOError): logger.fdebug("Failed to move directory - check directories and manually re-run.") logger.fdebug("Post-Processing ABORTED.") return logger.fdebug("Successfully moved to : " + dst) #tidyup old path #try: # os.remove(os.path.join(self.nzb_folder, str(ofilename))) # logger.fdebug("Deleting : " + os.path.join(self.nzb_folder, str(ofilename))) #except (OSError, IOError): # logger.fdebug("Failed to remove temporary directory - check directory and manually re-run.") # logger.fdebug("Post-Processing ABORTED.") # return #logger.fdebug("Removed temporary directory : " + str(self.nzb_folder)) #Hopefully set permissions on downloaded file try: permission = int(mylar.CHMOD_FILE, 8) os.umask(0) os.chmod(dst.rstrip(), permission) except OSError: logger.error('Failed to change file permissions. 
Ensure that the user running Mylar has proper permissions to change permissions in : ' + dst) logger.fdebug('Continuing post-processing but unable to change file permissions in ' + dst) #delete entry from nzblog table myDB.action('DELETE from nzblog WHERE issueid=?', [issueid]) #update snatched table to change status to Downloaded if annchk == "no": updater.foundsearch(comicid, issueid, down=downtype) dispiss = 'issue: ' + str(issuenumOG) else: updater.foundsearch(comicid, issueid, mode='want_ann', down=downtype) dispiss = 'annual issue: ' + str(issuenumOG) #force rescan of files updater.forceRescan(comicid) logger.info(u"Post-Processing completed for: " + series + " " + dispiss ) self._log(u"Post Processing SUCCESSFUL! ") if mylar.WEEKFOLDER: #if enabled, will *copy* the post-processed file to the weeklypull list folder for the given week. weeklypull.weekly_singlecopy(comicid,issuenum,str(nfilename+ext),dst) # retrieve/create the corresponding comic objects if mylar.ENABLE_EXTRA_SCRIPTS: folderp = str(dst) #folder location after move/rename nzbn = self.nzb_name #original nzb name filen = str(nfilename + ext) #new filename #name, comicyear, comicid , issueid, issueyear, issue, publisher #create the dic and send it. 
seriesmeta = [] seriesmetadata = {} seriesmeta.append({ 'name': series, 'comicyear': seriesyear, 'comicid': comicid, 'issueid': issueid, 'issueyear': issueyear, 'issue': issuenum, 'publisher': publisher }) seriesmetadata['seriesmeta'] = seriesmeta self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata ) if ml is not None: #we only need to return self.log if it's a manual run and it's not a snatched torrent if snatchedtorrent: #manual run + snatched torrent pass else: #manual run + not snatched torrent (or normal manual-run) return self.log if annchk == "no": prline = series + '(' + issueyear + ') - issue #' + issuenumOG else: prline = series + ' Annual (' + issueyear + ') - issue #' + issuenumOG prline2 = 'Mylar has downloaded and post-processed: ' + prline if mylar.PROWL_ENABLED: pushmessage = prline logger.info(u"Prowl request") prowl = notifiers.PROWL() prowl.notify(pushmessage,"Download and Postprocessing completed") if mylar.NMA_ENABLED: nma = notifiers.NMA() nma.notify(prline=prline, prline2=prline2) if mylar.PUSHOVER_ENABLED: logger.info(u"Pushover request") pushover = notifiers.PUSHOVER() pushover.notify(prline, "Download and Post-Processing completed") if mylar.BOXCAR_ENABLED: boxcar = notifiers.BOXCAR() boxcar.notify(prline=prline, prline2=prline2) if mylar.PUSHBULLET_ENABLED: pushbullet = notifiers.PUSHBULLET() pushbullet.notify(prline=prline, prline2=prline2) return self.log
def addComictoDB(comicid): # Putting this here to get around the circular import. Will try to use this to update images at later date. from mylar import cache myDB = db.DBConnection() # myDB.action('DELETE from blacklist WHERE ComicID=?', [comicid]) # We need the current minimal info in the database instantly # so we don't throw a 500 error when we redirect to the artistPage controlValueDict = {"ComicID": comicid} dbcomic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone() if dbcomic is None: newValueDict = {"ComicName": "Comic ID: %s" % (comicid), "Status": "Loading"} else: newValueDict = {"Status": "Loading"} myDB.upsert("comics", newValueDict, controlValueDict) # we need to lookup the info for the requested ComicID in full now comic = cv.getComic(comicid,'comic') if not comic: logger.warn("Error fetching comic. ID for : " + comicid) if dbcomic is None: newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (comicid), "Status": "Active"} else: newValueDict = {"Status": "Active"} myDB.upsert("comics", newValueDict, controlValueDict) return if comic['ComicName'].startswith('The '): sortname = comic['ComicName'][4:] else: sortname = comic['ComicName'] logger.info(u"Now adding/updating: " + comic['ComicName']) #--Now that we know ComicName, let's try some scraping #--Start # gcd will return issue details (most importantly publishing date) gcdinfo=parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid) if gcdinfo == "No Match": logger.warn("No matching result found for " + comic['ComicName'] + " (" + comic['ComicYear'] + ")" ) return logger.info(u"Sucessfully retrieved details for " + comic['ComicName'] ) # print ("Series Published" + parseit.resultPublished) #--End #comic book location on machine # setup default location here comlocation = mylar.DESTINATION_DIR + "/" + comic['ComicName'] + " (" + comic['ComicYear'] + ")" #if mylar.REPLACE_SPACES == "yes": #mylar.REPLACE_CHAR ...determines what to 
replace spaces with underscore or dot mylarREPLACE_CHAR = '_' comlocation = comlocation.replace(' ', mylarREPLACE_CHAR) #if it doesn't exist - create it (otherwise will bugger up later on) if os.path.isdir(str(comlocation)): logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...") else: #print ("Directory doesn't exist!") try: os.makedirs(str(comlocation)) logger.info(u"Directory successfully created at: " + str(comlocation)) except OSError.e: if e.errno != errno.EEXIST: raise #print ("root dir for series: " + comlocation) #try to account for CV not updating new issues as fast as GCD if gcdinfo['gcdvariation'] == "yes": comicIssues = str(int(comic['ComicIssues']) + 1) else: comicIssues = comic['ComicIssues'] controlValueDict = {"ComicID": comicid} newValueDict = {"ComicName": comic['ComicName'], "ComicSortName": sortname, "ComicYear": comic['ComicYear'], "ComicImage": comic['ComicImage'], "Total": comicIssues, "Description": comic['ComicDesc'], "ComicLocation": comlocation, "ComicPublisher": comic['ComicPublisher'], "ComicPublished": parseit.resultPublished, "DateAdded": helpers.today(), "Status": "Loading"} myDB.upsert("comics", newValueDict, controlValueDict) issued = cv.getComic(comicid,'issue') logger.info(u"Sucessfully retrieved issue details for " + comic['ComicName'] ) n = 0 iscnt = int(comicIssues) issid = [] issnum = [] issname = [] issdate = [] int_issnum = [] #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! 
:) latestiss = "0" latestdate = "0000-00-00" while (n < iscnt): firstval = issued['issuechoice'][n] cleanname = helpers.cleanName(firstval['Issue_Name']) issid.append( str(firstval['Issue_ID']) ) issnum.append( str(firstval['Issue_Number']) ) issname.append(cleanname) bb = 0 while (bb < iscnt): gcdval = gcdinfo['gcdchoice'][bb] #print ("issuecompare: " + str(issnum[n])) #print ("issuecheck: " + str(gcdval['GCDIssue']) ) if str(gcdval['GCDIssue']) == str(issnum[n]): issdate.append( str(gcdval['GCDDate']) ) issnumchg = issnum[n].replace(".00", "") #print ("issnumchg" + str(issnumchg) + "...latestiss:" + str(latestiss)) int_issnum.append(int(issnumchg)) #get the latest issue / date using the date. if gcdval['GCDDate'] > latestdate: latestiss = str(issnumchg) latestdate = str(gcdval['GCDDate']) bb = iscnt bb+=1 #logger.info(u"IssueID: " + str(issid[n]) + " IssueNo: " + str(issnum[n]) + " Date" + str(issdate[n]) ) n+=1 latestiss = latestiss + ".00" #once again - thanks to the new 52 reboot...start n at 0. n = 0 logger.info(u"Now adding/updating issues for" + comic['ComicName']) # file check to see if issue exists logger.info(u"Checking directory for existing issues.") fc = filechecker.listFiles(dir=comlocation, watchcomic=comic['ComicName']) havefiles = 0 fccnt = int(fc['comiccount']) logger.info(u"Found " + str(fccnt) + " issues of " + comic['ComicName']) fcnew = [] while (n < iscnt): fn = 0 haveissue = "no" #print ("on issue " + str(int(n+1)) + " of " + str(iscnt) + " issues") # check if the issue already exists iss_exists = myDB.select('SELECT * from issues WHERE IssueID=?', [issid[n]]) #print ("checking issue: " + str(int_issnum[n])) # stupid way to do this, but check each issue against file-list in fc. while (fn < fccnt): tmpfc = fc['comiclist'][fn] #print (str(int_issnum[n]) + " against ... 
" + str(tmpfc['ComicFilename'])) temploc = tmpfc['ComicFilename'].replace('_', ' ') fcnew = shlex.split(str(temploc)) fcn = len(fcnew) som = 0 # this loop searches each word in the filename for a match. while (som < fcn): #print (fcnew[som]) #counts get buggered up when the issue is the last field in the filename - ie. '50.cbr' if ".cbr" in fcnew[som]: fcnew[som] = fcnew[som].replace(".cbr", "") elif ".cbz" in fcnew[som]: fcnew[som] = fcnew[som].replace(".cbz", "") if fcnew[som].isdigit(): #print ("digit detected") #good ol' 52 again.... if int(fcnew[som]) > 0: fcdigit = fcnew[som].lstrip('0') else: fcdigit = "0" #print ( "filename:" + str(int(fcnew[som])) + " - issue: " + str(int_issnum[n]) ) if int(fcdigit) == int_issnum[n]: #print ("matched") #print ("We have this issue - " + str(issnum[n]) + " at " + tmpfc['ComicFilename'] ) havefiles+=1 haveissue = "yes" isslocation = str(tmpfc['ComicFilename']) break #print ("failed word match on:" + str(fcnew[som]) + "..continuing next word") som+=1 #print (str(temploc) + " doesn't match anything...moving to next file.") fn+=1 if haveissue == "no": isslocation = "None" controlValueDict = {"IssueID": issid[n]} newValueDict = {"ComicID": comicid, "ComicName": comic['ComicName'], "IssueName": issname[n], "Issue_Number": issnum[n], "IssueDate": issdate[n], "Location": isslocation, "Int_IssueNumber": int_issnum[n] } # Only change the status & add DateAdded if the issue is not already in the database if not len(iss_exists): controlValueDict = {"IssueID": issid[n]} newValueDict['DateAdded'] = helpers.today() if haveissue == "no": if mylar.AUTOWANT_ALL: newValueDict['Status'] = "Wanted" #elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING: # newValueDict['Status'] = "Wanted" else: newValueDict['Status'] = "Skipped" elif haveissue == "yes": newValueDict['Status'] = "Downloaded" myDB.upsert("issues", newValueDict, controlValueDict) n+=1 # logger.debug(u"Updating comic cache for " + comic['ComicName']) # 
cache.getThumb(ComicID=issue['issueid']) # newValueDict['LastUpdated'] = helpers.now() # myDB.upsert("comics", newValueDict, controlValueDict) # logger.debug(u"Updating cache for: " + comic['ComicName']) # cache.getThumb(ComicIDcomicid) controlValueStat = {"ComicID": comicid} newValueStat = {"Status": "Active", "Have": havefiles, "LatestIssue": latestiss, "LatestDate": latestdate } myDB.upsert("comics", newValueStat, controlValueStat) logger.info(u"Updating complete for: " + comic['ComicName']) #here we grab issues that have been marked as wanted above... results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid]) if results: logger.info(u"Attempting to grab wanted issues for : " + comic['ComicName']) for result in results: foundNZB = "none" if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST): foundNZB = search.searchforissue(result['IssueID']) if foundNZB == "yes": updater.foundsearch(result['ComicID'], result['IssueID']) else: logger.info(u"No issues marked as wanted for " + comic['ComicName']) logger.info(u"Finished grabbing what I could.")
def addComictoDB(comicid, mismatch=None): # Putting this here to get around the circular import. Will try to use this to update images at later date. from mylar import cache myDB = db.DBConnection() # We need the current minimal info in the database instantly # so we don't throw a 500 error when we redirect to the artistPage controlValueDict = {"ComicID": comicid} dbcomic = myDB.action("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone() if dbcomic is None: newValueDict = {"ComicName": "Comic ID: %s" % (comicid), "Status": "Loading"} comlocation = None else: newValueDict = {"Status": "Loading"} comlocation = dbcomic["ComicLocation"] myDB.upsert("comics", newValueDict, controlValueDict) # we need to lookup the info for the requested ComicID in full now comic = cv.getComic(comicid, "comic") # comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone() if not comic: logger.warn("Error fetching comic. ID for : " + comicid) if dbcomic is None: newValueDict = {"ComicName": "Fetch failed, try refreshing. 
(%s)" % (comicid), "Status": "Active"} else: newValueDict = {"Status": "Active"} myDB.upsert("comics", newValueDict, controlValueDict) return if comic["ComicName"].startswith("The "): sortname = comic["ComicName"][4:] else: sortname = comic["ComicName"] logger.info(u"Now adding/updating: " + comic["ComicName"]) # --Now that we know ComicName, let's try some scraping # --Start # gcd will return issue details (most importantly publishing date) if mismatch == "no" or mismatch is None: gcdinfo = parseit.GCDScraper(comic["ComicName"], comic["ComicYear"], comic["ComicIssues"], comicid) mismatch_com = "no" if gcdinfo == "No Match": updater.no_searchresults(comicid) nomatch = "true" logger.info( u"There was an error when trying to add " + comic["ComicName"] + " (" + comic["ComicYear"] + ")" ) return nomatch else: mismatch_com = "yes" # print ("gcdinfo:" + str(gcdinfo)) elif mismatch == "yes": CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone() if CV_EXcomicid["variloop"] is None: pass else: vari_loop = CV_EXcomicid["variloop"] NewComicID = CV_EXcomicid["NewComicID"] gcomicid = CV_EXcomicid["GComicID"] resultURL = "/series/" + str(NewComicID) + "/" # print ("variloop" + str(CV_EXcomicid['variloop'])) # if vari_loop == '99': gcdinfo = parseit.GCDdetails( comseries=None, resultURL=resultURL, vari_loop=0, ComicID=comicid, TotalIssues=0, issvariation="no", resultPublished=None, ) logger.info(u"Sucessfully retrieved details for " + comic["ComicName"]) # print ("Series Published" + parseit.resultPublished) # comic book location on machine # setup default location here if comlocation is None: if ":" in comic["ComicName"] or "/" in comic["ComicName"] or "," in comic["ComicName"]: comicdir = comic["ComicName"] if ":" in comicdir: comicdir = comicdir.replace(":", "") if "/" in comicdir: comicdir = comicdir.replace("/", "-") if "," in comicdir: comicdir = comicdir.replace(",", "") else: comicdir = comic["ComicName"] series = comicdir publisher 
= comic["ComicPublisher"] year = comic["ComicYear"] # do work to generate folder path values = {"$Series": series, "$Publisher": publisher, "$Year": year} # print mylar.FOLDER_FORMAT # print 'working dir:' # print helpers.replace_all(mylar.FOLDER_FORMAT, values) if mylar.FOLDER_FORMAT == "": comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic["ComicYear"] + ")" else: comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values) # comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")" if mylar.DESTINATION_DIR == "": logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.") return if mylar.REPLACE_SPACES: # mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot comlocation = comlocation.replace(" ", mylar.REPLACE_CHAR) # if it doesn't exist - create it (otherwise will bugger up later on) if os.path.isdir(str(comlocation)): logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...") else: # print ("Directory doesn't exist!") try: os.makedirs(str(comlocation)) logger.info(u"Directory successfully created at: " + str(comlocation)) except OSError: logger.error(u"Could not create comicdir : " + str(comlocation)) # try to account for CV not updating new issues as fast as GCD # seems CV doesn't update total counts # comicIssues = gcdinfo['totalissues'] if gcdinfo["gcdvariation"] == "cv": comicIssues = str(int(comic["ComicIssues"]) + 1) else: comicIssues = comic["ComicIssues"] # let's download the image... if os.path.exists(mylar.CACHE_DIR): pass else: # let's make the dir. try: os.makedirs(str(mylar.CACHE_DIR)) logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR)) except OSError: logger.error("Could not create cache dir. 
Check permissions of cache dir: " + str(mylar.CACHE_DIR)) coverfile = mylar.CACHE_DIR + "/" + str(comicid) + ".jpg" # try: urllib.urlretrieve(str(comic["ComicImage"]), str(coverfile)) try: with open(str(coverfile)) as f: ComicImage = "cache/" + str(comicid) + ".jpg" logger.info(u"Sucessfully retrieved cover for " + str(comic["ComicName"])) except IOError as e: logger.error(u"Unable to save cover locally at this time.") controlValueDict = {"ComicID": comicid} newValueDict = { "ComicName": comic["ComicName"], "ComicSortName": sortname, "ComicYear": comic["ComicYear"], "ComicImage": ComicImage, "Total": comicIssues, "ComicLocation": comlocation, "ComicPublisher": comic["ComicPublisher"], "ComicPublished": gcdinfo["resultPublished"], "DateAdded": helpers.today(), "Status": "Loading", } myDB.upsert("comics", newValueDict, controlValueDict) issued = cv.getComic(comicid, "issue") logger.info(u"Sucessfully retrieved issue details for " + comic["ComicName"]) n = 0 iscnt = int(comicIssues) issid = [] issnum = [] issname = [] issdate = [] int_issnum = [] # let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :) latestiss = "0" latestdate = "0000-00-00" # print ("total issues:" + str(iscnt)) # ---removed NEW code here--- logger.info(u"Now adding/updating issues for " + comic["ComicName"]) # file check to see if issue exists logger.info(u"Checking directory for existing issues.") # fc = filechecker.listFiles(dir=comlocation, watchcomic=comic['ComicName']) # havefiles = 0 # fccnt = int(fc['comiccount']) # logger.info(u"Found " + str(fccnt) + "/" + str(iscnt) + " issues of " + comic['ComicName'] + "...verifying") # fcnew = [] while n <= iscnt: # ---NEW.code try: firstval = issued["issuechoice"][n] except IndexError: break cleanname = helpers.cleanName(firstval["Issue_Name"]) issid = str(firstval["Issue_ID"]) issnum = str(firstval["Issue_Number"]) issname = cleanname if "." 
in str(issnum): issn_st = str(issnum).find(".") issn_b4dec = str(issnum)[:issn_st] # if the length of decimal is only 1 digit, assume it's a tenth dec_is = str(issnum)[issn_st + 1 :] if len(dec_is) == 1: dec_nisval = int(dec_is) * 10 iss_naftdec = str(dec_nisval) if len(dec_is) == 2: dec_nisval = int(dec_is) iss_naftdec = str(dec_nisval) iss_issue = issn_b4dec + "." + iss_naftdec issis = (int(issn_b4dec) * 1000) + dec_nisval else: issis = int(issnum) * 1000 bb = 0 while bb <= iscnt: try: gcdval = gcdinfo["gcdchoice"][bb] except IndexError: # account for gcd variation here if gcdinfo["gcdvariation"] == "gcd": # print ("gcd-variation accounted for.") issdate = "0000-00-00" int_issnum = int(issis / 1000) break if "nn" in str(gcdval["GCDIssue"]): # no number detected - GN, TP or the like logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.") updater.no_searchresults(comicid) return elif "." in str(gcdval["GCDIssue"]): # print ("g-issue:" + str(gcdval['GCDIssue'])) issst = str(gcdval["GCDIssue"]).find(".") # print ("issst:" + str(issst)) issb4dec = str(gcdval["GCDIssue"])[:issst] # print ("issb4dec:" + str(issb4dec)) # if the length of decimal is only 1 digit, assume it's a tenth decis = str(gcdval["GCDIssue"])[issst + 1 :] # print ("decis:" + str(decis)) if len(decis) == 1: decisval = int(decis) * 10 issaftdec = str(decisval) if len(decis) == 2: decisval = int(decis) issaftdec = str(decisval) gcd_issue = issb4dec + "." + issaftdec # print ("gcd_issue:" + str(gcd_issue)) gcdis = (int(issb4dec) * 1000) + decisval else: gcdis = int(str(gcdval["GCDIssue"])) * 1000 if gcdis == issis: issdate = str(gcdval["GCDDate"]) int_issnum = int(gcdis / 1000) # get the latest issue / date using the date. if gcdval["GCDDate"] > latestdate: latestiss = str(issnum) latestdate = str(gcdval["GCDDate"]) break # bb = iscnt bb += 1 # print("(" + str(n) + ") IssueID: " + str(issid) + " IssueNo: " + str(issnum) + " Date" + str(issdate)) # ---END.NEW. 
# check if the issue already exists iss_exists = myDB.action("SELECT * from issues WHERE IssueID=?", [issid]).fetchone() # Only change the status & add DateAdded if the issue is already in the database if iss_exists is None: newValueDict["DateAdded"] = helpers.today() controlValueDict = {"IssueID": issid} newValueDict = { "ComicID": comicid, "ComicName": comic["ComicName"], "IssueName": issname, "Issue_Number": issnum, "IssueDate": issdate, "Int_IssueNumber": int_issnum, } if mylar.AUTOWANT_ALL: newValueDict["Status"] = "Wanted" # elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING: # newValueDict['Status'] = "Wanted" else: newValueDict["Status"] = "Skipped" if iss_exists: # print ("Existing status : " + str(iss_exists['Status'])) newValueDict["Status"] = iss_exists["Status"] myDB.upsert("issues", newValueDict, controlValueDict) n += 1 # logger.debug(u"Updating comic cache for " + comic['ComicName']) # cache.getThumb(ComicID=issue['issueid']) # logger.debug(u"Updating cache for: " + comic['ComicName']) # cache.getThumb(ComicIDcomicid) # check for existing files... updater.forceRescan(comicid) controlValueStat = {"ComicID": comicid} newValueStat = { "Status": "Active", "LatestIssue": latestiss, "LatestDate": latestdate, "LastUpdated": helpers.now(), } myDB.upsert("comics", newValueStat, controlValueStat) logger.info(u"Updating complete for: " + comic["ComicName"]) # lets' check the pullist for anyting at this time as well since we're here. if mylar.AUTOWANT_UPCOMING: logger.info(u"Checking this week's pullist for new issues of " + str(comic["ComicName"])) updater.newpullcheck() # here we grab issues that have been marked as wanted above... results = myDB.select("SELECT * FROM issues where ComicID=? 
AND Status='Wanted'", [comicid]) if results: logger.info(u"Attempting to grab wanted issues for : " + comic["ComicName"]) for result in results: foundNZB = "none" if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST): foundNZB = search.searchforissue(result["IssueID"]) if foundNZB == "yes": updater.foundsearch(result["ComicID"], result["IssueID"]) else: logger.info(u"No issues marked as wanted for " + comic["ComicName"]) logger.info(u"Finished grabbing what I could.")
def GCDimport(gcomicid):
    # Import a series using GCD (Grand Comics Database) scraping only, bypassing ComicVine.
    #
    # this is for importing via GCD only and not using CV.
    # used when volume spanning is discovered for a Comic (and can't be added using CV).
    # Issue Counts are wrong (and can't be added).
    # because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
    # CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)
    gcdcomicid = gcomicid
    myDB = db.DBConnection()
    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage
    controlValueDict = {"ComicID": gcdcomicid}
    comic = myDB.action(
        "SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation FROM comics WHERE ComicID=?",
        [gcomicid],
    ).fetchone()
    # Positional row access: 0=ComicName, 1=ComicYear, 2=Total, 5=ComicLocation.
    ComicName = comic[0]
    ComicYear = comic[1]
    ComicIssues = comic[2]
    comlocation = comic[5]
    # ComicImage = comic[4]
    # print ("Comic:" + str(ComicName))
    # Mark the series as loading so the UI can render immediately.
    newValueDict = {"Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)
    # we need to lookup the info for the requested ComicID in full now
    # comic = cv.getComic(comicid,'comic')
    if not comic:
        logger.warn("Error fetching comic. ID for : " + gcdcomicid)
        # NOTE(review): 'dbcomic' is never assigned in this function — this branch
        # would raise NameError if reached. TODO confirm intended variable (likely 'comic').
        if dbcomic is None:
            newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (gcdcomicid), "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return
    # Strip a leading "The " for sort ordering.
    if ComicName.startswith("The "):
        sortname = ComicName[4:]
    else:
        sortname = ComicName
    logger.info(u"Now adding/updating: " + ComicName)
    # --Now that we know ComicName, let's try some scraping
    # --Start
    # gcd will return issue details (most importantly publishing date)
    # GCD ids are the CV-style id with a leading 'G' — strip it for the URL.
    comicid = gcomicid[1:]
    resultURL = "/series/" + str(comicid) + "/"
    gcdinfo = parseit.GCDdetails(
        comseries=None,
        resultURL=resultURL,
        vari_loop=0,
        ComicID=gcdcomicid,
        TotalIssues=ComicIssues,
        issvariation=None,
        resultPublished=None,
    )
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")")
        updater.no_searchresults(gcomicid)
        nomatch = "true"
        return nomatch
    logger.info(u"Sucessfully retrieved details for " + ComicName)
    # print ("Series Published" + parseit.resultPublished)
    # --End
    ComicImage = gcdinfo["ComicImage"]
    # comic book location on machine
    # setup default location here
    if comlocation is None:
        # Sanitize characters that are illegal / awkward in directory names.
        if ":" in ComicName or "/" in ComicName or "," in ComicName:
            comicdir = ComicName
            if ":" in comicdir:
                comicdir = comicdir.replace(":", "")
            if "/" in comicdir:
                comicdir = comicdir.replace("/", "-")
            if "," in comicdir:
                comicdir = comicdir.replace(",", "")
        else:
            comicdir = ComicName
        comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.REPLACE_SPACES:
            # mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(" ", mylar.REPLACE_CHAR)
    # if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
    else:
        # print ("Directory doesn't exist!")
        try:
            os.makedirs(str(comlocation))
            logger.info(u"Directory successfully created at: " + str(comlocation))
        except OSError:
            logger.error(u"Could not create comicdir : " + str(comlocation))
    comicIssues = gcdinfo["totalissues"]
    # let's download the image...
    if os.path.exists(mylar.CACHE_DIR):
        pass
    else:
        # let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
        except OSError:
            logger.error(u"Could not create cache dir : " + str(mylar.CACHE_DIR))
    coverfile = mylar.CACHE_DIR + "/" + str(gcomicid) + ".jpg"
    # Python 2 urllib; download is unconditional, the open() below only verifies it landed.
    urllib.urlretrieve(str(ComicImage), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = "cache/" + str(gcomicid) + ".jpg"
            logger.info(u"Sucessfully retrieved cover for " + str(ComicName))
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")
    # Upsert the fully-populated series record (still Status=Loading until issues done).
    controlValueDict = {"ComicID": gcomicid}
    newValueDict = {
        "ComicName": ComicName,
        "ComicSortName": sortname,
        "ComicYear": ComicYear,
        "Total": comicIssues,
        "ComicLocation": comlocation,
        "ComicImage": ComicImage,
        # "ComicPublisher": comic['ComicPublisher'],
        # "ComicPublished": comicPublished,
        "DateAdded": helpers.today(),
        "Status": "Loading",
    }
    myDB.upsert("comics", newValueDict, controlValueDict)
    logger.info(u"Sucessfully retrieved issue details for " + ComicName)
    n = 0
    iscnt = int(comicIssues)
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    # let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    # print ("total issues:" + str(iscnt))
    # ---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + ComicName)
    bb = 0
    while bb <= iscnt:
        # ---NEW.code
        try:
            gcdval = gcdinfo["gcdchoice"][bb]
            # print ("gcdval: " + str(gcdval))
        except IndexError:
            # account for gcd variation here
            if gcdinfo["gcdvariation"] == "gcd":
                # print ("gcd-variation accounted for.")
                issdate = "0000-00-00"
                # NOTE(review): 'issis' is never defined in this function (it exists only in
                # the CV-based import path) — this line would raise NameError. TODO confirm.
                int_issnum = int(issis / 1000)
            break
        if "nn" in str(gcdval["GCDIssue"]):
            # no number detected - GN, TP or the like
            logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
            updater.no_searchresults(comicid)
            return
        elif "." in str(gcdval["GCDIssue"]):
            # Decimal issue number: encode as int(whole)*1000 + scaled decimal part.
            issst = str(gcdval["GCDIssue"]).find(".")
            issb4dec = str(gcdval["GCDIssue"])[:issst]
            # if the length of decimal is only 1 digit, assume it's a tenth
            decis = str(gcdval["GCDIssue"])[issst + 1 :]
            if len(decis) == 1:
                decisval = int(decis) * 10
                issaftdec = str(decisval)
            if len(decis) == 2:
                decisval = int(decis)
                issaftdec = str(decisval)
            if int(issaftdec) == 0:
                issaftdec = "00"
            gcd_issue = issb4dec + "." + issaftdec
            gcdis = (int(issb4dec) * 1000) + decisval
        else:
            gcdis = int(str(gcdval["GCDIssue"])) * 1000
            gcd_issue = str(gcdval["GCDIssue"])
        # get the latest issue / date using the date.
        int_issnum = int(gcdis / 1000)
        issdate = str(gcdval["GCDDate"])
        # GCD-sourced issue ids are prefixed with 'G' to avoid colliding with CV ids.
        issid = "G" + str(gcdval["IssueID"])
        if gcdval["GCDDate"] > latestdate:
            latestiss = str(gcd_issue)
            latestdate = str(gcdval["GCDDate"])
        # print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate) )
        # ---END.NEW.
        # check if the issue already exists
        iss_exists = myDB.action("SELECT * from issues WHERE IssueID=?", [issid]).fetchone()
        # Only change the status & add DateAdded if the issue is not already in the database
        # NOTE(review): this mutates the *previous* newValueDict before the fresh one is
        # built two statements below, so "DateAdded" is discarded — TODO confirm intent.
        if iss_exists is None:
            newValueDict["DateAdded"] = helpers.today()
        # adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
        if "?" in str(issdate):
            issdate = "0000-00-00"
        controlValueDict = {"IssueID": issid}
        newValueDict = {
            "ComicID": gcomicid,
            "ComicName": ComicName,
            "Issue_Number": gcd_issue,
            "IssueDate": issdate,
            "Int_IssueNumber": int_issnum,
        }
        # print ("issueid:" + str(controlValueDict))
        # print ("values:" + str(newValueDict))
        if mylar.AUTOWANT_ALL:
            newValueDict["Status"] = "Wanted"
            # elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING:
            # newValueDict['Status'] = "Wanted"
        else:
            newValueDict["Status"] = "Skipped"
        if iss_exists:
            # print ("Existing status : " + str(iss_exists['Status']))
            newValueDict["Status"] = iss_exists["Status"]
        myDB.upsert("issues", newValueDict, controlValueDict)
        bb += 1
    # logger.debug(u"Updating comic cache for " + ComicName)
    # cache.getThumb(ComicID=issue['issueid'])
    # logger.debug(u"Updating cache for: " + ComicName)
    # cache.getThumb(ComicIDcomicid)
    # check for existing files...
    updater.forceRescan(gcomicid)
    # Flip the series to Active now that all issues are recorded.
    controlValueStat = {"ComicID": gcomicid}
    newValueStat = {
        "Status": "Active",
        "LatestIssue": latestiss,
        "LatestDate": latestdate,
        "LastUpdated": helpers.now(),
    }
    myDB.upsert("comics", newValueStat, controlValueStat)
    logger.info(u"Updating complete for: " + ComicName)
    # lets' check the pullist for anyting at this time as well since we're here.
    if mylar.AUTOWANT_UPCOMING:
        logger.info(u"Checking this week's pullist for new issues of " + str(ComicName))
        updater.newpullcheck()
    # here we grab issues that have been marked as wanted above...
    results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid])
    if results:
        logger.info(u"Attempting to grab wanted issues for : " + ComicName)
        for result in results:
            foundNZB = "none"
            # Only search when at least one indexer AND the SABnzbd host are configured.
            if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
                foundNZB = search.searchforissue(result["IssueID"])
                if foundNZB == "yes":
                    updater.foundsearch(result["ComicID"], result["IssueID"])
    else:
        logger.info(u"No issues marked as wanted for " + ComicName)
    logger.info(u"Finished grabbing what I could.")
def Process_next(self,comicid,issueid,issuenumOG,ml=None):
    # Post-process one downloaded issue: resolve its DB record, build a "pretty"
    # issue number, optionally meta-tag, rename/move the file into the comic's
    # library location, update DB status, and fire notifications/scripts.
    # ml: when not None this is a Manual Run driven by a file-location dict.
    annchk = "no"
    extensions = ('.cbr', '.cbz')
    myDB = db.DBConnection()
    comicnzb = myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
    issuenzb = myDB.action("SELECT * from issues WHERE issueid=? AND comicid=? AND ComicName NOT NULL", [issueid,comicid]).fetchone()
    # Python 2 print statements — debug leftovers.
    print "issueid: " + str(issueid)
    print "issuenumOG: " + str(issuenumOG)
    if issuenzb is None:
        # Not a regular issue — fall back to the annuals table.
        print "chk1"
        issuenzb = myDB.action("SELECT * from annuals WHERE issueid=? and comicid=?", [issueid,comicid]).fetchone()
        print "chk2"
        annchk = "yes"
    print issuenzb
    #issueno = str(issuenum).split('.')[0]
    #new CV API - removed all decimals...here we go AGAIN!
    issuenum = issuenzb['Issue_Number']
    issue_except = 'None'
    # Strip AU/AI variant suffixes, remembering them for re-append later.
    if 'au' in issuenum.lower():
        issuenum = re.sub("[^0-9]", "", issuenum)
        issue_except = ' AU'
    elif 'ai' in issuenum.lower():
        issuenum = re.sub("[^0-9]", "", issuenum)
        issue_except = ' AI'
    if '.' in issuenum:
        # Split a decimal issue number into whole part + decimal part.
        iss_find = issuenum.find('.')
        iss_b4dec = issuenum[:iss_find]
        iss_decval = issuenum[iss_find+1:]
        if int(iss_decval) == 0:
            # ".0" — treat as a whole number.
            iss = iss_b4dec
            issdec = int(iss_decval)
            issueno = str(iss)
            self._log("Issue Number: " + str(issueno), logger.DEBUG)
            logger.fdebug("Issue Number: " + str(issueno))
        else:
            if len(iss_decval) == 1:
                iss = iss_b4dec + "." + iss_decval
                issdec = int(iss_decval) * 10
            else:
                iss = iss_b4dec + "." + iss_decval.rstrip('0')
                issdec = int(iss_decval.rstrip('0')) * 10
            issueno = iss_b4dec
            self._log("Issue Number: " + str(iss), logger.DEBUG)
            logger.fdebug("Issue Number: " + str(iss))
    else:
        iss = issuenum
        issueno = str(iss)
    # issue zero-suppression here
    if mylar.ZERO_LEVEL == "0":
        zeroadd = ""
    else:
        if mylar.ZERO_LEVEL_N == "none": zeroadd = ""
        elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0"
        elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"
    logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))
    # NOTE(review): str(len(issueno)) > 1 compares a *string* to an int — in
    # Python 2 this is always True (str sorts after int), so the else branch
    # below is unreachable. Probably meant len(issueno) > 1. TODO confirm.
    if str(len(issueno)) > 1:
        if int(issueno) < 10:
            self._log("issue detected less than 10", logger.DEBUG)
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
                    prettycomiss = str(zeroadd) + str(iss)
                else:
                    prettycomiss = str(zeroadd) + str(int(issueno))
            else:
                prettycomiss = str(zeroadd) + str(iss)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
        elif int(issueno) >= 10 and int(issueno) < 100:
            self._log("issue detected greater than 10, but less than 100", logger.DEBUG)
            # Two-digit issues take at most one pad character.
            if mylar.ZERO_LEVEL_N == "none":
                zeroadd = ""
            else:
                zeroadd = "0"
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
                    prettycomiss = str(zeroadd) + str(iss)
                else:
                    prettycomiss = str(zeroadd) + str(int(issueno))
            else:
                prettycomiss = str(zeroadd) + str(iss)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss), logger.DEBUG)
        else:
            self._log("issue detected greater than 100", logger.DEBUG)
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
            prettycomiss = str(issueno)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
    else:
        prettycomiss = str(issueno)
        self._log("issue length error - cannot determine length. Defaulting to None: " + str(prettycomiss), logger.DEBUG)
    if annchk == "yes":
        prettycomiss = "Annual " + str(prettycomiss)
        self._log("Annual detected.")
    logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
    issueyear = issuenzb['IssueDate'][:4]
    self._log("Issue Year: " + str(issueyear), logger.DEBUG)
    logger.fdebug("Issue Year : " + str(issueyear))
    # comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
    publisher = comicnzb['ComicPublisher']
    self._log("Publisher: " + publisher, logger.DEBUG)
    logger.fdebug("Publisher: " + str(publisher))
    #we need to un-unicode this to make sure we can write the filenames properly for spec.chars
    series = comicnzb['ComicName'].encode('ascii', 'ignore').strip()
    self._log("Series: " + series, logger.DEBUG)
    logger.fdebug("Series: " + str(series))
    seriesyear = comicnzb['ComicYear']
    self._log("Year: " + seriesyear, logger.DEBUG)
    logger.fdebug("Year: " + str(seriesyear))
    comlocation = comicnzb['ComicLocation']
    self._log("Comic Location: " + comlocation, logger.DEBUG)
    logger.fdebug("Comic Location: " + str(comlocation))
    comversion = comicnzb['ComicVersion']
    self._log("Comic Version: " + str(comversion), logger.DEBUG)
    logger.fdebug("Comic Version: " + str(comversion))
    if comversion is None:
        comversion = 'None'
    #if comversion is None, remove it so it doesn't populate with 'None'
    if comversion == 'None':
        # Drop the $VolumeN token from the filename template and collapse whitespace.
        chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
        chunk_f = re.compile(r'\s+')
        chunk_file_format = chunk_f.sub(' ', chunk_f_f)
        self._log("No version # found for series - tag will not be available for renaming.", logger.DEBUG)
        logger.fdebug("No version # found for series, removing from filename")
        logger.fdebug("new format is now: " + str(chunk_file_format))
    else:
        chunk_file_format = mylar.FILE_FORMAT
    ofilename = None
    #if meta-tagging is not enabled, we need to declare the check as being fail
    #if meta-tagging is enabled, it gets changed just below to a default of pass
    pcheck = "fail"
    #tag the meta.
    if mylar.ENABLE_META:
        self._log("Metatagging enabled - proceeding...")
        logger.fdebug("Metatagging enabled - proceeding...")
        pcheck = "pass"
        try:
            # Deferred import: comictagger libs are optional at runtime.
            import cmtagmylar
            if ml is None:
                pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid)
            else:
                pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid, manual="yes", filename=ml['ComicLocation'])
        except ImportError:
            logger.fdebug("comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/")
            logger.fdebug("continuing with PostProcessing, but I'm not using metadata.")
            pcheck = "fail"
        if pcheck == "fail":
            self._log("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...")
            logger.fdebug("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...")
        elif pcheck == "unrar error":
            self._log("This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy.")
            logger.error("This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy.")
            return self.log
        else:
            # On success, cmtagmylar.run returns the path of the tagged .cbz.
            otofilename = pcheck
            self._log("Sucessfully wrote metadata to .cbz - Continuing..")
            logger.fdebug("Sucessfully wrote metadata to .cbz (" + str(otofilename) + ") - Continuing..")
    #Run Pre-script
    if mylar.ENABLE_PRE_SCRIPTS:
        nzbn = self.nzb_name #original nzb name
        nzbf = self.nzb_folder #original nzb folder
        #name, comicyear, comicid , issueid, issueyear, issue, publisher
        #create the dic and send it.
        seriesmeta = []
        seriesmetadata = {}
        seriesmeta.append({
                    'name': series,
                    'comicyear': seriesyear,
                    'comicid': comicid,
                    'issueid': issueid,
                    'issueyear': issueyear,
                    'issue': issuenum,
                    'publisher': publisher
                    })
        seriesmetadata['seriesmeta'] = seriesmeta
        self._run_pre_scripts(nzbn, nzbf, seriesmetadata )
    #rename file and move to new path
    #nfilename = series + " " + issueno + " (" + seriesyear + ")"
    # Token substitution map for the filename template.
    file_values = {'$Series': series,
                   '$Issue': prettycomiss,
                   '$Year': issueyear,
                   '$series': series.lower(),
                   '$Publisher': publisher,
                   '$publisher': publisher.lower(),
                   '$VolumeY': 'V' + str(seriesyear),
                   '$VolumeN': comversion
                  }
    #if it's a Manual Run, use the ml['ComicLocation'] for the exact filename.
    if ml is None:
        # Walk the download folder for the first .cbr/.cbz found.
        for root, dirnames, filenames in os.walk(self.nzb_folder):
            for filename in filenames:
                if filename.lower().endswith(extensions):
                    ofilename = filename
                    path, ext = os.path.splitext(ofilename)
    else:
        if pcheck == "fail":
            otofilename = ml['ComicLocation']
        print "otofilename:" + str(otofilename)
        odir, ofilename = os.path.split(otofilename)
        print "ofilename: " + str(ofilename)
        path, ext = os.path.splitext(ofilename)
        print "path: " + str(path)
        print "ext:" + str(ext)
    if ofilename is None:
        logger.error(u"Aborting PostProcessing - the filename doesn't exist in the location given. Make sure that " + str(self.nzb_folder) + " exists and is the correct location.")
        return
    self._log("Original Filename: " + ofilename, logger.DEBUG)
    self._log("Original Extension: " + ext, logger.DEBUG)
    logger.fdebug("Original Filname: " + str(ofilename))
    logger.fdebug("Original Extension: " + str(ext))
    if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES:
        self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG)
        logger.fdebug("Rename Files isn't enabled - keeping original filename.")
        #check if extension is in nzb_name - will screw up otherwise
        if ofilename.lower().endswith(extensions):
            nfilename = ofilename[:-4]
        else:
            nfilename = ofilename
    else:
        nfilename = helpers.replace_all(chunk_file_format, file_values)
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
    # Strip characters that are problematic in filenames.
    nfilename = re.sub('[\,\:\?]', '', nfilename)
    self._log("New Filename: " + nfilename, logger.DEBUG)
    logger.fdebug("New Filename: " + str(nfilename))
    src = os.path.join(self.nzb_folder, ofilename)
    filechecker.validateAndCreateDirectory(comlocation, True)
    if mylar.LOWERCASE_FILENAMES:
        dst = (comlocation + "/" + nfilename + ext).lower()
    else:
        dst = comlocation + "/" + nfilename + ext.lower()
    self._log("Source:" + src, logger.DEBUG)
    self._log("Destination:" + dst, logger.DEBUG)
    logger.fdebug("Source: " + str(src))
    logger.fdebug("Destination: " + str(dst))
    if ml is None:
        #non-manual run moving/deleting...
        os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
        src = os.path.join(self.nzb_folder, str(nfilename + ext))
        try:
            shutil.move(src, dst)
        except (OSError, IOError):
            self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
            self._log("Post-Processing ABORTED.", logger.DEBUG)
            return
        #tidyup old path
        try:
            shutil.rmtree(self.nzb_folder)
        except (OSError, IOError):
            self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
            self._log("Post-Processing ABORTED.", logger.DEBUG)
            return
        self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
    else:
        #Manual Run, this is the portion.
        logger.fdebug("Renaming " + os.path.join(self.nzb_folder, str(ofilename)) + " ..to.. " + os.path.join(self.nzb_folder,str(nfilename + ext)))
        os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
        src = os.path.join(self.nzb_folder, str(nfilename + ext))
        logger.fdebug("Moving " + src + " ... to ... " + dst)
        try:
            shutil.move(src, dst)
        except (OSError, IOError):
            logger.fdebug("Failed to move directory - check directories and manually re-run.")
            logger.fdebug("Post-Processing ABORTED.")
            return
        logger.fdebug("Successfully moved to : " + dst)
        #tidyup old path
        #try:
        #    os.remove(os.path.join(self.nzb_folder, str(ofilename)))
        #    logger.fdebug("Deleting : " + os.path.join(self.nzb_folder, str(ofilename)))
        #except (OSError, IOError):
        #    logger.fdebug("Failed to remove temporary directory - check directory and manually re-run.")
        #    logger.fdebug("Post-Processing ABORTED.")
        #    return
        #logger.fdebug("Removed temporary directory : " + str(self.nzb_folder))
    #delete entry from nzblog table
    myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
    #update snatched table to change status to Downloaded
    if annchk == "no":
        updater.foundsearch(comicid, issueid, down='True')
    else:
        updater.foundsearch(comicid, issueid, mode='want_ann', down='True')
    #force rescan of files
    updater.forceRescan(comicid)
    logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenumOG) )
    self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)
    if ml is not None:
        return
    else:
        # Notifications only fire on non-manual runs.
        if mylar.PROWL_ENABLED:
            pushmessage = series + '(' + issueyear + ') - issue #' + issuenumOG
            logger.info(u"Prowl request")
            prowl = notifiers.PROWL()
            prowl.notify(pushmessage,"Download and Postprocessing completed")
        if mylar.NMA_ENABLED:
            nma = notifiers.NMA()
            nma.notify(series, str(issueyear), str(issuenumOG))
        if mylar.PUSHOVER_ENABLED:
            pushmessage = series + ' (' + str(issueyear) + ') - issue #' + str(issuenumOG)
            logger.info(u"Pushover request")
            pushover = notifiers.PUSHOVER()
            pushover.notify(pushmessage, "Download and Post-Processing completed")
        if mylar.BOXCAR_ENABLED:
            boxcar = notifiers.BOXCAR()
            boxcar.notify(series, str(issueyear), str(issuenumOG))
    # retrieve/create the corresponding comic objects
    if mylar.ENABLE_EXTRA_SCRIPTS:
        folderp = str(dst) #folder location after move/rename
        nzbn = self.nzb_name #original nzb name
        filen = str(nfilename + ext) #new filename
        #name, comicyear, comicid , issueid, issueyear, issue, publisher
        #create the dic and send it.
        seriesmeta = []
        seriesmetadata = {}
        seriesmeta.append({
                    'name': series,
                    'comicyear': seriesyear,
                    'comicid': comicid,
                    'issueid': issueid,
                    'issueyear': issueyear,
                    'issue': issuenum,
                    'publisher': publisher
                    })
        seriesmetadata['seriesmeta'] = seriesmeta
        # NOTE(review): 'nzbname' is not assigned anywhere in this method ('nzbn' is) —
        # this call looks like it would raise NameError when extra scripts are enabled.
        # TODO confirm against the rest of the class.
        self._run_extra_scripts(nzbname, self.nzb_folder, filen, folderp, seriesmetadata )
    return self.log
# NOTE(review): this chunk duplicates/overlaps the importer code above — it looks like
# a fragment from a newer revision of the same file. The leading statements are the
# tail of an enclosing import function whose 'def' is not visible here, and the
# GCDimport definition below is truncated (it is cut off by the following chunk).
# Code is left byte-identical; only comments added. TODO confirm against upstream.
if mylar.AUTOWANT_UPCOMING and 'Present' in gcdinfo['resultPublished']:
    logger.info(u"Checking this week's pullist for new issues of " + str(comic['ComicName']))
    updater.newpullcheck(comic['ComicName'], comicid)
#here we grab issues that have been marked as wanted above...
results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid])
if results:
    logger.info(u"Attempting to grab wanted issues for : " + comic['ComicName'])
    for result in results:
        foundNZB = "none"
        # Newer revision: NEWZNAB/NZBX indexers added to the provider check.
        if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
            foundNZB = search.searchforissue(result['IssueID'])
            if foundNZB == "yes":
                updater.foundsearch(result['ComicID'], result['IssueID'])
else:
    logger.info(u"No issues marked as wanted for " + comic['ComicName'])
logger.info(u"Finished grabbing what I could.")

def GCDimport(gcomicid, pullupd=None):
    # Truncated duplicate of GCDimport (newer signature adds pullupd) — body cut off here.
    # this is for importing via GCD only and not using CV.
    # used when volume spanning is discovered for a Comic (and can't be added using CV).
    # Issue Counts are wrong (and can't be added).
    # because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
    # CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)
    gcdcomicid = gcomicid
    myDB = db.DBConnection()
def Process(self):
    """Post-process a completed download: locate its nzblog entry, rename the
    file per the configured file format, move it into the series folder, tidy
    the temporary download directory, update snatched/issue status, and fire
    any configured notifications and extra scripts.

    Returns self.log (the accumulated post-processing log) on the one-off /
    story-arc fast path; implicitly returns None on early aborts.
    """
    self._log("nzb name: " + str(self.nzb_name), logger.DEBUG)
    self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG)
    logger.fdebug("nzb name: " + str(self.nzb_name))
    logger.fdebug("nzb folder: " + str(self.nzb_folder))
    if mylar.USE_SABNZBD==0:
        logger.fdebug("Not using SABNzbd")
    else:
        # if the SAB Directory option is enabled, let's use that folder name and append the jobname.
        # FIX: compare the sentinel string with != (was `is not 'None'`, an
        # identity test that only worked via CPython string interning).
        if mylar.SAB_DIRECTORY is not None and mylar.SAB_DIRECTORY != 'None' and len(mylar.SAB_DIRECTORY) > 4:
            self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)
        #lookup nzb_name in nzblog table to get issueid
        #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
        #http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
        # FIX: the query string previously contained the mojibake '§ion='
        # (a garbled '&section=') which produced an invalid SAB API request.
        querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
        #logger.info("querysab_string:" + str(querysab))
        file = urllib2.urlopen(querysab)
        data = file.read()
        file.close()
        dom = parseString(data)
        try:
            sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
        except:
            # SAB returned an <error> payload instead of config - surface it and abort.
            errorm = dom.getElementsByTagName('error')[0].firstChild.wholeText
            logger.error(u"Error detected attempting to retrieve SAB data : " + errorm)
            return
        sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
        logger.fdebug("SAB Replace Spaces: " + str(sabreps))
        logger.fdebug("SAB Replace Dots: " + str(sabrepd))
    if mylar.USE_NZBGET==1:
        logger.fdebug("Using NZBGET")
        logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
    myDB = db.DBConnection()
    nzbname = self.nzb_name
    #remove extensions from nzb_name if they somehow got through (Experimental most likely)
    extensions = ('.cbr', '.cbz')
    if nzbname.lower().endswith(extensions):
        fd, ext = os.path.splitext(nzbname)
        self._log("Removed extension from nzb: " + ext, logger.DEBUG)
        # NOTE(review): ext is used as a regex pattern, so the '.' matches any
        # character (e.g. 'xcbr'); using fd directly would be safer - confirm.
        nzbname = re.sub(str(ext), '', str(nzbname))
    # normalize the name to match how it was recorded in nzblog at snatch time:
    # spaces -> dots, strip punctuation, '&' -> 'and'
    nzbname = re.sub(' ', '.', str(nzbname))
    nzbname = re.sub('[\,\:\?]', '', str(nzbname))
    nzbname = re.sub('[\&]', 'and', str(nzbname))
    logger.fdebug("After conversions, nzbname is : " + str(nzbname))
    self._log("nzbname: " + str(nzbname), logger.DEBUG)
    nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
    if nzbiss is None:
        self._log("Failure - could not initially locate nzbfile in my database to rename.", logger.DEBUG)
        logger.fdebug("Failure - could not locate nzbfile initially.")
        # if failed on spaces, change it all to decimals and try again.
        nzbname = re.sub('_', '.', str(nzbname))
        self._log("trying again with this nzbname: " + str(nzbname), logger.DEBUG)
        logger.fdebug("trying again with nzbname of : " + str(nzbname))
        nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
        if nzbiss is None:
            logger.error(u"Unable to locate downloaded file to rename. PostProcessing aborted.")
            return
        else:
            self._log("I corrected and found the nzb as : " + str(nzbname))
            logger.fdebug("auto-corrected and found the nzb as : " + str(nzbname))
            issueid = nzbiss['IssueID']
    else:
        issueid = nzbiss['IssueID']
    logger.fdebug("issueid:" + str(issueid))
    # FIX: sarc must be set on BOTH lookup paths - previously the
    # auto-corrected-nzbname path left it undefined, causing a NameError in
    # the story-arc one-off branch below.
    sarc = nzbiss['SARC']
    #use issueid to get publisher, series, year, issue number
    issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone()
    if issuenzb is not None:
        if helpers.is_number(issueid):
            sandwich = int(issuenzb['IssueID'])
    else:
        #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
        #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
        if 'S' in issueid:
            sandwich = issueid
        elif 'G' in issueid:
            sandwich = 1
    if helpers.is_number(sandwich):
        if sandwich < 900000:
            # if sandwich is less than 900000 it's a normal watchlist download. Bypass.
            pass
    else:
        if issuenzb is None or 'S' in sandwich or int(sandwich) >= 900000:
            # this has no issueID, therefore it's a one-off or a manual post-proc.
            # At this point, let's just drop it into the Comic Location folder and forget about it..
            if 'S' in sandwich:
                self._log("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
                logger.info("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
                if mylar.STORYARCDIR:
                    storyarcd = os.path.join(mylar.DESTINATION_DIR, "StoryArcs", sarc)
                    self._log("StoryArc Directory set to : " + storyarcd, logger.DEBUG)
                else:
                    self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)
            else:
                self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.", logger.DEBUG)
                logger.info("One-off mode enabled for Post-Processing. Will move into Grab-bag directory.")
                self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)
            # find the downloaded comic archive in the temp folder
            for root, dirnames, filenames in os.walk(self.nzb_folder):
                for filename in filenames:
                    if filename.lower().endswith(extensions):
                        ofilename = filename
                        path, ext = os.path.splitext(ofilename)
            # pick the destination: story-arc dir, grab-bag, or main destination
            if 'S' in sandwich:
                if mylar.STORYARCDIR:
                    grdst = storyarcd
                else:
                    grdst = mylar.DESTINATION_DIR
            else:
                if mylar.GRABBAG_DIR:
                    grdst = mylar.GRABBAG_DIR
                else:
                    grdst = mylar.DESTINATION_DIR
            filechecker.validateAndCreateDirectory(grdst, True)
            grab_dst = os.path.join(grdst, ofilename)
            self._log("Destination Path : " + grab_dst, logger.DEBUG)
            logger.info("Destination Path : " + grab_dst)
            grab_src = os.path.join(self.nzb_folder, ofilename)
            self._log("Source Path : " + grab_src, logger.DEBUG)
            logger.info("Source Path : " + grab_src)
            logger.info("Moving " + str(ofilename) + " into directory : " + str(grdst))
            try:
                shutil.move(grab_src, grab_dst)
            except (OSError, IOError):
                self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
                logger.debug("Failed to move directory - check directories and manually re-run.")
                return
            #tidyup old path
            try:
                shutil.rmtree(self.nzb_folder)
            except (OSError, IOError):
                self._log("Failed to remove temporary directory.", logger.DEBUG)
                logger.debug("Failed to remove temporary directory - check directory and manually re-run.")
                return
            logger.debug("Removed temporary directory : " + str(self.nzb_folder))
            self._log("Removed temporary directory : " + self.nzb_folder, logger.DEBUG)
            #delete entry from nzblog table
            myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
            if 'S' in issueid:
                # story-arc entry: mark the readinglist row Downloaded with its new location
                issuearcid = re.sub('S', '', issueid)
                logger.info("IssueArcID is : " + str(issuearcid))
                ctrlVal = {"IssueArcID": issuearcid}
                newVal = {"Status": "Downloaded",
                          "Location": grab_dst}
                myDB.upsert("readinglist", newVal, ctrlVal)
                logger.info("updated status to Downloaded")
            return self.log
    comicid = issuenzb['ComicID']
    issuenumOG = issuenzb['Issue_Number']
    #issueno = str(issuenum).split('.')[0]
    #new CV API - removed all decimals...here we go AGAIN!
    issuenum = issuenumOG
    issue_except = 'None'
    if 'au' in issuenum.lower():
        # 'AU' variant issue: strip to digits and remember the suffix
        issuenum = re.sub("[^0-9]", "", issuenum)
        issue_except = ' AU'
    if '.' in issuenum:
        # split a decimal issue number (e.g. '12.1') into whole and decimal parts
        iss_find = issuenum.find('.')
        iss_b4dec = issuenum[:iss_find]
        iss_decval = issuenum[iss_find+1:]
        if int(iss_decval) == 0:
            iss = iss_b4dec
            issdec = int(iss_decval)
            issueno = str(iss)
            self._log("Issue Number: " + str(issueno), logger.DEBUG)
            logger.fdebug("Issue Number: " + str(issueno))
        else:
            if len(iss_decval) == 1:
                iss = iss_b4dec + "." + iss_decval
                issdec = int(iss_decval) * 10
            else:
                iss = iss_b4dec + "." + iss_decval.rstrip('0')
                issdec = int(iss_decval.rstrip('0')) * 10
            issueno = iss_b4dec
            self._log("Issue Number: " + str(iss), logger.DEBUG)
            logger.fdebug("Issue Number: " + str(iss))
    else:
        iss = issuenum
        issueno = str(iss)
    # issue zero-suppression here
    if mylar.ZERO_LEVEL == "0":
        zeroadd = ""
    else:
        if mylar.ZERO_LEVEL_N == "none":
            zeroadd = ""
        elif mylar.ZERO_LEVEL_N == "0x":
            zeroadd = "0"
        elif mylar.ZERO_LEVEL_N == "00x":
            zeroadd = "00"
    logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))
    # NOTE(review): str vs int comparison - always True on py2, so the
    # "length error" fallback below is unreachable. Left as-is deliberately:
    # "fixing" it to len(issueno) > 1 would wrongly divert single-digit
    # issues away from zero-padding. Confirm intent before touching.
    if str(len(issueno)) > 1:
        if int(issueno) < 10:
            self._log("issue detected less than 10", logger.DEBUG)
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
                    prettycomiss = str(zeroadd) + str(iss)
                else:
                    prettycomiss = str(zeroadd) + str(int(issueno))
            else:
                prettycomiss = str(zeroadd) + str(iss)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
        elif int(issueno) >= 10 and int(issueno) < 100:
            self._log("issue detected greater than 10, but less than 100", logger.DEBUG)
            if mylar.ZERO_LEVEL_N == "none":
                zeroadd = ""
            else:
                zeroadd = "0"
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
                    prettycomiss = str(zeroadd) + str(iss)
                else:
                    prettycomiss = str(zeroadd) + str(int(issueno))
            else:
                prettycomiss = str(zeroadd) + str(iss)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss), logger.DEBUG)
        else:
            self._log("issue detected greater than 100", logger.DEBUG)
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
            prettycomiss = str(issueno)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
    else:
        prettycomiss = str(issueno)
        self._log("issue length error - cannot determine length. Defaulting to None: " + str(prettycomiss), logger.DEBUG)
    logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
    issueyear = issuenzb['IssueDate'][:4]
    self._log("Issue Year: " + str(issueyear), logger.DEBUG)
    logger.fdebug("Issue Year : " + str(issueyear))
    comicnzb = myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
    publisher = comicnzb['ComicPublisher']
    self._log("Publisher: " + publisher, logger.DEBUG)
    logger.fdebug("Publisher: " + str(publisher))
    #we need to un-unicode this to make sure we can write the filenames properly for spec.chars
    series = comicnzb['ComicName'].encode('ascii', 'ignore').strip()
    self._log("Series: " + series, logger.DEBUG)
    logger.fdebug("Series: " + str(series))
    seriesyear = comicnzb['ComicYear']
    self._log("Year: " + seriesyear, logger.DEBUG)
    logger.fdebug("Year: " + str(seriesyear))
    comlocation = comicnzb['ComicLocation']
    self._log("Comic Location: " + comlocation, logger.DEBUG)
    logger.fdebug("Comic Location: " + str(comlocation))
    comversion = comicnzb['ComicVersion']
    self._log("Comic Version: " + str(comversion), logger.DEBUG)
    logger.fdebug("Comic Version: " + str(comversion))
    if comversion is None:
        comversion = 'None'
    #if comversion is None, remove it so it doesn't populate with 'None'
    if comversion == 'None':
        chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT)
        chunk_f = re.compile(r'\s+')
        chunk_file_format = chunk_f.sub(' ', chunk_f_f)
        self._log("No version # found for series - tag will not be available for renaming.", logger.DEBUG)
        logger.fdebug("No version # found for series, removing from filename")
        logger.fdebug("new format is now: " + str(chunk_file_format))
    else:
        chunk_file_format = mylar.FILE_FORMAT
    #Run Pre-script
    if mylar.ENABLE_PRE_SCRIPTS:
        nzbn = self.nzb_name #original nzb name
        nzbf = self.nzb_folder #original nzb folder
        #name, comicyear, comicid , issueid, issueyear, issue, publisher
        #create the dic and send it.
        seriesmeta = []
        seriesmetadata = {}
        seriesmeta.append({
                    'name':      series,
                    'comicyear': seriesyear,
                    'comicid':   comicid,
                    'issueid':   issueid,
                    'issueyear': issueyear,
                    'issue':     issuenum,
                    'publisher': publisher
                    })
        seriesmetadata['seriesmeta'] = seriesmeta
        self._run_pre_scripts(nzbn, nzbf, seriesmetadata )
    #rename file and move to new path
    #nfilename = series + " " + issueno + " (" + seriesyear + ")"
    file_values = {'$Series':    series,
                   '$Issue':     prettycomiss,
                   '$Year':      issueyear,
                   '$series':    series.lower(),
                   '$Publisher': publisher,
                   '$publisher': publisher.lower(),
                   '$VolumeY':   'V' + str(seriesyear),
                   '$VolumeN':   comversion
                  }
    ofilename = None
    for root, dirnames, filenames in os.walk(self.nzb_folder):
        for filename in filenames:
            if filename.lower().endswith(extensions):
                ofilename = filename
                path, ext = os.path.splitext(ofilename)
    if ofilename is None:
        logger.error(u"Aborting PostProcessing - the filename doesn't exist in the location given. Make sure that " + str(self.nzb_folder) + " exists and is the correct location.")
        return
    self._log("Original Filename: " + ofilename, logger.DEBUG)
    self._log("Original Extension: " + ext, logger.DEBUG)
    logger.fdebug("Original Filname: " + str(ofilename))
    logger.fdebug("Original Extension: " + str(ext))
    if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES:
        self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG)
        logger.fdebug("Rename Files isn't enabled - keeping original filename.")
        #check if extension is in nzb_name - will screw up otherwise
        if ofilename.lower().endswith(extensions):
            nfilename = ofilename[:-4]
        else:
            nfilename = ofilename
    else:
        nfilename = helpers.replace_all(chunk_file_format, file_values)
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
        nfilename = re.sub('[\,\:\?]', '', nfilename)
    self._log("New Filename: " + nfilename, logger.DEBUG)
    logger.fdebug("New Filename: " + str(nfilename))
    src = os.path.join(self.nzb_folder, ofilename)
    filechecker.validateAndCreateDirectory(comlocation, True)
    if mylar.LOWERCASE_FILENAMES:
        dst = (comlocation + "/" + nfilename + ext).lower()
    else:
        dst = comlocation + "/" + nfilename + ext.lower()
    self._log("Source:" + src, logger.DEBUG)
    self._log("Destination:" + dst, logger.DEBUG)
    logger.fdebug("Source: " + str(src))
    logger.fdebug("Destination: " + str(dst))
    # rename in place first, then move the renamed file to its final home
    os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder, str(nfilename + ext)))
    src = os.path.join(self.nzb_folder, str(nfilename + ext))
    try:
        shutil.move(src, dst)
    except (OSError, IOError):
        self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
        self._log("Post-Processing ABORTED.", logger.DEBUG)
        return
    #tidyup old path
    try:
        shutil.rmtree(self.nzb_folder)
    except (OSError, IOError):
        self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
        self._log("Post-Processing ABORTED.", logger.DEBUG)
        return
    self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
    #delete entry from nzblog table
    myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
    #update snatched table to change status to Downloaded
    updater.foundsearch(comicid, issueid, down='True')
    #force rescan of files
    updater.forceRescan(comicid)
    logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenumOG) )
    self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)
    # notifications (each provider independently enabled)
    if mylar.PROWL_ENABLED:
        pushmessage = series + '(' + issueyear + ') - issue #' + issuenumOG
        logger.info(u"Prowl request")
        prowl = notifiers.PROWL()
        prowl.notify(pushmessage,"Download and Postprocessing completed")
    if mylar.NMA_ENABLED:
        nma = notifiers.NMA()
        nma.notify(series, str(issueyear), str(issuenumOG))
    if mylar.PUSHOVER_ENABLED:
        pushmessage = series + ' (' + str(issueyear) + ') - issue #' + str(issuenumOG)
        logger.info(u"Pushover request")
        pushover = notifiers.PUSHOVER()
        pushover.notify(pushmessage, "Download and Post-Processing completed")
    # retrieve/create the corresponding comic objects
    if mylar.ENABLE_EXTRA_SCRIPTS:
        folderp = str(dst) #folder location after move/rename
        nzbn = self.nzb_name #original nzb name
        filen = str(nfilename + ext) #new filename
        #name, comicyear, comicid , issueid, issueyear, issue, publisher
        #create the dic and send it.
        seriesmeta = []
        seriesmetadata = {}
        seriesmeta.append({
                    'name':      series,
                    'comicyear': seriesyear,
                    'comicid':   comicid,
                    'issueid':   issueid,
                    'issueyear': issueyear,
                    'issue':     issuenum,
                    'publisher': publisher
                    })
        seriesmetadata['seriesmeta'] = seriesmeta
        self._run_extra_scripts(nzbname, self.nzb_folder, filen, folderp, seriesmetadata )
    return self.log
def GCDimport(gcomicid, pullupd=None, imported=None, ogcname=None):
    """Import/refresh a series using Grand Comics Database (GCD) data only,
    bypassing ComicVine.

    Used when volume spanning is discovered for a Comic (and it can't be added
    using CV). Issue counts are wrong (and can't be added). Because a ComicVine
    ComicID and a GCD ComicID could collide, GCD ids carry a 'G' prefix
    (ie. CV=2740, GCD=G3719).

    gcomicid -- 'G'-prefixed GCD comic id (row key in the comics table)
    pullupd  -- when not None, skip the comic re-sort and the post-import
                pullist/wanted-issue grab
    imported -- mass-importer marker; triggers file move/archive when set
    ogcname  -- original comic name, passed through to the mass-import movers

    Returns "true" when GCD finds no match; otherwise None.
    """
    gcdcomicid = gcomicid
    myDB = db.DBConnection()
    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage
    controlValueDict = {"ComicID": gcdcomicid}
    comic = myDB.action('SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation, ComicPublisher FROM comics WHERE ComicID=?', [gcomicid]).fetchone()
    ComicName = comic[0]
    ComicYear = comic[1]
    ComicIssues = comic[2]
    ComicPublished = comic[3]
    comlocation = comic[5]
    ComicPublisher = comic[6]
    #ComicImage = comic[4]
    newValueDict = {"Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)
    # we need to lookup the info for the requested ComicID in full now
    if not comic:
        logger.warn("Error fetching comic. ID for : " + gcdcomicid)
        # FIX: this branch previously tested the undefined name 'dbcomic'
        # (NameError whenever it was reached); the fetched row here is 'comic'.
        if comic is None:
            newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (gcdcomicid),
                            "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return
    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(comicorder=mylar.COMICSORT, imported=gcomicid)
    if ComicName.startswith('The '):
        sortname = ComicName[4:]
    else:
        sortname = ComicName
    logger.info(u"Now adding/updating: " + ComicName)
    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    comicid = gcomicid[1:]
    resultURL = "/series/" + str(comicid) + "/"
    gcdinfo = parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=gcdcomicid, TotalIssues=ComicIssues, issvariation=None, resultPublished=None)
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")")
        updater.no_searchresults(gcomicid)
        nomatch = "true"
        return nomatch
    logger.info(u"Sucessfully retrieved details for " + ComicName)
    #--End
    ComicImage = gcdinfo['ComicImage']
    #comic book location on machine
    # setup default location here
    if comlocation is None:
        # let's remove the non-standard characters here.
        u_comicnm = ComicName
        u_comicname = u_comicnm.encode('ascii', 'ignore').strip()
        if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname:
            comicdir = u_comicname
            if ':' in comicdir:
                comicdir = comicdir.replace(':', '')
            if '/' in comicdir:
                comicdir = comicdir.replace('/', '-')
            if ',' in comicdir:
                comicdir = comicdir.replace(',', '')
        else:
            comicdir = u_comicname
        series = comicdir
        publisher = ComicPublisher
        year = ComicYear
        #do work to generate folder path
        values = {'$Series':    series,
                  '$Publisher': publisher,
                  '$Year':      year,
                  '$series':    series.lower(),
                  '$publisher': publisher.lower(),
                  '$Volume':    year
                 }
        if mylar.FOLDER_FORMAT == '':
            comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
        else:
            comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values)
        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)
    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
    else:
        filechecker.validateAndCreateDirectory(comlocation, True)
    comicIssues = gcdinfo['totalissues']
    #let's download the image...
    if os.path.exists(mylar.CACHE_DIR):
        pass
    else:
        #let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
        except OSError:
            logger.error(u"Could not create cache dir : " + str(mylar.CACHE_DIR))
    coverfile = os.path.join(mylar.CACHE_DIR, str(gcomicid) + ".jpg")
    urllib.urlretrieve(str(ComicImage), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = os.path.join('cache', str(gcomicid) + ".jpg")
            #this is for Firefox when outside the LAN...it works, but I don't know how to implement it
            #without breaking the normal flow for inside the LAN (above)
            #ComicImage = "http://" + str(mylar.HTTP_HOST) + ":" + str(mylar.HTTP_PORT) + "/cache/" + str(comi$
            logger.info(u"Sucessfully retrieved cover for " + ComicName)
            #if the comic cover local is checked, save a cover.jpg to the series folder.
            if mylar.COMIC_COVER_LOCAL:
                comiclocal = os.path.join(str(comlocation) + "/cover.jpg")
                shutil.copy(ComicImage, comiclocal)
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")
    controlValueDict = {"ComicID": gcomicid}
    newValueDict = {"ComicName": ComicName,
                    "ComicSortName": sortname,
                    "ComicYear": ComicYear,
                    "Total": comicIssues,
                    "ComicLocation": comlocation,
                    "ComicImage": ComicImage,
                    "DateAdded": helpers.today(),
                    "Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)
    #comicsort here...
    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(sequence='update')
    logger.info(u"Sucessfully retrieved issue details for " + ComicName)
    n = 0
    iscnt = int(comicIssues)
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    logger.info(u"Now adding/updating issues for " + ComicName)
    bb = 0
    while (bb <= iscnt):
        try:
            gcdval = gcdinfo['gcdchoice'][bb]
        except IndexError:
            #account for gcd variation here
            if gcdinfo['gcdvariation'] == 'gcd':
                issdate = '0000-00-00'
                # NOTE(review): 'issis' is undefined in this function and will
                # raise NameError if this branch is hit on the first iteration;
                # it looks like a typo for 'gcdis' - confirm against upstream.
                int_issnum = int(issis / 1000)
            break
        if 'nn' in str(gcdval['GCDIssue']):
            #no number detected - GN, TP or the like
            logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
            # FIX: mark the G-prefixed id that keys the comics row (was the
            # stripped numeric 'comicid', which matches no row).
            updater.no_searchresults(gcomicid)
            return
        elif '.' in str(gcdval['GCDIssue']):
            # decimal issue (e.g. '3.1'): encode as whole*1000 + scaled decimal
            issst = str(gcdval['GCDIssue']).find('.')
            issb4dec = str(gcdval['GCDIssue'])[:issst]
            #if the length of decimal is only 1 digit, assume it's a tenth
            decis = str(gcdval['GCDIssue'])[issst + 1:]
            if len(decis) == 1:
                decisval = int(decis) * 10
                issaftdec = str(decisval)
            if len(decis) == 2:
                decisval = int(decis)
                issaftdec = str(decisval)
            if int(issaftdec) == 0:
                issaftdec = "00"
            gcd_issue = issb4dec + "." + issaftdec
            gcdis = (int(issb4dec) * 1000) + decisval
        else:
            gcdis = int(str(gcdval['GCDIssue'])) * 1000
            gcd_issue = str(gcdval['GCDIssue'])
        #get the latest issue / date using the date.
        int_issnum = int(gcdis / 1000)
        issdate = str(gcdval['GCDDate'])
        issid = "G" + str(gcdval['IssueID'])
        if gcdval['GCDDate'] > latestdate:
            latestiss = str(gcd_issue)
            latestdate = str(gcdval['GCDDate'])
        # check if the issue already exists
        iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()
        #adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
        if "?" in str(issdate):
            issdate = "0000-00-00"
        controlValueDict = {"IssueID": issid}
        newValueDict = {"ComicID": gcomicid,
                        "ComicName": ComicName,
                        "Issue_Number": gcd_issue,
                        "IssueDate": issdate,
                        "Int_IssueNumber": int_issnum}
        # Only change the status & add DateAdded if the issue is not already in the database
        # FIX: DateAdded was previously written into the prior loop iteration's
        # dict and then discarded when newValueDict was rebuilt; set it here so
        # new issues actually persist their DateAdded.
        if iss_exists is None:
            newValueDict['DateAdded'] = helpers.today()
        if mylar.AUTOWANT_ALL:
            newValueDict['Status'] = "Wanted"
        elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
            newValueDict['Status'] = "Wanted"
        else:
            newValueDict['Status'] = "Skipped"
        if iss_exists:
            # preserve whatever status the user already has on this issue
            newValueDict['Status'] = iss_exists['Status']
        myDB.upsert("issues", newValueDict, controlValueDict)
        bb += 1
    controlValueStat = {"ComicID": gcomicid}
    newValueStat = {"Status": "Active",
                    "LatestIssue": latestiss,
                    "LatestDate": latestdate,
                    "LastUpdated": helpers.now()}
    myDB.upsert("comics", newValueStat, controlValueStat)
    if mylar.CVINFO:
        # NOTE(review): writes a ComicVine volume URL using the GCD-derived
        # numeric id - the link is almost certainly wrong for GCD imports;
        # kept as-is, confirm intent.
        if not os.path.exists(comlocation + "/cvinfo"):
            with open(comlocation + "/cvinfo", "w") as text_file:
                text_file.write("http://www.comicvine.com/volume/49-" + str(comicid))
    logger.info(u"Updating complete for: " + ComicName)
    #move the files...if imported is not empty (meaning it's not from the mass importer.)
    if imported is None or imported == 'None':
        pass
    else:
        if mylar.IMP_MOVE:
            logger.info("Mass import - Move files")
            moveit.movefiles(gcomicid, comlocation, ogcname)
        else:
            logger.info("Mass import - Moving not Enabled. Setting Archived Status for import.")
            moveit.archivefiles(gcomicid, ogcname)
    #check for existing files...
    updater.forceRescan(gcomicid)
    if pullupd is None:
        # lets' check the pullist for anyting at this time as well since we're here.
        if mylar.AUTOWANT_UPCOMING and 'Present' in ComicPublished:
            logger.info(u"Checking this week's pullist for new issues of " + ComicName)
            updater.newpullcheck(comic['ComicName'], gcomicid)
        #here we grab issues that have been marked as wanted above...
        results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid])
        if results:
            logger.info(u"Attempting to grab wanted issues for : " + ComicName)
            for result in results:
                foundNZB = "none"
                if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
                    foundNZB = search.searchforissue(result['IssueID'])
                    if foundNZB == "yes":
                        updater.foundsearch(result['ComicID'], result['IssueID'])
        else:
            logger.info(u"No issues marked as wanted for " + ComicName)
        logger.info(u"Finished grabbing what I could.")
def Process_next(self, comicid, issueid, issuenumOG, ml=None):
    """Post-process a single downloaded issue (or annual).

    Looks up the issue/comic records, normalizes the issue number into a
    "pretty" zero-padded form, optionally meta-tags the file via comictagger,
    runs pre/extra scripts, renames and moves the file into the comic's
    library location, updates the snatched/nzblog tables, forces a rescan,
    and fires any enabled notifiers.

    Parameters:
        comicid    -- ComicID of the series the download belongs to.
        issueid    -- IssueID of the downloaded issue (falls back to the
                      annuals table when no matching row exists in issues).
        issuenumOG -- original issue number string, used only for display.
        ml         -- manual-run record; when not None, ml['ComicLocation']
                      supplies the exact source file and the temporary
                      download folder is NOT deleted afterwards.

    Returns:
        self.log on success or on the "unrar error" bail-out; bare None on
        the abort paths (missing file, failed move/cleanup).
    """
    annchk = "no"
    # Only these archive extensions are recognized when scanning the folder.
    extensions = ('.cbr', '.cbz')
    myDB = db.DBConnection()
    comicnzb = myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
    issuenzb = myDB.action("SELECT * from issues WHERE issueid=? AND comicid=? AND ComicName NOT NULL", [issueid, comicid]).fetchone()
    logger.fdebug('issueid: ' + str(issueid))
    logger.fdebug('issuenumOG: ' + str(issuenumOG))
    # No row in issues -> this must be an annual; switch tables and flag it.
    if issuenzb is None:
        issuenzb = myDB.action("SELECT * from annuals WHERE issueid=? and comicid=?", [issueid, comicid]).fetchone()
        annchk = "yes"
    #issueno = str(issuenum).split('.')[0]
    #new CV API - removed all decimals...here we go AGAIN!
    issuenum = issuenzb['Issue_Number']
    # issue_except carries a suffix ('AU', 'AI', '.INH', '.NOW') stripped out
    # of the numeric part so padding works; re-appended to prettycomiss later.
    issue_except = 'None'
    if 'au' in issuenum.lower() and issuenum[:1].isdigit():
        issuenum = re.sub("[^0-9]", "", issuenum)
        issue_except = ' AU'
    elif 'ai' in issuenum.lower() and issuenum[:1].isdigit():
        issuenum = re.sub("[^0-9]", "", issuenum)
        issue_except = ' AI'
    elif 'inh' in issuenum.lower() and issuenum[:1].isdigit():
        issuenum = re.sub("[^0-9]", "", issuenum)
        issue_except = '.INH'
    elif 'now' in issuenum.lower() and issuenum[:1].isdigit():
        if '!' in issuenum:
            issuenum = re.sub('\!', '', issuenum)
        issuenum = re.sub("[^0-9]", "", issuenum)
        issue_except = '.NOW'
    # Split a decimal issue number (e.g. "12.5") into its whole and decimal
    # parts; a trailing ".0" is treated the same as no decimal at all.
    if '.' in issuenum:
        iss_find = issuenum.find('.')
        iss_b4dec = issuenum[:iss_find]
        iss_decval = issuenum[iss_find + 1:]
        if int(iss_decval) == 0:
            iss = iss_b4dec
            issdec = int(iss_decval)
            issueno = str(iss)
            self._log("Issue Number: " + str(issueno), logger.DEBUG)
            logger.fdebug("Issue Number: " + str(issueno))
        else:
            if len(iss_decval) == 1:
                iss = iss_b4dec + "." + iss_decval
                issdec = int(iss_decval) * 10
            else:
                # Drop trailing zeros from the decimal part ("50" -> "5").
                iss = iss_b4dec + "." + iss_decval.rstrip('0')
                issdec = int(iss_decval.rstrip('0')) * 10
            issueno = iss_b4dec
            self._log("Issue Number: " + str(iss), logger.DEBUG)
            logger.fdebug("Issue Number: " + str(iss))
    else:
        iss = issuenum
        issueno = str(iss)
    # issue zero-suppression here
    # zeroadd is the zero-padding prefix derived from the configured level:
    # "none" -> no pad, "0x" -> one zero, "00x" -> two zeros.
    if mylar.ZERO_LEVEL == "0":
        zeroadd = ""
    else:
        if mylar.ZERO_LEVEL_N == "none":
            zeroadd = ""
        elif mylar.ZERO_LEVEL_N == "0x":
            zeroadd = "0"
        elif mylar.ZERO_LEVEL_N == "00x":
            zeroadd = "00"
    logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))
    # NOTE(review): str(len(issueno)) > 1 compares a string to an int; on
    # Python 2 this is always True, so the final "length error" else branch
    # below looks unreachable — presumably len(issueno) >= 1 was intended.
    if str(len(issueno)) > 1:
        if int(issueno) < 10:
            self._log("issue detected less than 10", logger.DEBUG)
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
                    prettycomiss = str(zeroadd) + str(iss)
                else:
                    prettycomiss = str(zeroadd) + str(int(issueno))
            else:
                prettycomiss = str(zeroadd) + str(iss)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
        elif int(issueno) >= 10 and int(issueno) < 100:
            self._log("issue detected greater than 10, but less than 100", logger.DEBUG)
            # Two-digit issues get at most one zero of padding.
            if mylar.ZERO_LEVEL_N == "none":
                zeroadd = ""
            else:
                zeroadd = "0"
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
                    prettycomiss = str(zeroadd) + str(iss)
                else:
                    prettycomiss = str(zeroadd) + str(int(issueno))
            else:
                prettycomiss = str(zeroadd) + str(iss)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss), logger.DEBUG)
        else:
            # >= 100: no padding needed; keep the decimal form if non-zero.
            self._log("issue detected greater than 100", logger.DEBUG)
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
            prettycomiss = str(issueno)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
    else:
        prettycomiss = str(issueno)
        self._log("issue length error - cannot determine length. Defaulting to None: " + str(prettycomiss), logger.DEBUG)
    if annchk == "yes":
        self._log("Annual detected.")
    logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
    # Gather the metadata fields used to build the destination filename.
    issueyear = issuenzb['IssueDate'][:4]
    self._log("Issue Year: " + str(issueyear), logger.DEBUG)
    logger.fdebug("Issue Year : " + str(issueyear))
    month = issuenzb['IssueDate'][5:7].replace('-', '').strip()
    month_name = helpers.fullmonth(month)
    # comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
    publisher = comicnzb['ComicPublisher']
    self._log("Publisher: " + publisher, logger.DEBUG)
    logger.fdebug("Publisher: " + str(publisher))
    #we need to un-unicode this to make sure we can write the filenames properly for spec.chars
    series = comicnzb['ComicName'].encode('ascii', 'ignore').strip()
    self._log("Series: " + series, logger.DEBUG)
    logger.fdebug("Series: " + str(series))
    seriesyear = comicnzb['ComicYear']
    self._log("Year: " + seriesyear, logger.DEBUG)
    logger.fdebug("Year: " + str(seriesyear))
    comlocation = comicnzb['ComicLocation']
    self._log("Comic Location: " + comlocation, logger.DEBUG)
    logger.fdebug("Comic Location: " + str(comlocation))
    comversion = comicnzb['ComicVersion']
    self._log("Comic Version: " + str(comversion), logger.DEBUG)
    logger.fdebug("Comic Version: " + str(comversion))
    if comversion is None:
        comversion = 'None'
    #if comversion is None, remove it so it doesn't populate with 'None'
    if comversion == 'None':
        # Strip the $VolumeN token from the filename format and collapse the
        # doubled whitespace it leaves behind.
        chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT)
        chunk_f = re.compile(r'\s+')
        chunk_file_format = chunk_f.sub(' ', chunk_f_f)
        self._log("No version # found for series - tag will not be available for renaming.", logger.DEBUG)
        logger.fdebug("No version # found for series, removing from filename")
        logger.fdebug("new format is now: " + str(chunk_file_format))
    else:
        chunk_file_format = mylar.FILE_FORMAT
    if annchk == "no":
        # Not an annual: the $Annual token must not appear in the filename.
        chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
        chunk_f = re.compile(r'\s+')
        chunk_file_format = chunk_f.sub(' ', chunk_f_f)
        logger.fdebug('not an annual - removing from filename paramaters')
        logger.fdebug('new format: ' + str(chunk_file_format))
    else:
        logger.fdebug('chunk_file_format is: ' + str(chunk_file_format))
        if '$Annual' not in chunk_file_format:
            #if it's an annual, but $Annual isn't specified in file_format, we need to
            #force it in there, by default in the format of $Annual $Issue
            prettycomiss = "Annual " + str(prettycomiss)
            logger.fdebug('prettycomiss: ' + str(prettycomiss))
    ofilename = None
    #if meta-tagging is not enabled, we need to declare the check as being fail
    #if meta-tagging is enabled, it gets changed just below to a default of pass
    pcheck = "fail"
    #tag the meta.
    if mylar.ENABLE_META:
        self._log("Metatagging enabled - proceeding...")
        logger.fdebug("Metatagging enabled - proceeding...")
        pcheck = "pass"
        try:
            # Imported lazily so a missing comictaggerlib only disables
            # tagging rather than breaking the whole module.
            import cmtagmylar
            if ml is None:
                pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid)
            else:
                pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid, manual="yes", filename=ml['ComicLocation'])
        except ImportError:
            logger.fdebug("comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/")
            logger.fdebug("continuing with PostProcessing, but I'm not using metadata.")
            pcheck = "fail"
        if pcheck == "fail":
            self._log("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...")
            logger.fdebug("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...")
        elif pcheck == "unrar error":
            # Corrupt archive: abort processing entirely so a retry can grab
            # a different copy.
            self._log("This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy.")
            logger.error("This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy.")
            return self.log
        else:
            # On success cmtagmylar.run returns the path of the tagged .cbz.
            otofilename = pcheck
            self._log("Sucessfully wrote metadata to .cbz - Continuing..")
            logger.fdebug("Sucessfully wrote metadata to .cbz (" + str(otofilename) + ") - Continuing..")
    #Run Pre-script
    if mylar.ENABLE_PRE_SCRIPTS:
        nzbn = self.nzb_name  #original nzb name
        nzbf = self.nzb_folder  #original nzb folder
        #name, comicyear, comicid , issueid, issueyear, issue, publisher
        #create the dic and send it.
        seriesmeta = []
        seriesmetadata = {}
        seriesmeta.append({
            'name': series,
            'comicyear': seriesyear,
            'comicid': comicid,
            'issueid': issueid,
            'issueyear': issueyear,
            'issue': issuenum,
            'publisher': publisher
        })
        seriesmetadata['seriesmeta'] = seriesmeta
        self._run_pre_scripts(nzbn, nzbf, seriesmetadata)
    #rename file and move to new path
    #nfilename = series + " " + issueno + " (" + seriesyear + ")"
    # Token -> value substitutions applied to the user's filename format.
    file_values = {
        '$Series': series,
        '$Issue': prettycomiss,
        '$Year': issueyear,
        '$series': series.lower(),
        '$Publisher': publisher,
        '$publisher': publisher.lower(),
        '$VolumeY': 'V' + str(seriesyear),
        '$VolumeN': comversion,
        '$monthname': month_name,
        '$month': month,
        '$Annual': 'Annual'
    }
    #if it's a Manual Run, use the ml['ComicLocation'] for the exact filename.
    if ml is None:
        # Automatic run: locate the archive by scanning the download folder.
        # NOTE(review): if several archives match, the last one walked wins.
        for root, dirnames, filenames in os.walk(self.nzb_folder):
            for filename in filenames:
                if filename.lower().endswith(extensions):
                    ofilename = filename
                    path, ext = os.path.splitext(ofilename)
    else:
        # Manual run: the exact file is known (or replaced by the tagged copy).
        if pcheck == "fail":
            otofilename = ml['ComicLocation']
        logger.fdebug('otofilename:' + str(otofilename))
        odir, ofilename = os.path.split(otofilename)
        logger.fdebug('ofilename: ' + str(ofilename))
        path, ext = os.path.splitext(ofilename)
        logger.fdebug('path: ' + str(path))
        logger.fdebug('ext:' + str(ext))
    if ofilename is None:
        logger.error(u"Aborting PostProcessing - the filename doesn't exist in the location given. Make sure that " + str(self.nzb_folder) + " exists and is the correct location.")
        return
    self._log("Original Filename: " + ofilename, logger.DEBUG)
    self._log("Original Extension: " + ext, logger.DEBUG)
    logger.fdebug("Original Filname: " + str(ofilename))
    logger.fdebug("Original Extension: " + str(ext))
    if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES:
        self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG)
        logger.fdebug("Rename Files isn't enabled - keeping original filename.")
        #check if extension is in nzb_name - will screw up otherwise
        # NOTE(review): [:-4] assumes a 4-character extension (.cbr/.cbz) —
        # true for the extensions tuple above, but brittle if it grows.
        if ofilename.lower().endswith(extensions):
            nfilename = ofilename[:-4]
        else:
            nfilename = ofilename
    else:
        nfilename = helpers.replace_all(chunk_file_format, file_values)
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
    # Scrub characters that are unsafe in filenames; '/' becomes '-'.
    nfilename = re.sub('[\,\:\?]', '', nfilename)
    nfilename = re.sub('[\/]', '-', nfilename)
    self._log("New Filename: " + nfilename, logger.DEBUG)
    logger.fdebug("New Filename: " + str(nfilename))
    src = os.path.join(self.nzb_folder, ofilename)
    filechecker.validateAndCreateDirectory(comlocation, True)
    if mylar.LOWERCASE_FILENAMES:
        dst = (comlocation + "/" + nfilename + ext).lower()
    else:
        dst = comlocation + "/" + nfilename + ext.lower()
    self._log("Source:" + src, logger.DEBUG)
    self._log("Destination:" + dst, logger.DEBUG)
    logger.fdebug("Source: " + str(src))
    logger.fdebug("Destination: " + str(dst))
    if ml is None:
        #non-manual run moving/deleting...
        logger.fdebug('self.nzb_folder: ' + self.nzb_folder)
        logger.fdebug('ofilename:' + str(ofilename))
        logger.fdebug('nfilename:' + str(nfilename + ext))
        # Rename in place first, then move the renamed file to the library.
        os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder, str(nfilename + ext)))
        src = os.path.join(self.nzb_folder, str(nfilename + ext))
        try:
            shutil.move(src, dst)
        except (OSError, IOError):
            self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
            self._log("Post-Processing ABORTED.", logger.DEBUG)
            return
        #tidyup old path
        try:
            shutil.rmtree(self.nzb_folder)
        except (OSError, IOError):
            self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
            self._log("Post-Processing ABORTED.", logger.DEBUG)
            return
        self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
    else:
        #Manual Run, this is the portion.
        logger.fdebug("Renaming " + os.path.join(self.nzb_folder, str(ofilename)) + " ..to.. " + os.path.join(self.nzb_folder, str(nfilename + ext)))
        os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder, str(nfilename + ext)))
        src = os.path.join(self.nzb_folder, str(nfilename + ext))
        logger.fdebug("Moving " + src + " ... to ... " + dst)
        try:
            shutil.move(src, dst)
        except (OSError, IOError):
            logger.fdebug("Failed to move directory - check directories and manually re-run.")
            logger.fdebug("Post-Processing ABORTED.")
            return
        logger.fdebug("Successfully moved to : " + dst)
        # Manual runs deliberately leave the source folder intact (cleanup
        # below was disabled on purpose).
        #tidyup old path
        #try:
        #    os.remove(os.path.join(self.nzb_folder, str(ofilename)))
        #    logger.fdebug("Deleting : " + os.path.join(self.nzb_folder, str(ofilename)))
        #except (OSError, IOError):
        #    logger.fdebug("Failed to remove temporary directory - check directory and manually re-run.")
        #    logger.fdebug("Post-Processing ABORTED.")
        #    return
        #logger.fdebug("Removed temporary directory : " + str(self.nzb_folder))
    #delete entry from nzblog table
    myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
    #update snatched table to change status to Downloaded
    if annchk == "no":
        updater.foundsearch(comicid, issueid, down='True')
        dispiss = 'issue: ' + str(issuenumOG)
    else:
        updater.foundsearch(comicid, issueid, mode='want_ann', down='True')
        dispiss = 'annual issue: ' + str(issuenumOG)
    #force rescan of files
    updater.forceRescan(comicid)
    logger.info(u"Post-Processing completed for: " + series + " " + dispiss)
    self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)
    # retrieve/create the corresponding comic objects
    if mylar.ENABLE_EXTRA_SCRIPTS:
        folderp = str(dst)  #folder location after move/rename
        nzbn = self.nzb_name  #original nzb name
        filen = str(nfilename + ext)  #new filename
        #name, comicyear, comicid , issueid, issueyear, issue, publisher
        #create the dic and send it.
        seriesmeta = []
        seriesmetadata = {}
        seriesmeta.append({
            'name': series,
            'comicyear': seriesyear,
            'comicid': comicid,
            'issueid': issueid,
            'issueyear': issueyear,
            'issue': issuenum,
            'publisher': publisher
        })
        seriesmetadata['seriesmeta'] = seriesmeta
        self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata)
    if ml is not None:
        # Manual run: skip notifications entirely.
        return self.log
    else:
        # Fire whichever push notifiers are enabled.
        if mylar.PROWL_ENABLED:
            pushmessage = series + '(' + issueyear + ') - issue #' + issuenumOG
            logger.info(u"Prowl request")
            prowl = notifiers.PROWL()
            prowl.notify(pushmessage, "Download and Postprocessing completed")
        if mylar.NMA_ENABLED:
            nma = notifiers.NMA()
            nma.notify(series, str(issueyear), str(issuenumOG))
        if mylar.PUSHOVER_ENABLED:
            pushmessage = series + ' (' + str(issueyear) + ') - issue #' + str(issuenumOG)
            logger.info(u"Pushover request")
            pushover = notifiers.PUSHOVER()
            pushover.notify(pushmessage, "Download and Post-Processing completed")
        if mylar.BOXCAR_ENABLED:
            boxcar = notifiers.BOXCAR()
            boxcar.notify(series, str(issueyear), str(issuenumOG))
        return self.log