def markasRead(self, IssueID=None, IssueArcID=None):
    myDB = db.DBConnection()
    if IssueID:
        issue = myDB.selectone('SELECT * from readlist WHERE IssueID=?', [IssueID]).fetchone()
        if issue['Status'] == 'Read':
            NewVal = {"Status": "Added"}
        else:
            NewVal = {"Status": "Read"}
        NewVal['StatusChange'] = helpers.today()
        CtrlVal = {"IssueID": IssueID}
        myDB.upsert("readlist", NewVal, CtrlVal)
        logger.info(self.module + ' Marked ' + issue['ComicName'] + ' #' + str(issue['Issue_Number']) + ' as Read.')
    elif IssueArcID:
        issue = myDB.selectone('SELECT * from readinglist WHERE IssueArcID=?', [IssueArcID]).fetchone()
        if issue['Status'] == 'Read':
            NewVal = {"Status": "Added"}
        else:
            NewVal = {"Status": "Read"}
        NewVal['StatusChange'] = helpers.today()
        CtrlVal = {"IssueArcID": IssueArcID}
        myDB.upsert("readinglist", NewVal, CtrlVal)
        logger.info(self.module + ' Marked ' + issue['ComicName'] + ' #' + str(issue['IssueNumber']) + ' as Read.')
    return
def markasRead(self):
    myDB = db.DBConnection()
    if self.IssueID:
        issue = myDB.selectone('SELECT * from readlist WHERE IssueID=?', [self.IssueID]).fetchone()
        if issue['Status'] == 'Read':
            NewVal = {"Status": "Added"}
        else:
            NewVal = {"Status": "Read"}
        NewVal['StatusChange'] = helpers.today()
        CtrlVal = {"IssueID": self.IssueID}
        myDB.upsert("readlist", NewVal, CtrlVal)
        logger.info(self.module + ' Marked ' + issue['ComicName'] + ' #' + str(issue['Issue_Number']) + ' as Read.')
    elif self.IssueArcID:
        issue = myDB.selectone('SELECT * from readinglist WHERE IssueArcID=?', [self.IssueArcID]).fetchone()
        if issue['Status'] == 'Read':
            NewVal = {"Status": "Added"}
        else:
            NewVal = {"Status": "Read"}
        NewVal['StatusChange'] = helpers.today()
        CtrlVal = {"IssueArcID": self.IssueArcID}
        myDB.upsert("readinglist", NewVal, CtrlVal)
        logger.info(self.module + ' Marked ' + issue['ComicName'] + ' #' + str(issue['IssueNumber']) + ' as Read.')
    return
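# A minimal, self-contained sketch of the Read/Added toggle used by both
# markasRead variants above. The date stamping is reproduced with the stdlib
# here as an assumption; the real code goes through helpers.today() and
# myDB.upsert() instead of returning a dict.
import datetime


def _toggle_read_status(current_status):
    # 'Read' flips back to 'Added'; anything else becomes 'Read'.
    new_status = 'Added' if current_status == 'Read' else 'Read'
    return {'Status': new_status,
            'StatusChange': datetime.date.today().strftime('%Y-%m-%d')}

# Example: _toggle_read_status('Read') -> {'Status': 'Added', 'StatusChange': '2013-...'}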
def addtoreadlist(self):
    annualize = False
    myDB = db.DBConnection()
    readlist = myDB.selectone("SELECT * from issues where IssueID=?", [self.IssueID]).fetchone()
    if readlist is None:
        logger.fdebug(self.module + ' Checking against annuals..')
        readlist = myDB.selectone("SELECT * from annuals where IssueID=?", [self.IssueID]).fetchone()
        if readlist is None:
            logger.error(self.module + ' Cannot locate IssueID - aborting..')
            return
        else:
            annualize = True
    comicinfo = myDB.selectone("SELECT * from comics where ComicID=?", [readlist['ComicID']]).fetchone()
    logger.info(self.module + ' Attempting to add issueid ' + readlist['IssueID'])
    if comicinfo is None:
        logger.info(self.module + ' Issue not located on your current watchlist. I should probably check story-arcs but I do not have that capability just yet.')
    else:
        locpath = None
        if mylar.MULTIPLE_DEST_DIRS is not None and mylar.MULTIPLE_DEST_DIRS != 'None' and os.path.join(mylar.MULTIPLE_DEST_DIRS, os.path.basename(comicinfo['ComicLocation'])) != comicinfo['ComicLocation']:
            logger.fdebug(self.module + ' Multiple_dest_dirs: ' + mylar.MULTIPLE_DEST_DIRS)
            logger.fdebug(self.module + ' Dir: ' + comicinfo['ComicLocation'])
            logger.fdebug(self.module + ' Os.path.basename: ' + os.path.basename(comicinfo['ComicLocation']))
            pathdir = os.path.join(mylar.MULTIPLE_DEST_DIRS, os.path.basename(comicinfo['ComicLocation']))
            if os.path.exists(os.path.join(pathdir, readlist['Location'])):
                locpath = os.path.join(pathdir, readlist['Location'])
            else:
                if os.path.exists(os.path.join(comicinfo['ComicLocation'], readlist['Location'])):
                    locpath = os.path.join(comicinfo['ComicLocation'], readlist['Location'])
        else:
            if os.path.exists(os.path.join(comicinfo['ComicLocation'], readlist['Location'])):
                locpath = os.path.join(comicinfo['ComicLocation'], readlist['Location'])
        if locpath is not None:
            comicissue = readlist['Issue_Number']
            comicname = comicinfo['ComicName']
            dspinfo = comicname + ' #' + comicissue
            if annualize:
                if mylar.ANNUALS_ON:
                    comicissue = 'Annual ' + readlist['Issue_Number']
                    dspinfo = comicname + ' Annual #' + readlist['Issue_Number']
                else:
                    comicname = comicinfo['ComicName'] + ' Annual'
                    dspinfo = comicname + ' #' + comicissue
            ctrlval = {"IssueID": self.IssueID}
            newval = {"DateAdded": helpers.today(),
                      "Status": "Added",
                      "ComicID": readlist['ComicID'],
                      "Issue_Number": comicissue,
                      "IssueDate": readlist['IssueDate'],
                      "SeriesYear": comicinfo['ComicYear'],
                      "ComicName": comicname,
                      "Location": locpath}
            myDB.upsert("readlist", newval, ctrlval)
            logger.info(self.module + ' Added ' + dspinfo + ' to the Reading list.')
    return
def scanLibrary(scan=None, queue=None):
    valreturn = []
    if scan:
        try:
            soma, noids = libraryScan()
        except Exception, e:
            logger.error('Unable to complete the scan: %s' % e)
            return
        if soma == "Completed":
            logger.info('Successfully completed import.')
        else:
            logger.info('Starting mass importing...' + str(noids) + ' records.')
            #this is what it should do...
            #store soma (the list of comic_details from importing) into sql table so import can be whenever
            #display webpage showing results
            #allow user to select comic to add (one at a time)
            #call addComic off of the webpage to initiate the add.
            #return to result page to finish or continue adding.
            #....
            #threading.Thread(target=self.searchit).start()
            #threadthis = threadit.ThreadUrl()
            #result = threadthis.main(soma)
            myDB = db.DBConnection()
            sl = 0
            logger.fdebug("number of records: " + str(noids))
            while (sl < int(noids)):
                soma_sl = soma['comic_info'][sl]
                logger.fdebug("soma_sl: " + str(soma_sl))
                logger.fdebug("comicname: " + soma_sl['comicname'].encode('utf-8'))
                logger.fdebug("filename: " + soma_sl['comfilename'].encode('utf-8'))
                controlValue = {"impID": soma_sl['impid']}
                newValue = {"ComicYear": soma_sl['comicyear'],
                            "Status": "Not Imported",
                            "ComicName": soma_sl['comicname'].encode('utf-8'),
                            "DisplayName": soma_sl['displayname'].encode('utf-8'),
                            "ComicFilename": soma_sl['comfilename'].encode('utf-8'),
                            "ComicLocation": soma_sl['comlocation'].encode('utf-8'),
                            "ImportDate": helpers.today(),
                            "WatchMatch": soma_sl['watchmatch']}
                myDB.upsert("importresults", newValue, controlValue)
                sl += 1
        # because we could be adding volumes/series that span years, we need to account for this
        # add the year to the db under the term, valid-years
        # add the issue to the db under the term, min-issue
        #locate metadata here.
        # unzip -z filename.cbz will show the comment field of the zip which contains the metadata.
        #self.importResults()
        valreturn.append({"somevalue": 'self.ie',
                          "result": 'success'})
        return queue.put(valreturn)
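# The comment above notes that a .cbz archive's metadata lives in the zip
# comment field ("unzip -z filename.cbz"). A minimal sketch of reading that
# same field from Python with the stdlib; the path argument is a placeholder.
import zipfile


def read_cbz_comment(cbz_path):
    # ZipFile.comment holds the archive-level comment for the whole zip.
    with zipfile.ZipFile(cbz_path, 'r') as zf:
        return zf.comment

# Example: read_cbz_comment('/comics/example.cbz') returns the embedded
# metadata blob, or an empty string if no comment is set.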
def get_artwork_from_cache(self, ComicID=None, IssueID=None, imageURL=None):
    """
    Pass a comicvine id to this function (either ComicID or IssueID)
    """
    self.query_type = "artwork"
    if ComicID:
        self.id = ComicID
        self.id_type = "comic"
    else:
        self.id = IssueID
        self.id_type = "issue"
    if self._exists("artwork") and self._is_current(filename=self.artwork_files[0]):
        return self.artwork_files[0]
    else:
        # we already have the image for the comic in the sql db. Simply retrieve it, and save it.
        image_url = imageURL
        logger.debug("Retrieving comic image from: " + image_url)
        try:
            artwork = urllib2.urlopen(image_url, timeout=20).read()
        except Exception, e:
            logger.error('Unable to open url "' + image_url + '". Error: ' + str(e))
            artwork = None
        if artwork:
            # Make sure the artwork dir exists:
            if not os.path.isdir(self.path_to_art_cache):
                try:
                    os.makedirs(self.path_to_art_cache)
                except Exception, e:
                    logger.error("Unable to create artwork cache dir. Error: " + str(e))
                    self.artwork_errors = True
                    self.artwork_url = image_url
            # Delete the old stuff
            for artwork_file in self.artwork_files:
                try:
                    os.remove(artwork_file)
                except:
                    logger.error("Error deleting file from the cache: " + artwork_file)
            ext = os.path.splitext(image_url)[1]
            artwork_path = os.path.join(self.path_to_art_cache, self.id + "." + helpers.today() + ext)
            try:
                f = open(artwork_path, "wb")
                f.write(artwork)
                f.close()
            except Exception, e:
                logger.error("Unable to write to the cache dir: " + str(e))
                self.artwork_errors = True
                self.artwork_url = image_url
def _is_current(self, filename=None, date=None):
    if filename:
        base_filename = os.path.basename(filename)
        date = base_filename.split(".")[1]
    # Calculate how old the cached file is based on todays date & file date stamp
    # helpers.today() returns todays date in yyyy-mm-dd format
    if self._get_age(helpers.today()) - self._get_age(date) < 30:
        return True
    else:
        return False
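# _is_current() assumes _get_age() maps a 'yyyy-mm-dd' string to an age
# measured in days, so the subtraction above yields how old the cached file
# is. A minimal sketch of such a helper (an assumption -- the real
# implementation is not shown here):
import datetime


def _get_age_days(date_string):
    # Days elapsed between the given 'yyyy-mm-dd' date and the epoch;
    # the difference of two such values is a whole number of days.
    d = datetime.datetime.strptime(date_string, '%Y-%m-%d').date()
    return (d - datetime.date(1970, 1, 1)).days

# Example: _get_age_days('2013-06-30') - _get_age_days('2013-06-01') == 29,
# which _is_current() would treat as still fresh (< 30 days).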
def markissues(self, action=None, **args):
    myDB = db.DBConnection()
    if action == 'WantedNew':
        newaction = 'Wanted'
    else:
        newaction = action
    ComicID = None
    for IssueID in args:
        if IssueID is None:
            break
        print("IssueID:" + IssueID)
        mi = myDB.action("SELECT * FROM issues WHERE IssueID=?", [IssueID]).fetchone()
        miyr = myDB.action("SELECT ComicYear FROM comics WHERE ComicID=?", [mi['ComicID']]).fetchone()
        logger.info(u"Marking %s %s as %s" % (mi['ComicName'], mi['Issue_Number'], newaction))
        controlValueDict = {"IssueID": IssueID}
        newValueDict = {"Status": newaction}
        myDB.upsert("issues", newValueDict, controlValueDict)
        foundcoms = "none"
        if action == 'Skipped':
            pass
        elif action == 'Wanted':
            foundcoms = search.search_init(mi['ComicName'], mi['Issue_Number'], mi['IssueDate'][:4], miyr['ComicYear'])
            #searcher.searchforissue(mbid, new=False)
        elif action == 'WantedNew':
            foundcoms = search.search_init(mi['ComicName'], mi['Issue_Number'], mi['IssueDate'][:4], miyr['ComicYear'])
            #searcher.searchforissue(mbid, new=True)
        if foundcoms == "yes":
            logger.info(u"Found " + mi['ComicName'] + " issue: " + mi['Issue_Number'] + " ! Marking as Snatched...")
            # file check to see if issue exists and update 'have' count
            if IssueID is not None:
                ComicID = mi['ComicID']
                print("ComicID: " + str(ComicID))
                comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
                print("comic location: " + comic['ComicLocation'])
                #fc = filechecker.listFiles(comic['ComicLocation'], mi['ComicName'])
                #HaveDict = {'ComicID': ComicID}
                #newHave = {'Have': fc['comiccount']}
                #myDB.upsert("comics", newHave, HaveDict)
                controlValueDict = {'IssueID': IssueID}
                newValueDict = {'Status': 'Snatched'}
                myDB.upsert("issues", newValueDict, controlValueDict)
                snatchedupdate = {"IssueID": IssueID}
                newsnatchValues = {"ComicName": mi['ComicName'],
                                   "ComicID": ComicID,
                                   "Issue_Number": mi['Issue_Number'],
                                   "DateAdded": helpers.today(),
                                   "Status": "Snatched"}
                myDB.upsert("snatched", newsnatchValues, snatchedupdate)
        else:
            logger.info(u"Couldn't find " + mi['ComicName'] + " issue: " + mi['Issue_Number'] + " ! Status still wanted...")
    if ComicID:
        raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
    else:
        raise cherrypy.HTTPRedirect("upcoming")
def comicScan(self, path, scan=0, redirect=None, autoadd=0, libraryscan=0, imp_move=0, imp_rename=0):
    mylar.LIBRARYSCAN = libraryscan
    mylar.ADD_COMICS = autoadd
    mylar.COMIC_DIR = path
    mylar.IMP_MOVE = imp_move
    mylar.IMP_RENAME = imp_rename
    mylar.config_write()
    if scan:
        try:
            soma = librarysync.libraryScan()
        except Exception, e:
            logger.error('Unable to complete the scan: %s' % e)
            return
        if soma == "Completed":
            print("successfully completed import.")
        else:
            logger.info(u"Starting mass importing...")
            #this is what it should do...
            #store soma (the list of comic_details from importing) into sql table so import can be whenever
            #display webpage showing results
            #allow user to select comic to add (one at a time)
            #call addComic off of the webpage to initiate the add.
            #return to result page to finish or continue adding.
            #....
            #threading.Thread(target=self.searchit).start()
            #threadthis = threadit.ThreadUrl()
            #result = threadthis.main(soma)
            myDB = db.DBConnection()
            sl = 0
            while (sl < len(soma)):
                soma_sl = soma['comic_info'][sl]
                print("cname: " + soma_sl['comicname'])
                controlValue = {"ComicName": soma_sl['comicname']}
                newValue = {"ComicYear": soma_sl['comicyear'],
                            "Status": "Not Imported",
                            "ImportDate": helpers.today()}
                myDB.upsert("importresults", newValue, controlValue)
                sl += 1
        self.importResults()
def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=None):
    # Putting this here to get around the circular import. Will try to use this to update images at later date.
    # from mylar import cache
    myDB = db.DBConnection()
    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage
    controlValueDict = {"ComicID": comicid}
    dbcomic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone()
    if dbcomic is None:
        newValueDict = {"ComicName": "Comic ID: %s" % (comicid),
                        "Status": "Loading"}
        comlocation = None
        oldcomversion = None
    else:
        newValueDict = {"Status": "Loading"}
        comlocation = dbcomic['ComicLocation']
        filechecker.validateAndCreateDirectory(comlocation, True)
        oldcomversion = dbcomic['ComicVersion']  #store the comicversion and chk if it exists before hammering.
    myDB.upsert("comics", newValueDict, controlValueDict)
    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(comicorder=mylar.COMICSORT, imported=comicid)
    # we need to lookup the info for the requested ComicID in full now
    comic = cv.getComic(comicid, 'comic')
    #comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone()
    if not comic:
        logger.warn("Error fetching comic. ID for : " + comicid)
        if dbcomic is None:
            newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (comicid),
                            "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return
    if comic['ComicName'].startswith('The '):
        sortname = comic['ComicName'][4:]
    else:
        sortname = comic['ComicName']
    logger.info(u"Now adding/updating: " + comic['ComicName'])
    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    if not mylar.CV_ONLY:
        if mismatch == "no" or mismatch is None:
            gcdinfo = parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid)
            #print ("gcdinfo: " + str(gcdinfo))
            mismatch_com = "no"
            if gcdinfo == "No Match":
                updater.no_searchresults(comicid)
                nomatch = "true"
                logger.info(u"There was an error when trying to add " + comic['ComicName'] + " (" + comic['ComicYear'] + ")")
                return nomatch
            else:
                mismatch_com = "yes"
                #print ("gcdinfo:" + str(gcdinfo))
        elif mismatch == "yes":
            CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
            if CV_EXcomicid['variloop'] is None:
                pass
            else:
                vari_loop = CV_EXcomicid['variloop']
                NewComicID = CV_EXcomicid['NewComicID']
                gcomicid = CV_EXcomicid['GComicID']
                resultURL = "/series/" + str(NewComicID) + "/"
                #print ("variloop" + str(CV_EXcomicid['variloop']))
                #if vari_loop == '99':
                gcdinfo = parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=comicid, TotalIssues=0, issvariation="no", resultPublished=None)
        logger.info(u"Successfully retrieved details for " + comic['ComicName'])
        # print ("Series Published" + parseit.resultPublished)
    CV_NoYearGiven = "no"
    #if the SeriesYear returned by CV is blank or none (0000), let's use the gcd one.
    if comic['ComicYear'] is None or comic['ComicYear'] == '0000':
        if mylar.CV_ONLY:
            #we'll defer this until later when we grab all the issues and then figure it out
            logger.info("Uh-oh. I can't find a Series Year for this series. I'm going to try analyzing deeper.")
            SeriesYear = cv.getComic(comicid, 'firstissue', comic['FirstIssueID'])
            if SeriesYear == '0000':
                logger.info("Ok - I couldn't find a Series Year at all. Loading in the issue data now and will figure out the Series Year.")
                CV_NoYearGiven = "yes"
                issued = cv.getComic(comicid, 'issue')
                SeriesYear = issued['firstdate'][:4]
        else:
            SeriesYear = gcdinfo['SeriesYear']
    else:
        SeriesYear = comic['ComicYear']
    #let's do the Annual check here.
    if mylar.ANNUALS_ON:
        annualcomicname = re.sub('[\,\:]', '', comic['ComicName'])
        annuals = comicbookdb.cbdb(annualcomicname, SeriesYear)
        print("Number of Annuals returned: " + str(annuals['totalissues']))
        nb = 0
        while (nb <= int(annuals['totalissues'])):
            try:
                annualval = annuals['annualslist'][nb]
            except IndexError:
                break
            newCtrl = {"IssueID": str(annualval['AnnualIssue'] + annualval['AnnualDate'])}
            newVals = {"Issue_Number": annualval['AnnualIssue'],
                       "IssueDate": annualval['AnnualDate'],
                       "IssueName": annualval['AnnualTitle'],
                       "ComicID": comicid,
                       "Status": "Skipped"}
            myDB.upsert("annuals", newVals, newCtrl)
            nb += 1
    #parseit.annualCheck(gcomicid=gcdinfo['GCDComicID'], comicid=comicid, comicname=comic['ComicName'], comicyear=SeriesYear)
    #comic book location on machine
    # setup default location here
    if comlocation is None:
        # let's remove the non-standard characters here.
        u_comicnm = comic['ComicName']
        u_comicname = u_comicnm.encode('ascii', 'ignore').strip()
        if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname:
            comicdir = u_comicname
            if ':' in comicdir:
                comicdir = comicdir.replace(':', '')
            if '/' in comicdir:
                comicdir = comicdir.replace('/', '-')
            if ',' in comicdir:
                comicdir = comicdir.replace(',', '')
            if '?' in comicdir:
                comicdir = comicdir.replace('?', '')
        else:
            comicdir = u_comicname
        series = comicdir
        publisher = re.sub('!', '', comic['ComicPublisher'])  # thanks Boom!
        year = SeriesYear
        comversion = comic['ComicVersion']
        if comversion is None:
            comversion = 'None'
        #if comversion is None, remove it so it doesn't populate with 'None'
        if comversion == 'None':
            chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT)
            chunk_f = re.compile(r'\s+')
            mylar.FILE_FORMAT = chunk_f.sub(' ', chunk_f_f)
        #do work to generate folder path
        values = {'$Series': series,
                  '$Publisher': publisher,
                  '$Year': year,
                  '$series': series.lower(),
                  '$publisher': publisher.lower(),
                  '$VolumeY': 'V' + str(year),
                  '$VolumeN': comversion}
        #print mylar.FOLDER_FORMAT
        #print 'working dir:'
        #print helpers.replace_all(mylar.FOLDER_FORMAT, values)
        if mylar.FOLDER_FORMAT == '':
            comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + SeriesYear + ")"
        else:
            comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values)
        #comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)
    #moved this out of the above loop so it will chk for existance of comlocation in case moved
    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
    else:
        #print ("Directory doesn't exist!")
        #try:
        #    os.makedirs(str(comlocation))
        #    logger.info(u"Directory successfully created at: " + str(comlocation))
        #except OSError:
        #    logger.error(u"Could not create comicdir : " + str(comlocation))
        filechecker.validateAndCreateDirectory(comlocation, True)
    #try to account for CV not updating new issues as fast as GCD
    #seems CV doesn't update total counts
    #comicIssues = gcdinfo['totalissues']
    comicIssues = comic['ComicIssues']
    if not mylar.CV_ONLY:
        if gcdinfo['gcdvariation'] == "cv":
            comicIssues = str(int(comic['ComicIssues']) + 1)
    #let's download the image...
    if os.path.exists(mylar.CACHE_DIR):
        pass
    else:
        #let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
        except OSError:
            logger.error('Could not create cache dir. Check permissions of cache dir: ' + str(mylar.CACHE_DIR))
    coverfile = os.path.join(mylar.CACHE_DIR, str(comicid) + ".jpg")
    #try:
    urllib.urlretrieve(str(comic['ComicImage']), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = os.path.join('cache', str(comicid) + ".jpg")
            #this is for Firefox when outside the LAN...it works, but I don't know how to implement it
            #without breaking the normal flow for inside the LAN (above)
            #ComicImage = "http://" + str(mylar.HTTP_HOST) + ":" + str(mylar.HTTP_PORT) + "/cache/" + str(comicid) + ".jpg"
            logger.info(u"Successfully retrieved cover for " + comic['ComicName'])
            #if the comic cover local is checked, save a cover.jpg to the series folder.
            if mylar.COMIC_COVER_LOCAL:
                comiclocal = os.path.join(str(comlocation) + "/cover.jpg")
                shutil.copy(ComicImage, comiclocal)
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")
    if oldcomversion is None:
        if comic['ComicVersion'].isdigit():
            comicVol = "v" + comic['ComicVersion']
        else:
            comicVol = None
    else:
        comicVol = oldcomversion
    #for description ...
    #Cdesc = helpers.cleanhtml(comic['ComicDescription'])
    #cdes_find = Cdesc.find("Collected")
    #cdes_removed = Cdesc[:cdes_find]
    #print cdes_removed
    controlValueDict = {"ComicID": comicid}
    newValueDict = {"ComicName": comic['ComicName'],
                    "ComicSortName": sortname,
                    "ComicYear": SeriesYear,
                    "ComicImage": ComicImage,
                    "Total": comicIssues,
                    "ComicVersion": comicVol,
                    "ComicLocation": comlocation,
                    "ComicPublisher": comic['ComicPublisher'],
                    #"Description": Cdesc.decode('utf-8', 'replace'),
                    "DetailURL": comic['ComicURL'],
                    #"ComicPublished": gcdinfo['resultPublished'],
                    "ComicPublished": 'Unknown',
                    "DateAdded": helpers.today(),
                    "Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)
    #comicsort here...
    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(sequence='update')
    if CV_NoYearGiven == 'no':
        #if set to 'no' then we haven't pulled down the issues, otherwise we did it already
        issued = cv.getComic(comicid, 'issue')
    logger.info(u"Successfully retrieved issue details for " + comic['ComicName'])
    n = 0
    iscnt = int(comicIssues)
    issid = []
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    firstiss = "10000000"
    firstdate = "2099-00-00"
    #print ("total issues:" + str(iscnt))
    #---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + comic['ComicName'])
    if not mylar.CV_ONLY:
        #fccnt = int(fc['comiccount'])
        #logger.info(u"Found " + str(fccnt) + "/" + str(iscnt) + " issues of " + comic['ComicName'] + "...verifying")
        #fcnew = []
        if iscnt > 0:
            #if a series is brand new, it wont have any issues/details yet so skip this part
            while (n <= iscnt):
                #---NEW.code
                try:
                    firstval = issued['issuechoice'][n]
                except IndexError:
                    break
                cleanname = helpers.cleanName(firstval['Issue_Name'])
                issid = str(firstval['Issue_ID'])
                issnum = str(firstval['Issue_Number'])
                #print ("issnum: " + str(issnum))
                issname = cleanname
                if '.' in str(issnum):
                    issn_st = str(issnum).find('.')
                    issn_b4dec = str(issnum)[:issn_st]
                    #if the length of decimal is only 1 digit, assume it's a tenth
                    dec_is = str(issnum)[issn_st + 1:]
                    if len(dec_is) == 1:
                        dec_nisval = int(dec_is) * 10
                        iss_naftdec = str(dec_nisval)
                    if len(dec_is) == 2:
                        dec_nisval = int(dec_is)
                        iss_naftdec = str(dec_nisval)
                    iss_issue = issn_b4dec + "." + iss_naftdec
                    issis = (int(issn_b4dec) * 1000) + dec_nisval
                elif 'au' in issnum.lower():
                    print("au detected")
                    stau = issnum.lower().find('au')
                    issnum_au = issnum[:stau]
                    print("issnum_au: " + str(issnum_au))
                    #account for Age of Ultron mucked up numbering
                    issis = str(int(issnum_au) * 1000) + 'AU'
                else:
                    issis = int(issnum) * 1000
                bb = 0
                while (bb <= iscnt):
                    try:
                        gcdval = gcdinfo['gcdchoice'][bb]
                        #print ("gcdval: " + str(gcdval))
                    except IndexError:
                        #account for gcd variation here
                        if gcdinfo['gcdvariation'] == 'gcd':
                            #logger.fdebug("gcd-variation accounted for.")
                            issdate = '0000-00-00'
                            int_issnum = int(issis / 1000)
                        break
                    if 'nn' in str(gcdval['GCDIssue']):
                        #no number detected - GN, TP or the like
                        logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
                        updater.no_searchresults(comicid)
                        return
                    elif 'au' in gcdval['GCDIssue'].lower():
                        #account for Age of Ultron mucked up numbering - this is in format of 5AU.00
                        gstau = gcdval['GCDIssue'].lower().find('au')
                        gcdis_au = gcdval['GCDIssue'][:gstau]
                        gcdis = str(int(gcdis_au) * 1000) + 'AU'
                    elif '.' in str(gcdval['GCDIssue']):
                        #logger.fdebug("g-issue:" + str(gcdval['GCDIssue']))
                        issst = str(gcdval['GCDIssue']).find('.')
                        #logger.fdebug("issst:" + str(issst))
                        issb4dec = str(gcdval['GCDIssue'])[:issst]
                        #logger.fdebug("issb4dec:" + str(issb4dec))
                        #if the length of decimal is only 1 digit, assume it's a tenth
                        decis = str(gcdval['GCDIssue'])[issst + 1:]
                        #logger.fdebug("decis:" + str(decis))
                        if len(decis) == 1:
                            decisval = int(decis) * 10
                            issaftdec = str(decisval)
                        if len(decis) == 2:
                            decisval = int(decis)
                            issaftdec = str(decisval)
                        gcd_issue = issb4dec + "." + issaftdec
                        #logger.fdebug("gcd_issue:" + str(gcd_issue))
                        try:
                            gcdis = (int(issb4dec) * 1000) + decisval
                        except ValueError:
                            logger.error("This has no issue #'s for me to get - Either a Graphic Novel or one-shot. This feature to allow these will be added in the near future.")
                            updater.no_searchresults(comicid)
                            return
                    else:
                        gcdis = int(str(gcdval['GCDIssue'])) * 1000
                    if gcdis == issis:
                        issdate = str(gcdval['GCDDate'])
                        if str(issis).isdigit():
                            int_issnum = int(gcdis / 1000)
                        else:
                            if 'au' in issis.lower():
                                int_issnum = str(int(gcdis[:-2]) / 1000) + 'AU'
                            else:
                                logger.error("this has an alpha-numeric in the issue # which I cannot account for. Get on github and log the issue for evilhero.")
                                return
                        #get the latest issue / date using the date.
                        if gcdval['GCDDate'] > latestdate:
                            latestiss = str(issnum)
                            latestdate = str(gcdval['GCDDate'])
                        break
                    #bb = iscnt
                    bb += 1
                #print("(" + str(n) + ") IssueID: " + str(issid) + " IssueNo: " + str(issnum) + " Date" + str(issdate))
                #---END.NEW.
                # check if the issue already exists
                iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()
                controlValueDict = {"IssueID": issid}
                newValueDict = {"ComicID": comicid,
                                "ComicName": comic['ComicName'],
                                "IssueName": issname,
                                "Issue_Number": issnum,
                                "IssueDate": issdate,
                                "Int_IssueNumber": int_issnum}
                # Only change the status & add DateAdded if the issue is not already in the database
                if iss_exists is None:
                    newValueDict['DateAdded'] = helpers.today()
                if mylar.AUTOWANT_ALL:
                    newValueDict['Status'] = "Wanted"
                elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
                    newValueDict['Status'] = "Wanted"
                else:
                    newValueDict['Status'] = "Skipped"
                if iss_exists:
                    #print ("Existing status : " + str(iss_exists['Status']))
                    newValueDict['Status'] = iss_exists['Status']
                try:
                    myDB.upsert("issues", newValueDict, controlValueDict)
                except sqlite3.InterfaceError, e:
                    #raise sqlite3.InterfaceError(e)
                    logger.error("MAJOR error trying to get issue data, this is most likely a MULTI-VOLUME series and you need to use the custom_exceptions.csv file.")
                    myDB.action("DELETE FROM comics WHERE ComicID=?", [comicid])
                    return
                n += 1
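# Both halves of the matching loop above encode an issue number as
# int(whole) * 1000 + a two-digit decimal (e.g. '12.5' -> 12050), carrying an
# 'AU' suffix through as a string for the Age of Ultron numbering. A minimal
# standalone sketch of that encoding (the function name is an assumption):

def encode_issue_number(issnum):
    issnum = str(issnum)
    if 'au' in issnum.lower():
        # '5AU' -> '5000AU'
        whole = issnum[:issnum.lower().find('au')]
        return str(int(whole) * 1000) + 'AU'
    if '.' in issnum:
        whole, dec = issnum.split('.', 1)
        # a single decimal digit is treated as tenths: '.5' == '.50'
        decval = int(dec) * 10 if len(dec) == 1 else int(dec)
        return int(whole) * 1000 + decval
    return int(issnum) * 1000

# Examples: encode_issue_number('12.5') == 12050,
#           encode_issue_number('12.50') == 12050,
#           encode_issue_number('5AU') == '5000AU'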
def addtoreadlist(self):
    annualize = False
    myDB = db.DBConnection()
    readlist = myDB.selectone("SELECT * from issues where IssueID=?", [self.IssueID]).fetchone()
    if readlist is None:
        logger.fdebug(self.module + ' Checking against annuals..')
        readlist = myDB.selectone("SELECT * from annuals where IssueID=?", [self.IssueID]).fetchone()
        if readlist is None:
            logger.error(self.module + ' Cannot locate IssueID - aborting..')
            return
        else:
            logger.fdebug('%s Successfully found annual for %s' % (self.module, readlist['ComicID']))
            annualize = True
    comicinfo = myDB.selectone("SELECT * from comics where ComicID=?", [readlist['ComicID']]).fetchone()
    logger.info(self.module + ' Attempting to add issueid ' + readlist['IssueID'])
    if comicinfo is None:
        logger.info(self.module + ' Issue not located on your current watchlist. I should probably check story-arcs but I do not have that capability just yet.')
    else:
        locpath = None
        if mylar.CONFIG.MULTIPLE_DEST_DIRS is not None and mylar.CONFIG.MULTIPLE_DEST_DIRS != 'None' and os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(comicinfo['ComicLocation'])) != comicinfo['ComicLocation']:
            pathdir = os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(comicinfo['ComicLocation']))
            if os.path.exists(os.path.join(pathdir, readlist['Location'])):
                locpath = os.path.join(pathdir, readlist['Location'])
            else:
                if os.path.exists(os.path.join(comicinfo['ComicLocation'], readlist['Location'])):
                    locpath = os.path.join(comicinfo['ComicLocation'], readlist['Location'])
        else:
            if os.path.exists(os.path.join(comicinfo['ComicLocation'], readlist['Location'])):
                locpath = os.path.join(comicinfo['ComicLocation'], readlist['Location'])
        if locpath is not None:
            comicissue = readlist['Issue_Number']
            if annualize is True:
                comicname = readlist['ReleaseComicName']
            else:
                comicname = comicinfo['ComicName']
            dspinfo = comicname + ' #' + comicissue
            if annualize is True:
                if mylar.CONFIG.ANNUALS_ON is True:
                    dspinfo = comicname + ' #' + readlist['Issue_Number']
                    if 'annual' in comicname.lower():
                        comicissue = 'Annual ' + readlist['Issue_Number']
                    elif 'special' in comicname.lower():
                        comicissue = 'Special ' + readlist['Issue_Number']
            ctrlval = {"IssueID": self.IssueID}
            newval = {"DateAdded": helpers.today(),
                      "Status": "Added",
                      "ComicID": readlist['ComicID'],
                      "Issue_Number": comicissue,
                      "IssueDate": readlist['IssueDate'],
                      "SeriesYear": comicinfo['ComicYear'],
                      "ComicName": comicname,
                      "Location": locpath}
            myDB.upsert("readlist", newval, ctrlval)
            logger.info(self.module + ' Added ' + dspinfo + ' to the Reading list.')
    return
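# Both addtoreadlist() versions resolve an issue's file path the same way:
# prefer the series folder mirrored under MULTIPLE_DEST_DIRS, then fall back
# to the canonical ComicLocation. A minimal sketch of that lookup (the
# function name and arguments are assumptions for illustration):
import os


def resolve_issue_path(comic_location, filename, multiple_dest_dirs=None):
    if multiple_dest_dirs and multiple_dest_dirs != 'None':
        # same series folder name, but rooted under the alternate destination
        alt = os.path.join(multiple_dest_dirs, os.path.basename(comic_location))
        if alt != comic_location and os.path.exists(os.path.join(alt, filename)):
            return os.path.join(alt, filename)
    candidate = os.path.join(comic_location, filename)
    return candidate if os.path.exists(candidate) else None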
def syncreading(self):
    #3 status' exist for the readlist.
    # Added (Not Read) - Issue is added to the readlist and is awaiting to be 'sent' to your reading client.
    # Read - Issue has been read
    # Not Read - Issue has been downloaded to your reading client after the syncfiles has taken place.
    module = '[READLIST-TRANSFER]'
    myDB = db.DBConnection()
    readlist = []
    cidlist = []
    sendlist = []
    if self.filelist is None:
        rl = myDB.select('SELECT issues.IssueID, comics.ComicID, comics.ComicLocation, issues.Location FROM readlist LEFT JOIN issues ON issues.IssueID = readlist.IssueID LEFT JOIN comics on comics.ComicID = issues.ComicID WHERE readlist.Status="Added"')
        if rl is None:
            logger.info(module + ' No issues have been marked to be synced. Aborting syncfiles')
            return
        for rlist in rl:
            readlist.append({"filepath": os.path.join(rlist['ComicLocation'], rlist['Location']),
                             "issueid": rlist['IssueID'],
                             "comicid": rlist['ComicID']})
    else:
        readlist = self.filelist
    if len(readlist) > 0:
        for clist in readlist:
            if clist['filepath'] == 'None' or clist['filepath'] is None:
                logger.warn(module + ' There was a problem with ComicID/IssueID: [' + clist['comicid'] + '/' + clist['issueid'] + ']. I cannot locate the file in the given location (try re-adding to your readlist)[' + clist['filepath'] + ']')
                continue
            else:
                # multiplecid = False
                # for x in cidlist:
                #     if clist['comicid'] == x['comicid']:
                #         comicid = x['comicid']
                #         comiclocation = x['location']
                #         multiplecid = True
                # if multiplecid == False:
                #     cid = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [clist['comicid']]).fetchone()
                #     if cid is None:
                #         continue
                #     else:
                #         comiclocation = cid['ComicLocation']
                #         comicid = cid['ComicID']
                # if mylar.CONFIG.MULTIPLE_DEST_DIRS is not None and mylar.CONFIG.MULTIPLE_DEST_DIRS != 'None' and os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(comiclocation)) != comiclocation:
                #     logger.fdebug(module + ' Multiple_dest_dirs:' + mylar.CONFIG.MULTIPLE_DEST_DIRS)
                #     logger.fdebug(module + ' Dir: ' + comiclocation)
                #     logger.fdebug(module + ' Os.path.basename: ' + os.path.basename(comiclocation))
                #     pathdir = os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(comiclocation))
                if os.path.exists(clist['filepath']):
                    sendlist.append({"issueid": clist['issueid'],
                                     "filepath": clist['filepath'],
                                     "filename": os.path.split(clist['filepath'])[1]})
                # else:
                #     if os.path.exists(os.path.join(comiclocation, clist['filename'])):
                #         sendlist.append({"issueid": clist['issueid'],
                #                          "filepath": comiclocation,
                #                          "filename": clist['filename']})
                #     else:
                #         if os.path.exists(os.path.join(comiclocation, clist['filename'])):
                #             sendlist.append({"issueid": clist['issueid'],
                #                              "filepath": comiclocation,
                #                              "filename": clist['filename']})
                else:
                    logger.warn(module + ' ' + clist['filepath'] + ' does not exist in the given location. Remove from the Reading List and Re-add and/or confirm the file exists in the specified location')
                    continue
            # #cidlist is just for this reference loop to not make unnecessary db calls if the comicid has already been processed.
            # cidlist.append({"comicid": clist['comicid'],
            #                 "issueid": clist['issueid'],
            #                 "location": comiclocation})  #store the comicid so we don't make multiple sql requests
    if len(sendlist) == 0:
        logger.info(module + ' Nothing to send from your readlist')
        return
    logger.info(module + ' ' + str(len(sendlist)) + ' issues will be sent to your reading device.')
    # test if IP is up.
    import shlex
    import subprocess
    #fhost = mylar.CONFIG.TAB_HOST.find(':')
    host = mylar.CONFIG.TAB_HOST[:mylar.CONFIG.TAB_HOST.find(':')]
    if 'windows' not in mylar.OS_DETECT.lower():
        cmdstring = str('ping -c1 ' + str(host))
    else:
        cmdstring = str('ping -n 1 ' + str(host))
    cmd = shlex.split(cmdstring)
    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        logger.info(module + ' The host {0} is not Reachable at this time.'.format(cmd[-1]))
        return
    else:
        if 'unreachable' in output:
            logger.info(module + ' The host {0} is not Reachable at this time.'.format(cmd[-1]))
            return
        else:
            logger.info(module + ' The host {0} is Reachable. Preparing to send files.'.format(cmd[-1]))
    success = mylar.ftpsshup.sendfiles(sendlist)
    if success == 'fail':
        return
    if len(success) > 0:
        for succ in success:
            newCTRL = {"issueid": succ['issueid']}
            newVAL = {"Status": 'Downloaded',
                      "StatusChange": helpers.today()}
            myDB.upsert("readlist", newVAL, newCTRL)
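# The reachability probe above shells out to the platform's ping binary
# ('-c1' on unix-likes, '-n 1' on Windows) and also scans the output, since
# Windows ping can exit 0 yet report 'unreachable'. A minimal standalone
# sketch of the same check (the function name is an assumption):
import shlex
import subprocess
import sys


def host_is_reachable(host):
    flag = '-n 1' if sys.platform.startswith('win') else '-c1'
    cmd = shlex.split('ping %s %s' % (flag, host))
    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        return False
    return b'unreachable' not in output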
def GCDimport(gcomicid, pullupd=None, imported=None, ogcname=None):
    # this is for importing via GCD only and not using CV.
    # used when volume spanning is discovered for a Comic (and can't be added using CV).
    # Issue Counts are wrong (and can't be added).
    # because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
    # CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)
    gcdcomicid = gcomicid
    myDB = db.DBConnection()
    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage
    controlValueDict = {"ComicID": gcdcomicid}
    comic = myDB.action('SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation, ComicPublisher FROM comics WHERE ComicID=?', [gcomicid]).fetchone()
    ComicName = comic[0]
    ComicYear = comic[1]
    ComicIssues = comic[2]
    ComicPublished = comic[3]
    comlocation = comic[5]
    ComicPublisher = comic[6]
    #ComicImage = comic[4]
    #print ("Comic:" + str(ComicName))
    newValueDict = {"Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)
    # we need to lookup the info for the requested ComicID in full now
    #comic = cv.getComic(comicid,'comic')
    if not comic:
        logger.warn("Error fetching comic. ID for : " + gcdcomicid)
        if comic is None:
            newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (gcdcomicid),
                            "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return
    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(comicorder=mylar.COMICSORT, imported=gcomicid)
    if ComicName.startswith('The '):
        sortname = ComicName[4:]
    else:
        sortname = ComicName
    logger.info(u"Now adding/updating: " + ComicName)
    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    comicid = gcomicid[1:]
    resultURL = "/series/" + str(comicid) + "/"
    gcdinfo = parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=gcdcomicid, TotalIssues=ComicIssues, issvariation=None, resultPublished=None)
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")")
        updater.no_searchresults(gcomicid)
        nomatch = "true"
        return nomatch
    logger.info(u"Successfully retrieved details for " + ComicName)
    # print ("Series Published" + parseit.resultPublished)
    #--End
    ComicImage = gcdinfo['ComicImage']
    #comic book location on machine
    # setup default location here
    if comlocation is None:
        # let's remove the non-standard characters here.
        u_comicnm = ComicName
        u_comicname = u_comicnm.encode('ascii', 'ignore').strip()
        if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname:
            comicdir = u_comicname
            if ':' in comicdir:
                comicdir = comicdir.replace(':', '')
            if '/' in comicdir:
                comicdir = comicdir.replace('/', '-')
            if ',' in comicdir:
                comicdir = comicdir.replace(',', '')
        else:
            comicdir = u_comicname
        series = comicdir
        publisher = ComicPublisher
        year = ComicYear
        #do work to generate folder path
        values = {'$Series': series,
                  '$Publisher': publisher,
                  '$Year': year,
                  '$series': series.lower(),
                  '$publisher': publisher.lower(),
                  '$Volume': year}
        if mylar.FOLDER_FORMAT == '':
            comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
        else:
            comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values)
        #comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)
    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
    else:
        #print ("Directory doesn't exist!")
        #try:
        #    os.makedirs(str(comlocation))
        #    logger.info(u"Directory successfully created at: " + str(comlocation))
        #except OSError:
        #    logger.error(u"Could not create comicdir : " + str(comlocation))
        filechecker.validateAndCreateDirectory(comlocation, True)
    comicIssues = gcdinfo['totalissues']
    #let's download the image...
    if os.path.exists(mylar.CACHE_DIR):
        pass
    else:
        #let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
        except OSError:
            logger.error(u"Could not create cache dir : " + str(mylar.CACHE_DIR))
    coverfile = os.path.join(mylar.CACHE_DIR, str(gcomicid) + ".jpg")
    #try:
    urllib.urlretrieve(str(ComicImage), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = os.path.join('cache', str(gcomicid) + ".jpg")
            #this is for Firefox when outside the LAN...it works, but I don't know how to implement it
            #without breaking the normal flow for inside the LAN (above)
            #ComicImage = "http://" + str(mylar.HTTP_HOST) + ":" + str(mylar.HTTP_PORT) + "/cache/" + str(comicid) + ".jpg"
            logger.info(u"Successfully retrieved cover for " + ComicName)
            #if the comic cover local is checked, save a cover.jpg to the series folder.
            if mylar.COMIC_COVER_LOCAL:
                comiclocal = os.path.join(str(comlocation) + "/cover.jpg")
                shutil.copy(ComicImage, comiclocal)
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")
    #if comic['ComicVersion'].isdigit():
    #    comicVol = "v" + comic['ComicVersion']
    #else:
    #    comicVol = None
    controlValueDict = {"ComicID": gcomicid}
    newValueDict = {"ComicName": ComicName,
                    "ComicSortName": sortname,
                    "ComicYear": ComicYear,
                    "Total": comicIssues,
                    "ComicLocation": comlocation,
                    #"ComicVersion": comicVol,
                    "ComicImage": ComicImage,
                    #"ComicPublisher": comic['ComicPublisher'],
                    #"ComicPublished": comicPublished,
                    "DateAdded": helpers.today(),
                    "Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)
    #comicsort here...
    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(sequence='update')
    logger.info(u"Successfully retrieved issue details for " + ComicName)
    n = 0
    iscnt = int(comicIssues)
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    #print ("total issues:" + str(iscnt))
    #---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + ComicName)
    bb = 0
    while (bb <= iscnt):
        #---NEW.code
        try:
            gcdval = gcdinfo['gcdchoice'][bb]
            #print ("gcdval: " + str(gcdval))
        except IndexError:
            #account for gcd variation here
            if gcdinfo['gcdvariation'] == 'gcd':
                #print ("gcd-variation accounted for.")
                issdate = '0000-00-00'
                int_issnum = int(issis / 1000)
            break
        if 'nn' in str(gcdval['GCDIssue']):
            #no number detected - GN, TP or the like
            logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
            updater.no_searchresults(comicid)
            return
        elif '.' in str(gcdval['GCDIssue']):
            issst = str(gcdval['GCDIssue']).find('.')
            issb4dec = str(gcdval['GCDIssue'])[:issst]
            #if the length of decimal is only 1 digit, assume it's a tenth
            decis = str(gcdval['GCDIssue'])[issst + 1:]
            if len(decis) == 1:
                decisval = int(decis) * 10
                issaftdec = str(decisval)
            if len(decis) == 2:
                decisval = int(decis)
                issaftdec = str(decisval)
            if int(issaftdec) == 0:
                issaftdec = "00"
            gcd_issue = issb4dec + "." + issaftdec
            gcdis = (int(issb4dec) * 1000) + decisval
        else:
            gcdis = int(str(gcdval['GCDIssue'])) * 1000
            gcd_issue = str(gcdval['GCDIssue'])
        #get the latest issue / date using the date.
        int_issnum = int(gcdis / 1000)
        issdate = str(gcdval['GCDDate'])
        issid = "G" + str(gcdval['IssueID'])
        if gcdval['GCDDate'] > latestdate:
            latestiss = str(gcd_issue)
            latestdate = str(gcdval['GCDDate'])
        #print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate))
        #---END.NEW.
        # check if the issue already exists
        iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()
        #adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
        if "?" in str(issdate):
            issdate = "0000-00-00"
        controlValueDict = {"IssueID": issid}
        newValueDict = {"ComicID": gcomicid,
                        "ComicName": ComicName,
                        "Issue_Number": gcd_issue,
                        "IssueDate": issdate,
                        "Int_IssueNumber": int_issnum}
        # Only change the status & add DateAdded if the issue is not already in the database
        if iss_exists is None:
            newValueDict['DateAdded'] = helpers.today()
        #print ("issueid:" + str(controlValueDict))
        #print ("values:" + str(newValueDict))
        if mylar.AUTOWANT_ALL:
            newValueDict['Status'] = "Wanted"
        elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
            newValueDict['Status'] = "Wanted"
        else:
            newValueDict['Status'] = "Skipped"
        if iss_exists:
            #print ("Existing status : " + str(iss_exists['Status']))
            newValueDict['Status'] = iss_exists['Status']
        myDB.upsert("issues", newValueDict, controlValueDict)
        bb += 1
    # logger.debug(u"Updating comic cache for " + ComicName)
    # cache.getThumb(ComicID=issue['issueid'])
    # logger.debug(u"Updating cache for: " + ComicName)
    # cache.getThumb(ComicIDcomicid)
    controlValueStat = {"ComicID": gcomicid}
    newValueStat = {"Status": "Active",
                    "LatestIssue": latestiss,
                    "LatestDate": latestdate,
                    "LastUpdated": helpers.now()}
    myDB.upsert("comics", newValueStat, controlValueStat)
    if mylar.CVINFO:
        if not os.path.exists(comlocation + "/cvinfo"):
            with open(comlocation + "/cvinfo", "w") as text_file:
                text_file.write("http://www.comicvine.com/volume/49-" + str(comicid))
    logger.info(u"Updating complete for: " + ComicName)
    #move the files...if imported is not empty (meaning it's not from the mass importer.)
    if imported is None or imported == 'None':
        pass
    else:
        if mylar.IMP_MOVE:
            logger.info("Mass import - Move files")
            moveit.movefiles(gcomicid, comlocation, ogcname)
        else:
            logger.info("Mass import - Moving not Enabled. Setting Archived Status for import.")
            moveit.archivefiles(gcomicid, ogcname)
    #check for existing files...
    updater.forceRescan(gcomicid)
    if pullupd is None:
        # let's check the pullist for anything at this time as well since we're here.
        if mylar.AUTOWANT_UPCOMING and 'Present' in ComicPublished:
            logger.info(u"Checking this week's pullist for new issues of " + ComicName)
            updater.newpullcheck(comic['ComicName'], gcomicid)
        #here we grab issues that have been marked as wanted above...
        results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid])
        if results:
            logger.info(u"Attempting to grab wanted issues for : " + ComicName)
            for result in results:
                foundNZB = "none"
                if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
                    foundNZB = search.searchforissue(result['IssueID'])
                    if foundNZB == "yes":
                        updater.foundsearch(result['ComicID'], result['IssueID'])
        else:
            logger.info(u"No issues marked as wanted for " + ComicName)
        logger.info(u"Finished grabbing what I could.")
def addComictoDB(comicid):
    # Putting this here to get around the circular import. Will try to use this to update images at later date.
    from mylar import cache
    myDB = db.DBConnection()
    # myDB.action('DELETE from blacklist WHERE ComicID=?', [comicid])
    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage
    controlValueDict = {"ComicID": comicid}
    dbcomic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone()
    if dbcomic is None:
        newValueDict = {"ComicName": "Comic ID: %s" % (comicid),
                        "Status": "Loading"}
    else:
        newValueDict = {"Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)
    # we need to lookup the info for the requested ComicID in full now
    comic = cv.getComic(comicid, 'comic')
    if not comic:
        logger.warn("Error fetching comic. ID for : " + comicid)
        if dbcomic is None:
            newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (comicid),
                            "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return
    if comic['ComicName'].startswith('The '):
        sortname = comic['ComicName'][4:]
    else:
        sortname = comic['ComicName']
    logger.info(u"Now adding/updating: " + comic['ComicName'])
    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    gcdinfo = parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid)
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + comic['ComicName'] + " (" + comic['ComicYear'] + ")")
        return
    logger.info(u"Successfully retrieved details for " + comic['ComicName'])
    # print ("Series Published" + parseit.resultPublished)
    #--End
    #comic book location on machine
    # setup default location here
    comlocation = mylar.DESTINATION_DIR + "/" + comic['ComicName'] + " (" + comic['ComicYear'] + ")"
    #if mylar.REPLACE_SPACES == "yes":
    #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
    mylarREPLACE_CHAR = '_'
    comlocation = comlocation.replace(' ', mylarREPLACE_CHAR)
    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
    else:
        #print ("Directory doesn't exist!")
        try:
            os.makedirs(str(comlocation))
            logger.info(u"Directory successfully created at: " + str(comlocation))
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    #print ("root dir for series: " + comlocation)
    #try to account for CV not updating new issues as fast as GCD
    if gcdinfo['gcdvariation'] == "yes":
        comicIssues = str(int(comic['ComicIssues']) + 1)
    else:
        comicIssues = comic['ComicIssues']
    controlValueDict = {"ComicID": comicid}
    newValueDict = {"ComicName": comic['ComicName'],
                    "ComicSortName": sortname,
                    "ComicYear": comic['ComicYear'],
                    "ComicImage": comic['ComicImage'],
                    "Total": comicIssues,
                    "Description": comic['ComicDesc'],
                    "ComicLocation": comlocation,
                    "ComicPublisher": comic['ComicPublisher'],
                    "ComicPublished": parseit.resultPublished,
                    "DateAdded": helpers.today(),
                    "Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)
    issued = cv.getComic(comicid, 'issue')
    logger.info(u"Successfully retrieved issue details for " + comic['ComicName'])
    n = 0
    iscnt = int(comicIssues)
    issid = []
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    while (n < iscnt):
        firstval = issued['issuechoice'][n]
        cleanname = helpers.cleanName(firstval['Issue_Name'])
        issid.append(str(firstval['Issue_ID']))
        issnum.append(str(firstval['Issue_Number']))
        issname.append(cleanname)
        bb = 0
        while (bb < iscnt):
            gcdval = gcdinfo['gcdchoice'][bb]
            #print ("issuecompare: " + str(issnum[n]))
            #print ("issuecheck: " + str(gcdval['GCDIssue']))
            if str(gcdval['GCDIssue']) == str(issnum[n]):
                issdate.append(str(gcdval['GCDDate']))
                issnumchg = issnum[n].replace(".00", "")
                #print ("issnumchg" + str(issnumchg) + "...latestiss:" + str(latestiss))
                int_issnum.append(int(issnumchg))
                #get the latest issue / date using the date.
                if gcdval['GCDDate'] > latestdate:
                    latestiss = str(issnumchg)
                    latestdate = str(gcdval['GCDDate'])
                bb = iscnt
            bb += 1
        #logger.info(u"IssueID: " + str(issid[n]) + " IssueNo: " + str(issnum[n]) + " Date" + str(issdate[n]))
        n += 1
    latestiss = latestiss + ".00"
    #once again - thanks to the new 52 reboot...start n at 0.
    n = 0
    logger.info(u"Now adding/updating issues for " + comic['ComicName'])
    # file check to see if issue exists
    logger.info(u"Checking directory for existing issues.")
    fc = filechecker.listFiles(dir=comlocation, watchcomic=comic['ComicName'])
    havefiles = 0
    fccnt = int(fc['comiccount'])
    logger.info(u"Found " + str(fccnt) + " issues of " + comic['ComicName'])
    fcnew = []
    while (n < iscnt):
        fn = 0
        haveissue = "no"
        #print ("on issue " + str(int(n+1)) + " of " + str(iscnt) + " issues")
        # check if the issue already exists
        iss_exists = myDB.select('SELECT * from issues WHERE IssueID=?', [issid[n]])
        #print ("checking issue: " + str(int_issnum[n]))
        # stupid way to do this, but check each issue against file-list in fc.
        while (fn < fccnt):
            tmpfc = fc['comiclist'][fn]
            #print (str(int_issnum[n]) + " against ... " + str(tmpfc['ComicFilename']))
            temploc = tmpfc['ComicFilename'].replace('_', ' ')
            fcnew = shlex.split(str(temploc))
            fcn = len(fcnew)
            som = 0
            # this loop searches each word in the filename for a match.
            while (som < fcn):
                #print (fcnew[som])
                #counts get buggered up when the issue is the last field in the filename - ie. '50.cbr'
                if ".cbr" in fcnew[som]:
                    fcnew[som] = fcnew[som].replace(".cbr", "")
                elif ".cbz" in fcnew[som]:
                    fcnew[som] = fcnew[som].replace(".cbz", "")
                if fcnew[som].isdigit():
                    #print ("digit detected")
                    #good ol' 52 again....
                    if int(fcnew[som]) > 0:
                        fcdigit = fcnew[som].lstrip('0')
                    else:
                        fcdigit = "0"
                    #print ("filename:" + str(int(fcnew[som])) + " - issue: " + str(int_issnum[n]))
                    if int(fcdigit) == int_issnum[n]:
                        #print ("matched")
                        #print ("We have this issue - " + str(issnum[n]) + " at " + tmpfc['ComicFilename'])
                        havefiles += 1
                        haveissue = "yes"
                        isslocation = str(tmpfc['ComicFilename'])
                        break
                #print ("failed word match on:" + str(fcnew[som]) + "..continuing next word")
                som += 1
            #print (str(temploc) + " doesn't match anything...moving to next file.")
            fn += 1
        if haveissue == "no":
            isslocation = "None"
        controlValueDict = {"IssueID": issid[n]}
        newValueDict = {"ComicID": comicid,
                        "ComicName": comic['ComicName'],
                        "IssueName": issname[n],
                        "Issue_Number": issnum[n],
                        "IssueDate": issdate[n],
                        "Location": isslocation,
                        "Int_IssueNumber": int_issnum[n]}
        # Only change the status & add DateAdded if the issue is not already in the database
        if not len(iss_exists):
            controlValueDict = {"IssueID": issid[n]}
            newValueDict['DateAdded'] = helpers.today()
            if haveissue == "no":
                if mylar.AUTOWANT_ALL:
                    newValueDict['Status'] = "Wanted"
                #elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING:
                #    newValueDict['Status'] = "Wanted"
                else:
                    newValueDict['Status'] = "Skipped"
            elif haveissue == "yes":
                newValueDict['Status'] = "Downloaded"
        myDB.upsert("issues", newValueDict, controlValueDict)
        n += 1
    # logger.debug(u"Updating comic cache for " + comic['ComicName'])
    # cache.getThumb(ComicID=issue['issueid'])
    # newValueDict['LastUpdated'] = helpers.now()
    # myDB.upsert("comics", newValueDict, controlValueDict)
    # logger.debug(u"Updating cache for: " + comic['ComicName'])
    # cache.getThumb(ComicIDcomicid)
    controlValueStat = {"ComicID": comicid}
    newValueStat = {"Status": "Active",
                    "Have": havefiles,
                    "LatestIssue": latestiss,
                    "LatestDate": latestdate}
    myDB.upsert("comics", newValueStat, controlValueStat)
    logger.info(u"Updating complete for: " + comic['ComicName'])
    #here we grab issues that have been marked as wanted above...
    results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid])
    if results:
        logger.info(u"Attempting to grab wanted issues for : " + comic['ComicName'])
        for result in results:
            foundNZB = "none"
            if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
                foundNZB = search.searchforissue(result['IssueID'])
                if foundNZB == "yes":
                    updater.foundsearch(result['ComicID'], result['IssueID'])
    else:
        logger.info(u"No issues marked as wanted for " + comic['ComicName'])
    logger.info(u"Finished grabbing what I could.")
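# The inner loop above tokenizes a filename with shlex and treats any bare
# digit token (after stripping .cbr/.cbz) as a candidate issue number. A
# minimal standalone sketch of that matching step (the function name is an
# assumption for illustration):
import shlex


def filename_matches_issue(filename, issue_number):
    for token in shlex.split(filename.replace('_', ' ')):
        token = token.replace('.cbr', '').replace('.cbz', '')
        if token.isdigit() and int(token) == issue_number:
            return True
    return False

# Example: filename_matches_issue('Batman 052 (2011).cbz', 52) -> True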
def scanLibrary(scan=None, queue=None): mylar.IMPORT_FILES = 0 mylar.IMPORT_PARSED_COUNT = 0 valreturn = [] if scan: try: soma = libraryScan(queue=queue) except Exception as e: logger.error('[IMPORT] Unable to complete the scan: %s' % e) logger.error(traceback.format_exc()) mylar.IMPORT_STATUS = None valreturn.append({"somevalue": 'self.ie', "result": 'error'}) return queue.put(valreturn) if soma == "Completed": logger.info('[IMPORT] Sucessfully completed import.') elif soma == "Fail": mylar.IMPORT_STATUS = 'Failure' valreturn.append({"somevalue": 'self.ie', "result": 'error'}) return queue.put(valreturn) else: mylar.IMPORT_STATUS = 'Now adding the completed results to the DB.' logger.info('[IMPORT] Parsing/Reading of files completed!') logger.info('[IMPORT] Attempting to import ' + str(int(soma['import_cv_ids'] + soma['import_count'])) + ' files into your watchlist.') logger.info('[IMPORT-BREAKDOWN] Files with ComicIDs successfully extracted: ' + str(soma['import_cv_ids'])) logger.info('[IMPORT-BREAKDOWN] Files that had to be parsed: ' + str(soma['import_count'])) logger.info('[IMPORT-BREAKDOWN] Files that were unable to be parsed: ' + str(len(soma['failure_list']))) logger.info('[IMPORT-BREAKDOWN] Files that caused errors during the import: ' + str(len(soma['utter_failure_list']))) #logger.info('[IMPORT-BREAKDOWN] Failure Files: ' + str(soma['failure_list'])) myDB = db.DBConnection() #first we do the CV ones. if int(soma['import_cv_ids']) > 0: for i in soma['CV_import_comicids']: #we need to find the impid in the issueid_list as that holds the impid + other info abc = [x for x in soma['issueid_list'] if x['issueid'] == i['IssueID']] ghi = abc[0]['importinfo'] nspace_dynamicname = re.sub('[\|\s]', '', ghi['dynamicname'].lower()).strip() #these all have related ComicID/IssueID's...just add them as is. 
controlValue = {"impID": ghi['impid']} newValue = {"Status": "Not Imported", "ComicName": i['ComicName'], #helpers.conversion(i['ComicName']), "DisplayName": i['ComicName'], #helpers.conversion(i['ComicName']), "DynamicName": nspace_dynamicname, #helpers.conversion(nspace_dynamicname), "ComicID": i['ComicID'], "IssueID": i['IssueID'], "IssueNumber": i['Issue_Number'], #helpers.conversion(i['Issue_Number']), "Volume": ghi['volume'], "ComicYear": ghi['comicyear'], "ComicFilename": ghi['comfilename'], #helpers.conversion(ghi['comfilename']), "ComicLocation": ghi['comlocation'], #helpers.conversion(ghi['comlocation']), "ImportDate": helpers.today(), "WatchMatch": None} #i['watchmatch']} myDB.upsert("importresults", newValue, controlValue) if int(soma['import_count']) > 0: for ss in soma['import_by_comicids']: nspace_dynamicname = re.sub('[\|\s]', '', ss['dynamicname'].lower()).strip() controlValue = {"impID": ss['impid']} newValue = {"ComicYear": ss['comicyear'], "Status": "Not Imported", "ComicName": ss['comicname'], #helpers.conversion(ss['comicname']), "DisplayName": ss['displayname'], #helpers.conversion(ss['displayname']), "DynamicName": nspace_dynamicname, #helpers.conversion(nspace_dynamicname), "ComicID": ss['comicid'], #if it's been scanned in for cvinfo, this will be the CID - otherwise it's None "IssueID": None, "Volume": ss['volume'], "IssueNumber": ss['issuenumber'], #helpers.conversion(ss['issuenumber']), "ComicFilename": ss['comfilename'], #helpers.conversion(ss['comfilename']), "ComicLocation": ss['comlocation'], #helpers.conversion(ss['comlocation']), "ImportDate": helpers.today(), "WatchMatch": ss['watchmatch']} myDB.upsert("importresults", newValue, controlValue) # because we could be adding volumes/series that span years, we need to account for this # add the year to the db under the term, valid-years # add the issue to the db under the term, min-issue #locate metadata here. # unzip -z filename.cbz will show the comment field of the zip which contains the metadata. #self.importResults() mylar.IMPORT_STATUS = 'Import completed.' valreturn.append({"somevalue": 'self.ie', "result": 'success'}) return queue.put(valreturn)
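# NOTE (editor): both import branches above build DynamicName the same way -
# lowercase the parsed name, then strip pipes and whitespace so lookups are
# insensitive to spacing/separator differences. Isolated here as a sketch
# (hypothetical helper; re is already imported by this module):
def _dynamic_name(name):
    #collapse a series name to the normalised key stored in importresults
    return re.sub(r'[\|\s]', '', name.lower()).strip()
#e.g. _dynamic_name('Batman | Detective Comics') -> 'batmandetectivecomics'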
def update_metadata(self): for cid in self.comiclist: if self.refreshSeries is True: updater.dbupdate(cid, calledfrom='json_api') myDB = db.DBConnection() comic = myDB.selectone('SELECT * FROM comics WHERE ComicID=?', [cid]).fetchone() if comic: description_load = None if not os.path.exists( comic['ComicLocation'] ) and mylar.CONFIG.CREATE_FOLDERS is True: try: checkdirectory = filechecker.validateAndCreateDirectory( comic['ComicLocation'], True) except Exception as e: logger.warn( '[%s] Unable to create series directory @ %s. Aborting updating of series.json' % (e, comic['ComicLocation'])) continue else: if checkdirectory is False: logger.warn( 'Unable to create series directory @ %s. Aborting updating of series.json' % (comic['ComicLocation'])) continue if os.path.exists( os.path.join(comic['ComicLocation'], 'series.json')): try: with open( os.path.join(comic['ComicLocation'], 'series.json')) as j_file: metainfo = json.load(j_file) logger.fdebug('metainfo_loaded: %s' % (metainfo, )) try: # series.json version 1.0.1 description_load = metainfo['metadata'][ 'description_text'] except Exception as e: try: # series.json version 1.0 description_load = metainfo['metadata'][0][ 'description_text'] except Exception as e: description_load = metainfo['metadata'][0][ 'description'] except Exception as e: try: description_load = metainfo['metadata'][ 'description_formatted'] except Exception as e: try: description_load = metainfo['metadata'][0][ 'description_formatted'] except Exception as e: logger.info( 'No description found in metadata. Reloading from dB if available.[error: %s]' % e) c_date = datetime.date(int(comic['LatestDate'][:4]), int(comic['LatestDate'][5:7]), 1) n_date = datetime.date.today() recentchk = (n_date - c_date).days if comic['NewPublish'] is True: seriesStatus = 'Continuing' else: #do this just incase and as an extra measure of accuracy hopefully. if recentchk < 55: seriesStatus = 'Continuing' else: seriesStatus = 'Ended' clean_issue_list = None if comic['Collects'] != 'None': clean_issue_list = comic['Collects'] if mylar.CONFIG.SERIESJSON_FILE_PRIORITY is True: if description_load is not None: cdes_removed = re.sub(r'\n', '', description_load).strip() cdes_formatted = description_load elif comic['DescriptionEdit'] is not None: cdes_removed = re.sub( r'\n', ' ', comic['DescriptionEdit']).strip() cdes_formatted = comic['DescriptionEdit'] else: if comic['Description'] is not None: cdes_removed = re.sub( r'\n', '', comic['Description']).strip() else: cdes_removed = comic['Description'] logger.warn( 'Series does not have a description. Not populating, but you might need to do a Refresh Series to fix this' ) cdes_formatted = comic['Description'] else: if comic['DescriptionEdit'] is not None: cdes_removed = re.sub( r'\n', ' ', comic['DescriptionEdit']).strip() cdes_formatted = comic['DescriptionEdit'] elif description_load is not None: cdes_removed = re.sub(r'\n', '', description_load).strip() cdes_formatted = description_load else: if comic['Description'] is not None: cdes_removed = re.sub( r'\n', '', comic['Description']).strip() else: cdes_removed = comic['Description'] logger.warn( 'Series does not have a description. 
Not populating, but you might need to do a Refresh Series to fix this' ) cdes_formatted = comic['Description'] comicVol = comic['ComicVersion'] if all( [mylar.CONFIG.SETDEFAULTVOLUME is True, comicVol is None]): comicVol = 1 elif comicVol is not None: if comicVol.isdigit(): comicVol = int(comicVol) logger.info('Updated version to :' + str(comicVol)) if all([ mylar.CONFIG.SETDEFAULTVOLUME is False, comicVol == 'v1' ]): comicVol = None else: comicVol = int(re.sub('[^0-9]', '', comicVol).strip()) if any([ comic['ComicYear'] is None, comic['ComicYear'] == '0000', comic['ComicYear'][-1:] == '-' ]): SeriesYear = int(issued['firstdate'][:4]) else: SeriesYear = int(comic['ComicYear']) csyear = comic['Corrected_SeriesYear'] if any([ SeriesYear > int(datetime.datetime.now().year) + 1, SeriesYear == 2099 ]) and csyear is not None: logger.info( 'Corrected year of ' + str(SeriesYear) + ' to corrected year for series that was manually entered previously of ' + str(csyear)) SeriesYear = int(csyear) if all([ int(comic['Total']) == 1, SeriesYear < int(helpers.today()[:4]), comic['Type'] != 'One-Shot', comic['Type'] != 'TPB' ]): logger.info( 'Determined to be a one-shot issue. Forcing Edition to One-Shot' ) booktype = 'One-Shot' else: booktype = comic['Type'] if comic['Corrected_Type'] and comic[ 'Corrected_Type'] != booktype: booktype = comic['Corrected_Type'] c_image = comic metadata = {} metadata['version'] = '1.0.1' metadata['metadata'] = ({ 'type': 'comicSeries', 'publisher': comic['ComicPublisher'], 'imprint': comic['PublisherImprint'], 'name': comic['ComicName'], 'cid': int(cid), 'year': SeriesYear, 'description_text': cdes_removed, 'description_formatted': cdes_formatted, 'volume': comicVol, 'booktype': booktype, 'age_rating': comic['AgeRating'], 'collects': clean_issue_list, 'ComicImage': comic['ComicImageURL'], 'total_issues': comic['Total'], 'publication_run': comic['ComicPublished'], 'status': seriesStatus }) try: with open(os.path.join(comic['ComicLocation'], 'series.json'), 'w', encoding='utf-8') as outfile: json.dump(metadata, outfile, indent=4, ensure_ascii=False) except Exception as e: logger.error( 'Unable to write series.json to %s. Error returned: %s' % (comic['ComicLocation'], e)) continue else: logger.fdebug( 'Successfully written series.json file to %s' % comic['ComicLocation']) myDB.upsert("comics", {"seriesjsonPresent": int(True)}, {"ComicID": cid}) return
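# NOTE (editor): update_metadata() above tolerates three historical
# series.json layouts: v1.0.1 keeps 'metadata' as a dict carrying
# 'description_text', while v1.0 kept it as a one-element list, sometimes
# with 'description' or 'description_formatted' instead. The nested
# try/excepts reduce to this fallback chain - a sketch only (hypothetical
# reader, not the code above; json/os are already imported by this module):
def _load_description(comicdir):
    try:
        with open(os.path.join(comicdir, 'series.json')) as j_file:
            metainfo = json.load(j_file)
    except (IOError, OSError, ValueError):
        return None
    md = metainfo.get('metadata') or {}
    if isinstance(md, list):  #series.json v1.0 wrapped the dict in a list
        md = md[0] if md else {}
    for key in ('description_text', 'description', 'description_formatted'):
        if md.get(key) is not None:
            return md[key]
    return None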
def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=None): # Putting this here to get around the circular import. Will try to use this to update images at later date. # from mylar import cache myDB = db.DBConnection() # We need the current minimal info in the database instantly # so we don't throw a 500 error when we redirect to the artistPage controlValueDict = {"ComicID": comicid} dbcomic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone() if dbcomic is None: newValueDict = { "ComicName": "Comic ID: %s" % (comicid), "Status": "Loading" } comlocation = None oldcomversion = None else: newValueDict = {"Status": "Loading"} comlocation = dbcomic['ComicLocation'] filechecker.validateAndCreateDirectory(comlocation, True) oldcomversion = dbcomic[ 'ComicVersion'] #store the comicversion and chk if it exists before hammering. myDB.upsert("comics", newValueDict, controlValueDict) #run the re-sortorder here in order to properly display the page if pullupd is None: helpers.ComicSort(comicorder=mylar.COMICSORT, imported=comicid) # we need to lookup the info for the requested ComicID in full now comic = cv.getComic(comicid, 'comic') #comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone() if not comic: logger.warn("Error fetching comic. ID for : " + comicid) if dbcomic is None: newValueDict = { "ComicName": "Fetch failed, try refreshing. (%s)" % (comicid), "Status": "Active" } else: newValueDict = {"Status": "Active"} myDB.upsert("comics", newValueDict, controlValueDict) return if comic['ComicName'].startswith('The '): sortname = comic['ComicName'][4:] else: sortname = comic['ComicName'] logger.info(u"Now adding/updating: " + comic['ComicName']) #--Now that we know ComicName, let's try some scraping #--Start # gcd will return issue details (most importantly publishing date) if not mylar.CV_ONLY: if mismatch == "no" or mismatch is None: gcdinfo = parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid) #print ("gcdinfo: " + str(gcdinfo)) mismatch_com = "no" if gcdinfo == "No Match": updater.no_searchresults(comicid) nomatch = "true" logger.info(u"There was an error when trying to add " + comic['ComicName'] + " (" + comic['ComicYear'] + ")") return nomatch else: mismatch_com = "yes" #print ("gcdinfo:" + str(gcdinfo)) elif mismatch == "yes": CV_EXcomicid = myDB.action( "SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone() if CV_EXcomicid['variloop'] is None: pass else: vari_loop = CV_EXcomicid['variloop'] NewComicID = CV_EXcomicid['NewComicID'] gcomicid = CV_EXcomicid['GComicID'] resultURL = "/series/" + str(NewComicID) + "/" #print ("variloop" + str(CV_EXcomicid['variloop'])) #if vari_loop == '99': gcdinfo = parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=comicid, TotalIssues=0, issvariation="no", resultPublished=None) logger.info(u"Sucessfully retrieved details for " + comic['ComicName']) # print ("Series Published" + parseit.resultPublished) CV_NoYearGiven = "no" #if the SeriesYear returned by CV is blank or none (0000), let's use the gcd one. if comic['ComicYear'] is None or comic['ComicYear'] == '0000': if mylar.CV_ONLY: #we'll defer this until later when we grab all the issues and then figure it out logger.info( "Uh-oh. I can't find a Series Year for this series. I'm going to try analyzing deeper." ) SeriesYear = cv.getComic(comicid, 'firstissue', comic['FirstIssueID']) if SeriesYear == '0000': logger.info( "Ok - I couldn't find a Series Year at all. 
Loading in the issue data now and will figure out the Series Year." ) CV_NoYearGiven = "yes" issued = cv.getComic(comicid, 'issue') SeriesYear = issued['firstdate'][:4] else: SeriesYear = gcdinfo['SeriesYear'] else: SeriesYear = comic['ComicYear'] #let's do the Annual check here. if mylar.ANNUALS_ON: annualcomicname = re.sub('[\,\:]', '', comic['ComicName']) annuals = comicbookdb.cbdb(annualcomicname, SeriesYear) print("Number of Annuals returned: " + str(annuals['totalissues'])) nb = 0 while (nb <= int(annuals['totalissues'])): try: annualval = annuals['annualslist'][nb] except IndexError: break newCtrl = { "IssueID": str(annualval['AnnualIssue'] + annualval['AnnualDate']) } newVals = { "Issue_Number": annualval['AnnualIssue'], "IssueDate": annualval['AnnualDate'], "IssueName": annualval['AnnualTitle'], "ComicID": comicid, "Status": "Skipped" } myDB.upsert("annuals", newVals, newCtrl) nb += 1 #parseit.annualCheck(gcomicid=gcdinfo['GCDComicID'], comicid=comicid, comicname=comic['ComicName'], comicyear=SeriesYear) #comic book location on machine # setup default location here if comlocation is None: # let's remove the non-standard characters here. u_comicnm = comic['ComicName'] u_comicname = u_comicnm.encode('ascii', 'ignore').strip() if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname: comicdir = u_comicname if ':' in comicdir: comicdir = comicdir.replace(':', '') if '/' in comicdir: comicdir = comicdir.replace('/', '-') if ',' in comicdir: comicdir = comicdir.replace(',', '') if '?' in comicdir: comicdir = comicdir.replace('?', '') else: comicdir = u_comicname series = comicdir publisher = re.sub('!', '', comic['ComicPublisher']) # thanks Boom! year = SeriesYear comversion = comic['ComicVersion'] if comversion is None: comversion = 'None' #if comversion is None, remove it so it doesn't populate with 'None' if comversion == 'None': chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT) chunk_f = re.compile(r'\s+') mylar.FILE_FORMAT = chunk_f.sub(' ', chunk_f_f) #do work to generate folder path values = { '$Series': series, '$Publisher': publisher, '$Year': year, '$series': series.lower(), '$publisher': publisher.lower(), '$VolumeY': 'V' + str(year), '$VolumeN': comversion } #print mylar.FOLDER_FORMAT #print 'working dir:' #print helpers.replace_all(mylar.FOLDER_FORMAT, values) if mylar.FOLDER_FORMAT == '': comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + SeriesYear + ")" else: comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all( mylar.FOLDER_FORMAT, values) #comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")" if mylar.DESTINATION_DIR == "": logger.error( u"There is no general directory specified - please specify in Config/Post-Processing." ) return if mylar.REPLACE_SPACES: #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR) #moved this out of the above loop so it will chk for existance of comlocation in case moved #if it doesn't exist - create it (otherwise will bugger up later on) if os.path.isdir(str(comlocation)): logger.info(u"Directory (" + str(comlocation) + ") already exists! 
Continuing...") else: #print ("Directory doesn't exist!") #try: # os.makedirs(str(comlocation)) # logger.info(u"Directory successfully created at: " + str(comlocation)) #except OSError: # logger.error(u"Could not create comicdir : " + str(comlocation)) filechecker.validateAndCreateDirectory(comlocation, True) #try to account for CV not updating new issues as fast as GCD #seems CV doesn't update total counts #comicIssues = gcdinfo['totalissues'] comicIssues = comic['ComicIssues'] if not mylar.CV_ONLY: if gcdinfo['gcdvariation'] == "cv": comicIssues = str(int(comic['ComicIssues']) + 1) #let's download the image... if os.path.exists(mylar.CACHE_DIR): pass else: #let's make the dir. try: os.makedirs(str(mylar.CACHE_DIR)) logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR)) except OSError: logger.error( 'Could not create cache dir. Check permissions of cache dir: ' + str(mylar.CACHE_DIR)) coverfile = os.path.join(mylar.CACHE_DIR, str(comicid) + ".jpg") #try: urllib.urlretrieve(str(comic['ComicImage']), str(coverfile)) try: with open(str(coverfile)) as f: ComicImage = os.path.join('cache', str(comicid) + ".jpg") #this is for Firefox when outside the LAN...it works, but I don't know how to implement it #without breaking the normal flow for inside the LAN (above) #ComicImage = "http://" + str(mylar.HTTP_HOST) + ":" + str(mylar.HTTP_PORT) + "/cache/" + str(comicid) + ".jpg" logger.info(u"Sucessfully retrieved cover for " + comic['ComicName']) #if the comic cover local is checked, save a cover.jpg to the series folder. if mylar.COMIC_COVER_LOCAL: comiclocal = os.path.join(str(comlocation) + "/cover.jpg") shutil.copy(ComicImage, comiclocal) except IOError as e: logger.error(u"Unable to save cover locally at this time.") if oldcomversion is None: if comic['ComicVersion'].isdigit(): comicVol = "v" + comic['ComicVersion'] else: comicVol = None else: comicVol = oldcomversion #for description ... #Cdesc = helpers.cleanhtml(comic['ComicDescription']) #cdes_find = Cdesc.find("Collected") #cdes_removed = Cdesc[:cdes_find] #print cdes_removed controlValueDict = {"ComicID": comicid} newValueDict = { "ComicName": comic['ComicName'], "ComicSortName": sortname, "ComicYear": SeriesYear, "ComicImage": ComicImage, "Total": comicIssues, "ComicVersion": comicVol, "ComicLocation": comlocation, "ComicPublisher": comic['ComicPublisher'], #"Description": Cdesc.decode('utf-8', 'replace'), "DetailURL": comic['ComicURL'], # "ComicPublished": gcdinfo['resultPublished'], "ComicPublished": 'Unknown', "DateAdded": helpers.today(), "Status": "Loading" } myDB.upsert("comics", newValueDict, controlValueDict) #comicsort here... #run the re-sortorder here in order to properly display the page if pullupd is None: helpers.ComicSort(sequence='update') if CV_NoYearGiven == 'no': #if set to 'no' then we haven't pulled down the issues, otherwise we did it already issued = cv.getComic(comicid, 'issue') logger.info(u"Sucessfully retrieved issue details for " + comic['ComicName']) n = 0 iscnt = int(comicIssues) issid = [] issnum = [] issname = [] issdate = [] int_issnum = [] #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! 
:) latestiss = "0" latestdate = "0000-00-00" firstiss = "10000000" firstdate = "2099-00-00" #print ("total issues:" + str(iscnt)) #---removed NEW code here--- logger.info(u"Now adding/updating issues for " + comic['ComicName']) if not mylar.CV_ONLY: #fccnt = int(fc['comiccount']) #logger.info(u"Found " + str(fccnt) + "/" + str(iscnt) + " issues of " + comic['ComicName'] + "...verifying") #fcnew = [] if iscnt > 0: #if a series is brand new, it wont have any issues/details yet so skip this part while (n <= iscnt): #---NEW.code try: firstval = issued['issuechoice'][n] except IndexError: break cleanname = helpers.cleanName(firstval['Issue_Name']) issid = str(firstval['Issue_ID']) issnum = str(firstval['Issue_Number']) #print ("issnum: " + str(issnum)) issname = cleanname if '.' in str(issnum): issn_st = str(issnum).find('.') issn_b4dec = str(issnum)[:issn_st] #if the length of decimal is only 1 digit, assume it's a tenth dec_is = str(issnum)[issn_st + 1:] if len(dec_is) == 1: dec_nisval = int(dec_is) * 10 iss_naftdec = str(dec_nisval) if len(dec_is) == 2: dec_nisval = int(dec_is) iss_naftdec = str(dec_nisval) iss_issue = issn_b4dec + "." + iss_naftdec issis = (int(issn_b4dec) * 1000) + dec_nisval elif 'au' in issnum.lower(): print("au detected") stau = issnum.lower().find('au') issnum_au = issnum[:stau] print("issnum_au: " + str(issnum_au)) #account for Age of Ultron mucked up numbering issis = str(int(issnum_au) * 1000) + 'AU' else: issis = int(issnum) * 1000 bb = 0 while (bb <= iscnt): try: gcdval = gcdinfo['gcdchoice'][bb] #print ("gcdval: " + str(gcdval)) except IndexError: #account for gcd variation here if gcdinfo['gcdvariation'] == 'gcd': #logger.fdebug("gcd-variation accounted for.") issdate = '0000-00-00' int_issnum = int(issis / 1000) break if 'nn' in str(gcdval['GCDIssue']): #no number detected - GN, TP or the like logger.warn( u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time." ) updater.no_searchresults(comicid) return elif 'au' in gcdval['GCDIssue'].lower(): #account for Age of Ultron mucked up numbering - this is in format of 5AU.00 gstau = gcdval['GCDIssue'].lower().find('au') gcdis_au = gcdval['GCDIssue'][:gstau] gcdis = str(int(gcdis_au) * 1000) + 'AU' elif '.' in str(gcdval['GCDIssue']): #logger.fdebug("g-issue:" + str(gcdval['GCDIssue'])) issst = str(gcdval['GCDIssue']).find('.') #logger.fdebug("issst:" + str(issst)) issb4dec = str(gcdval['GCDIssue'])[:issst] #logger.fdebug("issb4dec:" + str(issb4dec)) #if the length of decimal is only 1 digit, assume it's a tenth decis = str(gcdval['GCDIssue'])[issst + 1:] #logger.fdebug("decis:" + str(decis)) if len(decis) == 1: decisval = int(decis) * 10 issaftdec = str(decisval) if len(decis) == 2: decisval = int(decis) issaftdec = str(decisval) gcd_issue = issb4dec + "." + issaftdec #logger.fdebug("gcd_issue:" + str(gcd_issue)) try: gcdis = (int(issb4dec) * 1000) + decisval except ValueError: logger.error( "This has no issue #'s for me to get - Either a Graphic Novel or one-shot. This feature to allow these will be added in the near future." ) updater.no_searchresults(comicid) return else: gcdis = int(str(gcdval['GCDIssue'])) * 1000 if gcdis == issis: issdate = str(gcdval['GCDDate']) if str(issis).isdigit(): int_issnum = int(gcdis / 1000) else: if 'au' in issis.lower(): int_issnum = str(int(gcdis[:-2]) / 1000) + 'AU' else: logger.error( "this has an alpha-numeric in the issue # which I cannot account for. Get on github and log the issue for evilhero." ) return #get the latest issue / date using the date. 
if gcdval['GCDDate'] > latestdate: latestiss = str(issnum) latestdate = str(gcdval['GCDDate']) break #bb = iscnt bb += 1 #print("(" + str(n) + ") IssueID: " + str(issid) + " IssueNo: " + str(issnum) + " Date" + str(issdate)) #---END.NEW. # check if the issue already exists iss_exists = myDB.action( 'SELECT * from issues WHERE IssueID=?', [issid]).fetchone() controlValueDict = {"IssueID": issid} newValueDict = { "ComicID": comicid, "ComicName": comic['ComicName'], "IssueName": issname, "Issue_Number": issnum, "IssueDate": issdate, "Int_IssueNumber": int_issnum } # Only change the status & add DateAdded if the issue is not already in the database (set after the dict is rebuilt so it isn't clobbered) if iss_exists is None: newValueDict['DateAdded'] = helpers.today() if mylar.AUTOWANT_ALL: newValueDict['Status'] = "Wanted" elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING: newValueDict['Status'] = "Wanted" else: newValueDict['Status'] = "Skipped" if iss_exists: #print ("Existing status : " + str(iss_exists['Status'])) newValueDict['Status'] = iss_exists['Status'] try: myDB.upsert("issues", newValueDict, controlValueDict) except sqlite3.InterfaceError as e: #raise sqlite3.InterfaceError(e) logger.error( "MAJOR error trying to get issue data, this is most likely a MULTI-VOLUME series and you need to use the custom_exceptions.csv file." ) myDB.action("DELETE FROM comics WHERE ComicID=?", [comicid]) return n += 1
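# NOTE (editor): the decimal/'AU' handling above encodes an issue number as
# a sortable integer: whole * 1000, plus the decimal part normalised to two
# digits ('5.1' is treated as '5.10'). Age of Ultron numbering keeps its
# 'AU' suffix and so becomes a string instead. Condensed sketch
# (hypothetical helper; the original only handles one- or two-digit
# decimals, this truncates anything longer):
def _encode_issue(issnum):
    #'5' -> 5000, '5.1' -> 5010, '5.25' -> 5025, '5AU' -> '5000AU'
    issnum = str(issnum)
    if 'au' in issnum.lower():
        stau = issnum.lower().find('au')
        return str(int(issnum[:stau]) * 1000) + 'AU'
    if '.' in issnum:
        whole, dec = issnum.split('.', 1)
        if len(dec) == 1:
            dec += '0'  #tenths -> hundredths, same as the * 10 above
        return int(whole) * 1000 + int(dec[:2])
    return int(issnum) * 1000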
def addComictoDB(comicid): # Putting this here to get around the circular import. Will try to use this to update images at later date. from mylar import cache myDB = db.DBConnection() # myDB.action('DELETE from blacklist WHERE ComicID=?', [comicid]) # We need the current minimal info in the database instantly # so we don't throw a 500 error when we redirect to the artistPage controlValueDict = {"ComicID": comicid} dbcomic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone() if dbcomic is None: newValueDict = {"ComicName": "Comic ID: %s" % (comicid), "Status": "Loading"} else: newValueDict = {"Status": "Loading"} myDB.upsert("comics", newValueDict, controlValueDict) # we need to lookup the info for the requested ComicID in full now comic = cv.getComic(comicid,'comic') if not comic: logger.warn("Error fetching comic. ID for : " + comicid) if dbcomic is None: newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (comicid), "Status": "Active"} else: newValueDict = {"Status": "Active"} myDB.upsert("comics", newValueDict, controlValueDict) return if comic['ComicName'].startswith('The '): sortname = comic['ComicName'][4:] else: sortname = comic['ComicName'] logger.info(u"Now adding/updating: " + comic['ComicName']) #--Now that we know ComicName, let's try some scraping #--Start # gcd will return issue details (most importantly publishing date) gcdinfo=parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid) if gcdinfo == "No Match": logger.warn("No matching result found for " + comic['ComicName'] + " (" + comic['ComicYear'] + ")" ) updater.no_searchresults(comicid) return logger.info(u"Sucessfully retrieved details for " + comic['ComicName'] ) # print ("Series Published" + parseit.resultPublished) #--End #comic book location on machine # setup default location here if ':' in comic['ComicName']: comicdir = comic['ComicName'].replace(':','') else: comicdir = comic['ComicName'] comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")" if mylar.DESTINATION_DIR == "": logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.") return if mylar.REPLACE_SPACES: #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR) #if it doesn't exist - create it (otherwise will bugger up later on) if os.path.isdir(str(comlocation)): logger.info(u"Directory (" + str(comlocation) + ") already exists! 
Continuing...") else: #print ("Directory doesn't exist!") try: os.makedirs(str(comlocation)) logger.info(u"Directory successfully created at: " + str(comlocation)) except OSError as e: if e.errno != errno.EEXIST: raise #print ("root dir for series: " + comlocation) #try to account for CV not updating new issues as fast as GCD #seems CV doesn't update total counts #comicIssues = gcdinfo['totalissues'] if gcdinfo['gcdvariation'] == "cv": comicIssues = str(int(comic['ComicIssues']) + 1) else: comicIssues = comic['ComicIssues'] controlValueDict = {"ComicID": comicid} newValueDict = {"ComicName": comic['ComicName'], "ComicSortName": sortname, "ComicYear": comic['ComicYear'], "ComicImage": comic['ComicImage'], "Total": comicIssues, "ComicLocation": comlocation, "ComicPublisher": comic['ComicPublisher'], "ComicPublished": parseit.resultPublished, "DateAdded": helpers.today(), "Status": "Loading"} myDB.upsert("comics", newValueDict, controlValueDict) issued = cv.getComic(comicid,'issue') logger.info(u"Successfully retrieved issue details for " + comic['ComicName'] ) n = 0 iscnt = int(comicIssues) issid = [] issnum = [] issname = [] issdate = [] int_issnum = [] #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :) latestiss = "0" latestdate = "0000-00-00" #print ("total issues:" + str(iscnt)) #---removed NEW code here--- logger.info(u"Now adding/updating issues for " + comic['ComicName']) # file check to see if issue exists logger.info(u"Checking directory for existing issues.") #fc = filechecker.listFiles(dir=comlocation, watchcomic=comic['ComicName']) #havefiles = 0 #fccnt = int(fc['comiccount']) #logger.info(u"Found " + str(fccnt) + "/" + str(iscnt) + " issues of " + comic['ComicName'] + "...verifying") #fcnew = [] while (n <= iscnt): #---NEW.code try: firstval = issued['issuechoice'][n] except IndexError: break cleanname = helpers.cleanName(firstval['Issue_Name']) issid = str(firstval['Issue_ID']) issnum = str(firstval['Issue_Number']) issname = cleanname if '.' in str(issnum): issn_st = str(issnum).find('.') issn_b4dec = str(issnum)[:issn_st] #if the length of decimal is only 1 digit, assume it's a tenth dec_is = str(issnum)[issn_st + 1:] if len(dec_is) == 1: dec_nisval = int(dec_is) * 10 iss_naftdec = str(dec_nisval) if len(dec_is) == 2: dec_nisval = int(dec_is) iss_naftdec = str(dec_nisval) iss_issue = issn_b4dec + "." + iss_naftdec issis = (int(issn_b4dec) * 1000) + dec_nisval else: issis = int(issnum) * 1000 bb = 0 while (bb <= iscnt): try: gcdval = gcdinfo['gcdchoice'][bb] except IndexError: #account for gcd variation here if gcdinfo['gcdvariation'] == 'gcd': print ("gcd-variation accounted for.") issdate = '0000-00-00' int_issnum = int ( issis / 1000 ) break if 'nn' in str(gcdval['GCDIssue']): #no number detected - GN, TP or the like logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.") updater.no_searchresults(comicid) return elif '.' in str(gcdval['GCDIssue']): issst = str(gcdval['GCDIssue']).find('.') issb4dec = str(gcdval['GCDIssue'])[:issst] #if the length of decimal is only 1 digit, assume it's a tenth decis = str(gcdval['GCDIssue'])[issst+1:] if len(decis) == 1: decisval = int(decis) * 10 issaftdec = str(decisval) if len(decis) == 2: decisval = int(decis) issaftdec = str(decisval) gcd_issue = issb4dec + "." + issaftdec gcdis = (int(issb4dec) * 1000) + decisval else: gcdis = int(str(gcdval['GCDIssue'])) * 1000 if gcdis == issis: issdate = str(gcdval['GCDDate']) int_issnum = int( gcdis / 1000 ) #get the latest issue / date using the date. 
if gcdval['GCDDate'] > latestdate: latestiss = str(issnum) latestdate = str(gcdval['GCDDate']) break #bb = iscnt bb+=1 #print("(" + str(n) + ") IssueID: " + str(issid) + " IssueNo: " + str(issnum) + " Date" + str(issdate)) #---END.NEW. # check if the issue already exists iss_exists = myDB.select('SELECT * from issues WHERE IssueID=?', [issid]) # Only change the status & add DateAdded if the issue is not already in the database if not len(iss_exists): newValueDict['DateAdded'] = helpers.today() controlValueDict = {"IssueID": issid} newValueDict = {"ComicID": comicid, "ComicName": comic['ComicName'], "IssueName": issname, "Issue_Number": issnum, "IssueDate": issdate, "Int_IssueNumber": int_issnum } if mylar.AUTOWANT_ALL: newValueDict['Status'] = "Wanted" #elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING: # newValueDict['Status'] = "Wanted" else: newValueDict['Status'] = "Skipped" myDB.upsert("issues", newValueDict, controlValueDict) n+=1 # logger.debug(u"Updating comic cache for " + comic['ComicName']) # cache.getThumb(ComicID=issue['issueid']) # logger.debug(u"Updating cache for: " + comic['ComicName']) # cache.getThumb(ComicIDcomicid) #check for existing files... updater.forceRescan(comicid) controlValueStat = {"ComicID": comicid} newValueStat = {"Status": "Active", "LatestIssue": latestiss, "LatestDate": latestdate } myDB.upsert("comics", newValueStat, controlValueStat) logger.info(u"Updating complete for: " + comic['ComicName']) #here we grab issues that have been marked as wanted above... results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid]) if results: logger.info(u"Attempting to grab wanted issues for : " + comic['ComicName']) for result in results: foundNZB = "none" if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST): foundNZB = search.searchforissue(result['IssueID']) if foundNZB == "yes": updater.foundsearch(result['ComicID'], result['IssueID']) else: logger.info(u"No issues marked as wanted for " + comic['ComicName']) logger.info(u"Finished grabbing what I could.")
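# NOTE (editor): across the addComictoDB versions above, the net status rule
# is: an issue already in the table keeps its status; otherwise AUTOWANT_ALL
# makes it Wanted (the AUTOWANT_UPCOMING release-date check is still
# commented out here), a file match in the rescan path makes it Downloaded,
# and everything else is Skipped. As a compact decision helper (editor's
# sketch, not mylar API):
def _new_issue_status(existing_status, have_file, autowant_all):
    if existing_status is not None:  #never clobber a status already set
        return existing_status
    if have_file:
        return "Downloaded"
    return "Wanted" if autowant_all else "Skipped"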
def scanLibrary(scan=None, queue=None): mylar.IMPORT_FILES = 0 mylar.IMPORT_PARSED_COUNT = 0 valreturn = [] if scan: try: soma = libraryScan(queue=queue) except Exception, e: logger.error('[IMPORT] Unable to complete the scan: %s' % e) mylar.IMPORT_STATUS = None valreturn.append({"somevalue": 'self.ie', "result": 'error'}) return queue.put(valreturn) if soma == "Completed": logger.info('[IMPORT] Sucessfully completed import.') elif soma == "Fail": mylar.IMPORT_STATUS = 'Failure' valreturn.append({"somevalue": 'self.ie', "result": 'error'}) return queue.put(valreturn) else: mylar.IMPORT_STATUS = 'Now adding the completed results to the DB.' logger.info('[IMPORT] Parsing/Reading of files completed!') logger.info('[IMPORT] Attempting to import ' + str(int(soma['import_cv_ids'] + soma['import_count'])) + ' files into your watchlist.') logger.info('[IMPORT-BREAKDOWN] Files with ComicIDs successfully extracted: ' + str(soma['import_cv_ids'])) logger.info('[IMPORT-BREAKDOWN] Files that had to be parsed: ' + str(soma['import_count'])) logger.info('[IMPORT-BREAKDOWN] Files that were unable to be parsed: ' + str(len(soma['failure_list']))) logger.info('[IMPORT-BREAKDOWN] Files that caused errors during the import: ' + str(len(soma['utter_failure_list']))) #logger.info('[IMPORT-BREAKDOWN] Failure Files: ' + str(soma['failure_list'])) myDB = db.DBConnection() #first we do the CV ones. if int(soma['import_cv_ids']) > 0: for i in soma['CV_import_comicids']: #we need to find the impid in the issueid_list as that holds the impid + other info abc = [x for x in soma['issueid_list'] if x['issueid'] == i['IssueID']] ghi = abc[0]['importinfo'] nspace_dynamicname = re.sub('[\|\s]', '', ghi['dynamicname'].lower()).strip() #these all have related ComicID/IssueID's...just add them as is. controlValue = {"impID": ghi['impid']} newValue = {"Status": "Not Imported", "ComicName": helpers.conversion(i['ComicName']), "DisplayName": helpers.conversion(i['ComicName']), "DynamicName": helpers.conversion(nspace_dynamicname), "ComicID": i['ComicID'], "IssueID": i['IssueID'], "IssueNumber": helpers.conversion(i['Issue_Number']), "Volume": ghi['volume'], "ComicYear": ghi['comicyear'], "ComicFilename": helpers.conversion(ghi['comfilename']), "ComicLocation": helpers.conversion(ghi['comlocation']), "ImportDate": helpers.today(), "WatchMatch": None} #i['watchmatch']} myDB.upsert("importresults", newValue, controlValue) if int(soma['import_count']) > 0: for ss in soma['import_by_comicids']: nspace_dynamicname = re.sub('[\|\s]', '', ss['dynamicname'].lower()).strip() controlValue = {"impID": ss['impid']} newValue = {"ComicYear": ss['comicyear'], "Status": "Not Imported", "ComicName": helpers.conversion(ss['comicname']), "DisplayName": helpers.conversion(ss['displayname']), "DynamicName": helpers.conversion(nspace_dynamicname), "ComicID": ss['comicid'], #if it's been scanned in for cvinfo, this will be the CID - otherwise it's None "IssueID": None, "Volume": ss['volume'], "IssueNumber": helpers.conversion(ss['issuenumber']), "ComicFilename": helpers.conversion(ss['comfilename']), "ComicLocation": helpers.conversion(ss['comlocation']), "ImportDate": helpers.today(), "WatchMatch": ss['watchmatch']} myDB.upsert("importresults", newValue, controlValue) # because we could be adding volumes/series that span years, we need to account for this # add the year to the db under the term, valid-years # add the issue to the db under the term, min-issue #locate metadata here. 
# unzip -z filename.cbz will show the comment field of the zip which contains the metadata. #self.importResults() mylar.IMPORT_STATUS = 'Import completed.' valreturn.append({"somevalue": 'self.ie', "result": 'success'}) return queue.put(valreturn)
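# NOTE (editor): every write in scanLibrary() goes through myDB.upsert keyed
# on impID, so re-scanning the same file updates its earlier importresults
# row instead of duplicating it. A sketch of what that update-then-insert
# contract amounts to against a plain sqlite3 cursor - illustrative only,
# not the actual db.DBConnection implementation:
def _upsert(cursor, table, new_value, control_value):
    set_sql = ', '.join('%s=?' % k for k in new_value)
    where_sql = ' AND '.join('%s=?' % k for k in control_value)
    params = list(new_value.values()) + list(control_value.values())
    cursor.execute('UPDATE %s SET %s WHERE %s' % (table, set_sql, where_sql), params)
    if cursor.rowcount == 0:  #no existing row matched - insert a fresh one
        keys = list(control_value) + list(new_value)
        vals = list(control_value.values()) + list(new_value.values())
        cursor.execute('INSERT INTO %s (%s) VALUES (%s)' % (table, ', '.join(keys), ', '.join('?' * len(keys))), vals)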
def addComictoDB(comicid, mismatch=None): # Putting this here to get around the circular import. Will try to use this to update images at later date. from mylar import cache myDB = db.DBConnection() # We need the current minimal info in the database instantly # so we don't throw a 500 error when we redirect to the artistPage controlValueDict = {"ComicID": comicid} dbcomic = myDB.action("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone() if dbcomic is None: newValueDict = {"ComicName": "Comic ID: %s" % (comicid), "Status": "Loading"} comlocation = None else: newValueDict = {"Status": "Loading"} comlocation = dbcomic["ComicLocation"] myDB.upsert("comics", newValueDict, controlValueDict) # we need to lookup the info for the requested ComicID in full now comic = cv.getComic(comicid, "comic") # comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone() if not comic: logger.warn("Error fetching comic. ID for : " + comicid) if dbcomic is None: newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (comicid), "Status": "Active"} else: newValueDict = {"Status": "Active"} myDB.upsert("comics", newValueDict, controlValueDict) return if comic["ComicName"].startswith("The "): sortname = comic["ComicName"][4:] else: sortname = comic["ComicName"] logger.info(u"Now adding/updating: " + comic["ComicName"]) # --Now that we know ComicName, let's try some scraping # --Start # gcd will return issue details (most importantly publishing date) if mismatch == "no" or mismatch is None: gcdinfo = parseit.GCDScraper(comic["ComicName"], comic["ComicYear"], comic["ComicIssues"], comicid) mismatch_com = "no" if gcdinfo == "No Match": updater.no_searchresults(comicid) nomatch = "true" logger.info( u"There was an error when trying to add " + comic["ComicName"] + " (" + comic["ComicYear"] + ")" ) return nomatch else: mismatch_com = "yes" # print ("gcdinfo:" + str(gcdinfo)) elif mismatch == "yes": CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone() if CV_EXcomicid["variloop"] is None: pass else: vari_loop = CV_EXcomicid["variloop"] NewComicID = CV_EXcomicid["NewComicID"] gcomicid = CV_EXcomicid["GComicID"] resultURL = "/series/" + str(NewComicID) + "/" # print ("variloop" + str(CV_EXcomicid['variloop'])) # if vari_loop == '99': gcdinfo = parseit.GCDdetails( comseries=None, resultURL=resultURL, vari_loop=0, ComicID=comicid, TotalIssues=0, issvariation="no", resultPublished=None, ) logger.info(u"Sucessfully retrieved details for " + comic["ComicName"]) # print ("Series Published" + parseit.resultPublished) # comic book location on machine # setup default location here if comlocation is None: if ":" in comic["ComicName"] or "/" in comic["ComicName"] or "," in comic["ComicName"]: comicdir = comic["ComicName"] if ":" in comicdir: comicdir = comicdir.replace(":", "") if "/" in comicdir: comicdir = comicdir.replace("/", "-") if "," in comicdir: comicdir = comicdir.replace(",", "") else: comicdir = comic["ComicName"] series = comicdir publisher = comic["ComicPublisher"] year = comic["ComicYear"] # do work to generate folder path values = {"$Series": series, "$Publisher": publisher, "$Year": year} # print mylar.FOLDER_FORMAT # print 'working dir:' # print helpers.replace_all(mylar.FOLDER_FORMAT, values) if mylar.FOLDER_FORMAT == "": comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic["ComicYear"] + ")" else: comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values) # comlocation = 
mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")" if mylar.DESTINATION_DIR == "": logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.") return if mylar.REPLACE_SPACES: # mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot comlocation = comlocation.replace(" ", mylar.REPLACE_CHAR) # if it doesn't exist - create it (otherwise will bugger up later on) if os.path.isdir(str(comlocation)): logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...") else: # print ("Directory doesn't exist!") try: os.makedirs(str(comlocation)) logger.info(u"Directory successfully created at: " + str(comlocation)) except OSError: logger.error(u"Could not create comicdir : " + str(comlocation)) # try to account for CV not updating new issues as fast as GCD # seems CV doesn't update total counts # comicIssues = gcdinfo['totalissues'] if gcdinfo["gcdvariation"] == "cv": comicIssues = str(int(comic["ComicIssues"]) + 1) else: comicIssues = comic["ComicIssues"] # let's download the image... if os.path.exists(mylar.CACHE_DIR): pass else: # let's make the dir. try: os.makedirs(str(mylar.CACHE_DIR)) logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR)) except OSError: logger.error("Could not create cache dir. Check permissions of cache dir: " + str(mylar.CACHE_DIR)) coverfile = mylar.CACHE_DIR + "/" + str(comicid) + ".jpg" # try: urllib.urlretrieve(str(comic["ComicImage"]), str(coverfile)) try: with open(str(coverfile)) as f: ComicImage = "cache/" + str(comicid) + ".jpg" logger.info(u"Sucessfully retrieved cover for " + str(comic["ComicName"])) except IOError as e: logger.error(u"Unable to save cover locally at this time.") controlValueDict = {"ComicID": comicid} newValueDict = { "ComicName": comic["ComicName"], "ComicSortName": sortname, "ComicYear": comic["ComicYear"], "ComicImage": ComicImage, "Total": comicIssues, "ComicLocation": comlocation, "ComicPublisher": comic["ComicPublisher"], "ComicPublished": gcdinfo["resultPublished"], "DateAdded": helpers.today(), "Status": "Loading", } myDB.upsert("comics", newValueDict, controlValueDict) issued = cv.getComic(comicid, "issue") logger.info(u"Sucessfully retrieved issue details for " + comic["ComicName"]) n = 0 iscnt = int(comicIssues) issid = [] issnum = [] issname = [] issdate = [] int_issnum = [] # let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :) latestiss = "0" latestdate = "0000-00-00" # print ("total issues:" + str(iscnt)) # ---removed NEW code here--- logger.info(u"Now adding/updating issues for " + comic["ComicName"]) # file check to see if issue exists logger.info(u"Checking directory for existing issues.") # fc = filechecker.listFiles(dir=comlocation, watchcomic=comic['ComicName']) # havefiles = 0 # fccnt = int(fc['comiccount']) # logger.info(u"Found " + str(fccnt) + "/" + str(iscnt) + " issues of " + comic['ComicName'] + "...verifying") # fcnew = [] while n <= iscnt: # ---NEW.code try: firstval = issued["issuechoice"][n] except IndexError: break cleanname = helpers.cleanName(firstval["Issue_Name"]) issid = str(firstval["Issue_ID"]) issnum = str(firstval["Issue_Number"]) issname = cleanname if "." 
in str(issnum): issn_st = str(issnum).find(".") issn_b4dec = str(issnum)[:issn_st] # if the length of decimal is only 1 digit, assume it's a tenth dec_is = str(issnum)[issn_st + 1 :] if len(dec_is) == 1: dec_nisval = int(dec_is) * 10 iss_naftdec = str(dec_nisval) if len(dec_is) == 2: dec_nisval = int(dec_is) iss_naftdec = str(dec_nisval) iss_issue = issn_b4dec + "." + iss_naftdec issis = (int(issn_b4dec) * 1000) + dec_nisval else: issis = int(issnum) * 1000 bb = 0 while bb <= iscnt: try: gcdval = gcdinfo["gcdchoice"][bb] except IndexError: # account for gcd variation here if gcdinfo["gcdvariation"] == "gcd": # print ("gcd-variation accounted for.") issdate = "0000-00-00" int_issnum = int(issis / 1000) break if "nn" in str(gcdval["GCDIssue"]): # no number detected - GN, TP or the like logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.") updater.no_searchresults(comicid) return elif "." in str(gcdval["GCDIssue"]): # print ("g-issue:" + str(gcdval['GCDIssue'])) issst = str(gcdval["GCDIssue"]).find(".") # print ("issst:" + str(issst)) issb4dec = str(gcdval["GCDIssue"])[:issst] # print ("issb4dec:" + str(issb4dec)) # if the length of decimal is only 1 digit, assume it's a tenth decis = str(gcdval["GCDIssue"])[issst + 1 :] # print ("decis:" + str(decis)) if len(decis) == 1: decisval = int(decis) * 10 issaftdec = str(decisval) if len(decis) == 2: decisval = int(decis) issaftdec = str(decisval) gcd_issue = issb4dec + "." + issaftdec # print ("gcd_issue:" + str(gcd_issue)) gcdis = (int(issb4dec) * 1000) + decisval else: gcdis = int(str(gcdval["GCDIssue"])) * 1000 if gcdis == issis: issdate = str(gcdval["GCDDate"]) int_issnum = int(gcdis / 1000) # get the latest issue / date using the date. if gcdval["GCDDate"] > latestdate: latestiss = str(issnum) latestdate = str(gcdval["GCDDate"]) break # bb = iscnt bb += 1 # print("(" + str(n) + ") IssueID: " + str(issid) + " IssueNo: " + str(issnum) + " Date" + str(issdate)) # ---END.NEW. # check if the issue already exists iss_exists = myDB.action("SELECT * from issues WHERE IssueID=?", [issid]).fetchone() controlValueDict = {"IssueID": issid} newValueDict = { "ComicID": comicid, "ComicName": comic["ComicName"], "IssueName": issname, "Issue_Number": issnum, "IssueDate": issdate, "Int_IssueNumber": int_issnum, } # Only change the status & add DateAdded if the issue is not already in the database (set after the dict is rebuilt so it isn't clobbered) if iss_exists is None: newValueDict["DateAdded"] = helpers.today() if mylar.AUTOWANT_ALL: newValueDict["Status"] = "Wanted" # elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING: # newValueDict['Status'] = "Wanted" else: newValueDict["Status"] = "Skipped" if iss_exists: # print ("Existing status : " + str(iss_exists['Status'])) newValueDict["Status"] = iss_exists["Status"] myDB.upsert("issues", newValueDict, controlValueDict) n += 1 # logger.debug(u"Updating comic cache for " + comic['ComicName']) # cache.getThumb(ComicID=issue['issueid']) # logger.debug(u"Updating cache for: " + comic['ComicName']) # cache.getThumb(ComicIDcomicid) # check for existing files... updater.forceRescan(comicid) controlValueStat = {"ComicID": comicid} newValueStat = { "Status": "Active", "LatestIssue": latestiss, "LatestDate": latestdate, "LastUpdated": helpers.now(), } myDB.upsert("comics", newValueStat, controlValueStat) logger.info(u"Updating complete for: " + comic["ComicName"]) # let's check the pullist for anything at this time as well since we're here. 
if mylar.AUTOWANT_UPCOMING: logger.info(u"Checking this week's pullist for new issues of " + str(comic["ComicName"])) updater.newpullcheck() # here we grab issues that have been marked as wanted above... results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid]) if results: logger.info(u"Attempting to grab wanted issues for : " + comic["ComicName"]) for result in results: foundNZB = "none" if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST): foundNZB = search.searchforissue(result["IssueID"]) if foundNZB == "yes": updater.foundsearch(result["ComicID"], result["IssueID"]) else: logger.info(u"No issues marked as wanted for " + comic["ComicName"]) logger.info(u"Finished grabbing what I could.")
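# NOTE (editor): comlocation above is DESTINATION_DIR joined to either a
# default "Series (Year)" folder or FOLDER_FORMAT with its $-tokens
# substituted, followed by optional space replacement. Sketch with a
# stand-in for helpers.replace_all (the real one lives in mylar.helpers;
# only the capitalised tokens are handled here):
def _build_comlocation(dest_dir, folder_format, series, publisher, year, replace_char=None):
    values = {'$Series': series, '$Publisher': publisher, '$Year': str(year)}
    if folder_format == '':
        comlocation = dest_dir + '/' + series + ' (' + str(year) + ')'
    else:
        fmt = folder_format
        for token, val in values.items():
            fmt = fmt.replace(token, val)
        comlocation = dest_dir + '/' + fmt
    if replace_char:  #REPLACE_SPACES swaps spaces for underscore or dot
        comlocation = comlocation.replace(' ', replace_char)
    return comlocation
#e.g. _build_comlocation('/comics', '$Publisher/$Series ($Year)', 'Invincible', 'Image', 2003, '_')
#     -> '/comics/Image/Invincible_(2003)'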
def GCDimport(gcomicid): # this is for importing via GCD only and not using CV. # used when volume spanning is discovered for a Comic (and can't be added using CV). # Issue Counts are wrong (and can't be added). # because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish. # CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719) gcdcomicid = gcomicid myDB = db.DBConnection() # We need the current minimal info in the database instantly # so we don't throw a 500 error when we redirect to the artistPage controlValueDict = {"ComicID": gcdcomicid} comic = myDB.action( "SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation FROM comics WHERE ComicID=?", [gcomicid], ).fetchone() ComicName = comic[0] ComicYear = comic[1] ComicIssues = comic[2] comlocation = comic[5] # ComicImage = comic[4] # print ("Comic:" + str(ComicName)) newValueDict = {"Status": "Loading"} myDB.upsert("comics", newValueDict, controlValueDict) # we need to lookup the info for the requested ComicID in full now # comic = cv.getComic(comicid,'comic') if not comic: logger.warn("Error fetching comic. ID for : " + gcdcomicid) if comic is None: # was 'dbcomic', which is never defined in this function newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (gcdcomicid), "Status": "Active"} else: newValueDict = {"Status": "Active"} myDB.upsert("comics", newValueDict, controlValueDict) return if ComicName.startswith("The "): sortname = ComicName[4:] else: sortname = ComicName logger.info(u"Now adding/updating: " + ComicName) # --Now that we know ComicName, let's try some scraping # --Start # gcd will return issue details (most importantly publishing date) comicid = gcomicid[1:] resultURL = "/series/" + str(comicid) + "/" gcdinfo = parseit.GCDdetails( comseries=None, resultURL=resultURL, vari_loop=0, ComicID=gcdcomicid, TotalIssues=ComicIssues, issvariation=None, resultPublished=None, ) if gcdinfo == "No Match": logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")") updater.no_searchresults(gcomicid) nomatch = "true" return nomatch logger.info(u"Successfully retrieved details for " + ComicName) # print ("Series Published" + parseit.resultPublished) # --End ComicImage = gcdinfo["ComicImage"] # comic book location on machine # setup default location here if comlocation is None: if ":" in ComicName or "/" in ComicName or "," in ComicName: comicdir = ComicName if ":" in comicdir: comicdir = comicdir.replace(":", "") if "/" in comicdir: comicdir = comicdir.replace("/", "-") if "," in comicdir: comicdir = comicdir.replace(",", "") else: comicdir = ComicName comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")" if mylar.DESTINATION_DIR == "": logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.") return if mylar.REPLACE_SPACES: # mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot comlocation = comlocation.replace(" ", mylar.REPLACE_CHAR) # if it doesn't exist - create it (otherwise will bugger up later on) if os.path.isdir(str(comlocation)): logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...") else: # print ("Directory doesn't exist!") try: os.makedirs(str(comlocation)) logger.info(u"Directory successfully created at: " + str(comlocation)) except OSError: logger.error(u"Could not create comicdir : " + str(comlocation)) comicIssues = gcdinfo["totalissues"] # let's download the image... if os.path.exists(mylar.CACHE_DIR): pass else: # let's make the dir. 
try: os.makedirs(str(mylar.CACHE_DIR)) logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR)) except OSError: logger.error(u"Could not create cache dir : " + str(mylar.CACHE_DIR)) coverfile = mylar.CACHE_DIR + "/" + str(gcomicid) + ".jpg" urllib.urlretrieve(str(ComicImage), str(coverfile)) try: with open(str(coverfile)) as f: ComicImage = "cache/" + str(gcomicid) + ".jpg" logger.info(u"Successfully retrieved cover for " + str(ComicName)) except IOError as e: logger.error(u"Unable to save cover locally at this time.") controlValueDict = {"ComicID": gcomicid} newValueDict = { "ComicName": ComicName, "ComicSortName": sortname, "ComicYear": ComicYear, "Total": comicIssues, "ComicLocation": comlocation, "ComicImage": ComicImage, # "ComicPublisher": comic['ComicPublisher'], # "ComicPublished": comicPublished, "DateAdded": helpers.today(), "Status": "Loading", } myDB.upsert("comics", newValueDict, controlValueDict) logger.info(u"Successfully retrieved issue details for " + ComicName) n = 0 iscnt = int(comicIssues) issnum = [] issname = [] issdate = [] int_issnum = [] # let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :) latestiss = "0" latestdate = "0000-00-00" # print ("total issues:" + str(iscnt)) # ---removed NEW code here--- logger.info(u"Now adding/updating issues for " + ComicName) bb = 0 while bb <= iscnt: # ---NEW.code try: gcdval = gcdinfo["gcdchoice"][bb] # print ("gcdval: " + str(gcdval)) except IndexError: # account for gcd variation here if gcdinfo["gcdvariation"] == "gcd": # print ("gcd-variation accounted for.") issdate = "0000-00-00" # note: the stale 'int_issnum = int(issis / 1000)' backfill copied from the CV path was dropped here - 'issis' is never defined in this function break if "nn" in str(gcdval["GCDIssue"]): # no number detected - GN, TP or the like logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.") updater.no_searchresults(comicid) return elif "." in str(gcdval["GCDIssue"]): issst = str(gcdval["GCDIssue"]).find(".") issb4dec = str(gcdval["GCDIssue"])[:issst] # if the length of decimal is only 1 digit, assume it's a tenth decis = str(gcdval["GCDIssue"])[issst + 1 :] if len(decis) == 1: decisval = int(decis) * 10 issaftdec = str(decisval) if len(decis) == 2: decisval = int(decis) issaftdec = str(decisval) if int(issaftdec) == 0: issaftdec = "00" gcd_issue = issb4dec + "." + issaftdec gcdis = (int(issb4dec) * 1000) + decisval else: gcdis = int(str(gcdval["GCDIssue"])) * 1000 gcd_issue = str(gcdval["GCDIssue"]) # get the latest issue / date using the date. int_issnum = int(gcdis / 1000) issdate = str(gcdval["GCDDate"]) issid = "G" + str(gcdval["IssueID"]) if gcdval["GCDDate"] > latestdate: latestiss = str(gcd_issue) latestdate = str(gcdval["GCDDate"]) # print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate) ) # ---END.NEW. # check if the issue already exists iss_exists = myDB.action("SELECT * from issues WHERE IssueID=?", [issid]).fetchone() # DateAdded is set below, after newValueDict is rebuilt, so it isn't clobbered # adjust for inconsistencies in GCD date format - some dates have ? which borks up things. if "?" 
in str(issdate): issdate = "0000-00-00" controlValueDict = {"IssueID": issid} newValueDict = { "ComicID": gcomicid, "ComicName": ComicName, "Issue_Number": gcd_issue, "IssueDate": issdate, "Int_IssueNumber": int_issnum, } # Only add DateAdded if the issue is not already in the database if iss_exists is None: newValueDict["DateAdded"] = helpers.today() # print ("issueid:" + str(controlValueDict)) # print ("values:" + str(newValueDict)) if mylar.AUTOWANT_ALL: newValueDict["Status"] = "Wanted" # elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING: # newValueDict['Status'] = "Wanted" else: newValueDict["Status"] = "Skipped" if iss_exists: # print ("Existing status : " + str(iss_exists['Status'])) newValueDict["Status"] = iss_exists["Status"] myDB.upsert("issues", newValueDict, controlValueDict) bb += 1 # logger.debug(u"Updating comic cache for " + ComicName) # cache.getThumb(ComicID=issue['issueid']) # logger.debug(u"Updating cache for: " + ComicName) # cache.getThumb(ComicIDcomicid) # check for existing files... updater.forceRescan(gcomicid) controlValueStat = {"ComicID": gcomicid} newValueStat = { "Status": "Active", "LatestIssue": latestiss, "LatestDate": latestdate, "LastUpdated": helpers.now(), } myDB.upsert("comics", newValueStat, controlValueStat) logger.info(u"Updating complete for: " + ComicName) # let's check the pullist for anything at this time as well since we're here. if mylar.AUTOWANT_UPCOMING: logger.info(u"Checking this week's pullist for new issues of " + str(ComicName)) updater.newpullcheck() # here we grab issues that have been marked as wanted above... results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid]) if results: logger.info(u"Attempting to grab wanted issues for : " + ComicName) for result in results: foundNZB = "none" if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST): foundNZB = search.searchforissue(result["IssueID"]) if foundNZB == "yes": updater.foundsearch(result["ComicID"], result["IssueID"]) else: logger.info(u"No issues marked as wanted for " + ComicName) logger.info(u"Finished grabbing what I could.")
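# NOTE (editor): GCDimport leans on two date conventions: zero-padded
# 'YYYY-MM-DD' strings compare correctly as plain strings (which is why
# gcdval["GCDDate"] > latestdate picks the newest issue), and a GCD date
# containing '?' is unusable and gets zeroed out. Folded into one sketch -
# the original sanitises after the comparison, this is illustrative only:
def _latest_issue(gcd_rows):
    latestiss, latestdate = "0", "0000-00-00"
    for row in gcd_rows:
        issdate = str(row['GCDDate'])
        if '?' in issdate:  #partial GCD dates bork string comparison
            issdate = '0000-00-00'
        if issdate > latestdate:  #ISO-style strings sort chronologically
            latestiss, latestdate = str(row['GCDIssue']), issdate
    return latestiss, latestdate
#e.g. _latest_issue([{'GCDIssue': '1', 'GCDDate': '2011-09-00'},
#                    {'GCDIssue': '2', 'GCDDate': '2011-10-00'}]) -> ('2', '2011-10-00')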
def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=None):
    # Putting this here to get around the circular import. Will try to use this to update images at later date.
    # from mylar import cache

    myDB = db.DBConnection()

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage
    controlValueDict = {"ComicID": comicid}

    dbcomic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone()
    if dbcomic is None:
        newValueDict = {"ComicName": "Comic ID: %s" % (comicid),
                        "Status": "Loading"}
        comlocation = None
    else:
        newValueDict = {"Status": "Loading"}
        comlocation = dbcomic['ComicLocation']

    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now
    comic = cv.getComic(comicid, 'comic')
    #comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone()

    if not comic:
        logger.warn("Error fetching comic for ID: " + comicid)
        if dbcomic is None:
            newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (comicid),
                            "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return

    if comic['ComicName'].startswith('The '):
        sortname = comic['ComicName'][4:]
    else:
        sortname = comic['ComicName']

    logger.info(u"Now adding/updating: " + comic['ComicName'])

    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    if mismatch == "no" or mismatch is None:
        gcdinfo = parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid)
        #print ("gcdinfo: " + str(gcdinfo))
        mismatch_com = "no"
        if gcdinfo == "No Match":
            updater.no_searchresults(comicid)
            nomatch = "true"
            logger.info(u"There was an error when trying to add " + comic['ComicName'] + " (" + comic['ComicYear'] + ")")
            return nomatch
        else:
            mismatch_com = "yes"
            #print ("gcdinfo:" + str(gcdinfo))
    elif mismatch == "yes":
        CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
        if CV_EXcomicid['variloop'] is None:
            pass
        else:
            vari_loop = CV_EXcomicid['variloop']
            NewComicID = CV_EXcomicid['NewComicID']
            gcomicid = CV_EXcomicid['GComicID']
            resultURL = "/series/" + str(NewComicID) + "/"
            #print ("variloop" + str(CV_EXcomicid['variloop']))
            #if vari_loop == '99':
            gcdinfo = parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=comicid, TotalIssues=0, issvariation="no", resultPublished=None)

    logger.info(u"Successfully retrieved details for " + comic['ComicName'])
    # print ("Series Published" + parseit.resultPublished)

    #comic book location on machine
    # setup default location here
    if comlocation is None:
        # strip characters that can't be used in a directory name
        if ':' in comic['ComicName'] or '/' in comic['ComicName'] or ',' in comic['ComicName']:
            comicdir = comic['ComicName']
            if ':' in comicdir:
                comicdir = comicdir.replace(':', '')
            if '/' in comicdir:
                comicdir = comicdir.replace('/', '-')
            if ',' in comicdir:
                comicdir = comicdir.replace(',', '')
        else:
            comicdir = comic['ComicName']

        series = comicdir
        publisher = comic['ComicPublisher']
        year = comic['ComicYear']

        #do work to generate folder path
        values = {'$Series': series,
                  '$Publisher': publisher,
                  '$Year': year,
                  '$series': series.lower(),
                  '$publisher': publisher.lower(),
                  '$Volume': year
                  }

        #print mylar.FOLDER_FORMAT
        #print 'working dir:'
        #print helpers.replace_all(mylar.FOLDER_FORMAT, values)

        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.FOLDER_FORMAT == '':
            comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
        else:
            comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values)
        #comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with: underscore or dot
            comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)

    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
    else:
        #print ("Directory doesn't exist!")
        try:
            os.makedirs(str(comlocation))
            logger.info(u"Directory successfully created at: " + str(comlocation))
        except OSError:
            logger.error(u"Could not create comicdir : " + str(comlocation))

    #try to account for CV not updating new issues as fast as GCD
    #seems CV doesn't update total counts
    #comicIssues = gcdinfo['totalissues']
    if gcdinfo['gcdvariation'] == "cv":
        comicIssues = str(int(comic['ComicIssues']) + 1)
    else:
        comicIssues = comic['ComicIssues']

    #let's download the image...
    if not os.path.exists(mylar.CACHE_DIR):
        #let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
        except OSError:
            logger.error('Could not create cache dir. Check permissions of cache dir: ' + str(mylar.CACHE_DIR))

    coverfile = mylar.CACHE_DIR + "/" + str(comicid) + ".jpg"

    #try:
    urllib.urlretrieve(str(comic['ComicImage']), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = os.path.join('cache', str(comicid) + ".jpg")
            logger.info(u"Successfully retrieved cover for " + str(comic['ComicName']))
            #if the comic cover local is checked, save a cover.jpg to the series folder.
            if mylar.COMIC_COVER_LOCAL:
                comiclocal = os.path.join(str(comlocation), "cover.jpg")
                shutil.copy(ComicImage, comiclocal)
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")

    controlValueDict = {"ComicID": comicid}
    newValueDict = {"ComicName": comic['ComicName'],
                    "ComicSortName": sortname,
                    "ComicYear": comic['ComicYear'],
                    "ComicImage": ComicImage,
                    "Total": comicIssues,
                    "ComicLocation": comlocation,
                    "ComicPublisher": comic['ComicPublisher'],
                    "ComicPublished": gcdinfo['resultPublished'],
                    "DateAdded": helpers.today(),
                    "Status": "Loading"}

    myDB.upsert("comics", newValueDict, controlValueDict)

    issued = cv.getComic(comicid, 'issue')
    logger.info(u"Successfully retrieved issue details for " + comic['ComicName'])
    n = 0
    iscnt = int(comicIssues)
    issid = []
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    #print ("total issues:" + str(iscnt))
    #---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + comic['ComicName'])

    # file check to see if issue exists
    logger.info(u"Checking directory for existing issues.")
    #fc = filechecker.listFiles(dir=comlocation, watchcomic=comic['ComicName'])
    #havefiles = 0
    #fccnt = int(fc['comiccount'])
    #logger.info(u"Found " + str(fccnt) + "/" + str(iscnt) + " issues of " + comic['ComicName'] + "...verifying")
    #fcnew = []

    if iscnt > 0:
        #if a series is brand new, it won't have any issues/details yet so skip this part
        while (n <= iscnt):
            #---NEW.code
            try:
                firstval = issued['issuechoice'][n]
            except IndexError:
                break
            cleanname = helpers.cleanName(firstval['Issue_Name'])
            issid = str(firstval['Issue_ID'])
            issnum = str(firstval['Issue_Number'])
            issname = cleanname
            try:
                if '.' in str(issnum):
                    issn_st = str(issnum).find('.')
                    issn_b4dec = str(issnum)[:issn_st]
                    #if the length of decimal is only 1 digit, assume it's a tenth
                    dec_is = str(issnum)[issn_st + 1:]
                    if len(dec_is) == 1:
                        dec_nisval = int(dec_is) * 10
                        iss_naftdec = str(dec_nisval)
                    if len(dec_is) == 2:
                        dec_nisval = int(dec_is)
                        iss_naftdec = str(dec_nisval)
                    iss_issue = issn_b4dec + "." + iss_naftdec
                    issis = (int(issn_b4dec) * 1000) + dec_nisval
                else:
                    issis = int(issnum) * 1000
            except ValueError, e:
                logger.error(str(issnum) + " has an alpha-numeric in the issue # which I cannot account for.")
                return

            bb = 0
            while (bb <= iscnt):
                try:
                    gcdval = gcdinfo['gcdchoice'][bb]
                except IndexError:
                    #account for gcd variation here
                    if gcdinfo['gcdvariation'] == 'gcd':
                        #logger.fdebug("gcd-variation accounted for.")
                        issdate = '0000-00-00'
                        int_issnum = int(issis / 1000)
                    break
                if 'nn' in str(gcdval['GCDIssue']):
                    #no number detected - GN, TP or the like
                    logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
                    updater.no_searchresults(comicid)
                    return
                elif '.' in str(gcdval['GCDIssue']):
                    #logger.fdebug("g-issue:" + str(gcdval['GCDIssue']))
                    issst = str(gcdval['GCDIssue']).find('.')
                    #logger.fdebug("issst:" + str(issst))
                    issb4dec = str(gcdval['GCDIssue'])[:issst]
                    #logger.fdebug("issb4dec:" + str(issb4dec))
                    #if the length of decimal is only 1 digit, assume it's a tenth
                    decis = str(gcdval['GCDIssue'])[issst + 1:]
                    #logger.fdebug("decis:" + str(decis))
                    if len(decis) == 1:
                        decisval = int(decis) * 10
                        issaftdec = str(decisval)
                    if len(decis) == 2:
                        decisval = int(decis)
                        issaftdec = str(decisval)
                    gcd_issue = issb4dec + "." + issaftdec
                    #logger.fdebug("gcd_issue:" + str(gcd_issue))
                    gcdis = (int(issb4dec) * 1000) + decisval
                else:
                    gcdis = int(str(gcdval['GCDIssue'])) * 1000
                if gcdis == issis:
                    issdate = str(gcdval['GCDDate'])
                    int_issnum = int(gcdis / 1000)
                    #get the latest issue / date using the date.
                    if gcdval['GCDDate'] > latestdate:
                        latestiss = str(issnum)
                        latestdate = str(gcdval['GCDDate'])
                    break
                    #bb = iscnt
                bb += 1
            #print("(" + str(n) + ") IssueID: " + str(issid) + " IssueNo: " + str(issnum) + " Date" + str(issdate))
            #---END.NEW.

            # check if the issue already exists
            iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()

            controlValueDict = {"IssueID": issid}
            newValueDict = {"ComicID": comicid,
                            "ComicName": comic['ComicName'],
                            "IssueName": issname,
                            "Issue_Number": issnum,
                            "IssueDate": issdate,
                            "Int_IssueNumber": int_issnum
                            }

            # Only add DateAdded if the issue is not already in the database
            if iss_exists is None:
                newValueDict['DateAdded'] = helpers.today()

            if mylar.AUTOWANT_ALL:
                newValueDict['Status'] = "Wanted"
            elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
                newValueDict['Status'] = "Wanted"
            else:
                newValueDict['Status'] = "Skipped"

            # Don't change the status if the issue is already in the database
            if iss_exists:
                #print ("Existing status : " + str(iss_exists['Status']))
                newValueDict['Status'] = iss_exists['Status']

            try:
                myDB.upsert("issues", newValueDict, controlValueDict)
            except sqlite3.InterfaceError, e:
                #raise sqlite3.InterfaceError(e)
                logger.error("MAJOR error trying to get issue data, this is most likely a MULTI-VOLUME series and you need to use the custom_exceptions.csv file.")
                myDB.action("DELETE FROM comics WHERE ComicID=?", [comicid])
                return
            n += 1
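# The CV/GCD matching above hinges on encoding every issue number as a single
# integer: the whole number is multiplied by 1000 and the decimal portion is
# stored as hundredths (one decimal digit means tenths, so "12" -> 12000 and
# "12.5" -> 12050), which lets the two issue lists be compared with plain
# integer equality. A minimal standalone sketch of that encoding - the helper
# name is illustrative only and not part of mylar:
def _encode_issue_number(issnum):
    issnum = str(issnum)
    if '.' in issnum:
        whole, dec = issnum.split('.', 1)
        if len(dec) == 1:
            decval = int(dec) * 10   # one digit is tenths: ".5" -> 50 hundredths
        else:
            decval = int(dec[:2])    # two digits are already hundredths
        return (int(whole) * 1000) + decval
    return int(issnum) * 1000

# e.g. _encode_issue_number("12") == 12000 and _encode_issue_number("12.5") == 12050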
def syncreading(self):
    #3 statuses exist for the readlist:
    # Added (Not Read) - Issue is added to the readlist and is waiting to be 'sent' to your reading client.
    # Read - Issue has been read.
    # Not Read - Issue has been downloaded to your reading client after the syncfiles has taken place.
    module = '[READLIST-TRANSFER]'
    myDB = db.DBConnection()
    readlist = []
    cidlist = []
    sendlist = []

    if self.filelist is None:
        rl = myDB.select('SELECT * FROM readlist WHERE Status="Added"')
        if rl is None:
            logger.info(module + ' No issues have been marked to be synced. Aborting syncfiles')
            return
        for rlist in rl:
            readlist.append({"filepath": rlist['Location'],
                             "issueid": rlist['IssueID'],
                             "comicid": rlist['ComicID']})
    else:
        readlist = self.filelist

    if len(readlist) > 0:
        for clist in readlist:
            if clist['filepath'] == 'None' or clist['filepath'] is None:
                logger.warn(module + ' There was a problem with ComicID/IssueID: [' + str(clist['comicid']) + '/' + str(clist['issueid']) + ']. I cannot locate the file in the given location (try re-adding to your readlist) [' + str(clist['filepath']) + ']')
                continue
            else:
                # multiplecid = False
                # for x in cidlist:
                #     if clist['comicid'] == x['comicid']:
                #         comicid = x['comicid']
                #         comiclocation = x['location']
                #         multiplecid = True
                # if multiplecid == False:
                #     cid = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [clist['comicid']]).fetchone()
                #     if cid is None:
                #         continue
                #     else:
                #         comiclocation = cid['ComicLocation']
                #         comicid = cid['ComicID']
                #         if mylar.MULTIPLE_DEST_DIRS is not None and mylar.MULTIPLE_DEST_DIRS != 'None' and os.path.join(mylar.MULTIPLE_DEST_DIRS, os.path.basename(comiclocation)) != comiclocation:
                #             logger.fdebug(module + ' Multiple_dest_dirs:' + mylar.MULTIPLE_DEST_DIRS)
                #             logger.fdebug(module + ' Dir: ' + comiclocation)
                #             logger.fdebug(module + ' Os.path.basename: ' + os.path.basename(comiclocation))
                #             pathdir = os.path.join(mylar.MULTIPLE_DEST_DIRS, os.path.basename(comiclocation))
                if os.path.exists(clist['filepath']):
                    sendlist.append({"issueid": clist['issueid'],
                                     "filepath": clist['filepath'],
                                     "filename": os.path.split(clist['filepath'])[1]})
                # else:
                #     if os.path.exists(os.path.join(comiclocation, clist['filename'])):
                #         sendlist.append({"issueid": clist['issueid'],
                #                          "filepath": comiclocation,
                #                          "filename": clist['filename']})
                else:
                    logger.warn(module + ' ' + clist['filepath'] + ' does not exist in the given location. Remove from the Reading List and re-add, and/or confirm the file exists in the specified location')
                    continue
                # #cidlist is just for this reference loop to not make unnecessary db calls if the comicid has already been processed.
                # #store the comicid so we don't make multiple sql requests
                # cidlist.append({"comicid": clist['comicid'],
                #                 "issueid": clist['issueid'],
                #                 "location": comiclocation})

    if len(sendlist) == 0:
        logger.info(module + ' Nothing to send from your readlist')
        return

    logger.info(module + ' ' + str(len(sendlist)) + ' issues will be sent to your reading device.')

    # test if IP is up.
    import shlex
    import subprocess
    #fhost = mylar.TAB_HOST.find(':')
    host = mylar.TAB_HOST[:mylar.TAB_HOST.find(':')]
    cmdstring = str('ping -c1 ' + str(host))
    cmd = shlex.split(cmdstring)
    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError, e:
        logger.info(module + ' The host {0} is not reachable at this time.'.format(cmd[-1]))
        return
    else:
        logger.info(module + ' The host {0} is reachable. Preparing to send files.'.format(cmd[-1]))

    success = mylar.ftpsshup.sendfiles(sendlist)
    if len(success) > 0:
        for succ in success:
            newCTRL = {"issueid": succ['issueid']}
            newVAL = {"Status": 'Downloaded',
                      "StatusChange": helpers.today()}
            myDB.upsert("readlist", newVAL, newCTRL)
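# The reachability test above shells out to ping and treats a non-zero exit
# code as "host down". A minimal standalone version of the same check - the
# helper name is illustrative, and 'ping -c1' assumes a unix-like ping binary
# (Windows would need '-n 1' instead):
def _host_is_reachable(host):
    import shlex
    import subprocess
    cmd = shlex.split('ping -c1 ' + host)
    try:
        subprocess.check_output(cmd)   # raises CalledProcessError on non-zero exit
        return True
    except subprocess.CalledProcessError:
        return False

# e.g. _host_is_reachable(mylar.TAB_HOST[:mylar.TAB_HOST.find(':')])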
def GCDimport(gcomicid, pullupd=None, imported=None, ogcname=None):
    # this is for importing via GCD only and not using CV.
    # used when volume spanning is discovered for a Comic (and can't be added using CV).
    # Issue Counts are wrong (and can't be added).

    # because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
    # CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)
    gcdcomicid = gcomicid
    myDB = db.DBConnection()

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage
    controlValueDict = {"ComicID": gcdcomicid}

    comic = myDB.action('SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation, ComicPublisher FROM comics WHERE ComicID=?', [gcomicid]).fetchone()
    if comic is None:
        logger.warn("Error fetching comic for ID: " + gcdcomicid)
        newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (gcdcomicid),
                        "Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return

    ComicName = comic[0]
    ComicYear = comic[1]
    ComicIssues = comic[2]
    ComicPublished = comic[3]
    comlocation = comic[5]
    ComicPublisher = comic[6]
    #ComicImage = comic[4]
    #print ("Comic:" + str(ComicName))

    newValueDict = {"Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now
    #comic = cv.getComic(comicid,'comic')

    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(comicorder=mylar.COMICSORT, imported=gcomicid)

    if ComicName.startswith('The '):
        sortname = ComicName[4:]
    else:
        sortname = ComicName

    logger.info(u"Now adding/updating: " + ComicName)

    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    comicid = gcomicid[1:]
    resultURL = "/series/" + str(comicid) + "/"
    gcdinfo = parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=gcdcomicid, TotalIssues=ComicIssues, issvariation=None, resultPublished=None)
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")")
        updater.no_searchresults(gcomicid)
        nomatch = "true"
        return nomatch
    logger.info(u"Successfully retrieved details for " + ComicName)
    # print ("Series Published" + parseit.resultPublished)
    #--End

    ComicImage = gcdinfo['ComicImage']

    #comic book location on machine
    # setup default location here
    if comlocation is None:
        # let's remove the non-standard characters here.
        u_comicnm = ComicName
        u_comicname = u_comicnm.encode('ascii', 'ignore').strip()
        if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname:
            comicdir = u_comicname
            if ':' in comicdir:
                comicdir = comicdir.replace(':', '')
            if '/' in comicdir:
                comicdir = comicdir.replace('/', '-')
            if ',' in comicdir:
                comicdir = comicdir.replace(',', '')
        else:
            comicdir = u_comicname

        series = comicdir
        publisher = ComicPublisher
        year = ComicYear

        #do work to generate folder path
        values = {'$Series': series,
                  '$Publisher': publisher,
                  '$Year': year,
                  '$series': series.lower(),
                  '$publisher': publisher.lower(),
                  '$Volume': year
                  }

        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.FOLDER_FORMAT == '':
            comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
        else:
            comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values)
        #comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with: underscore or dot
            comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)

    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
    else:
        #print ("Directory doesn't exist!")
        #try:
        #    os.makedirs(str(comlocation))
        #    logger.info(u"Directory successfully created at: " + str(comlocation))
        #except OSError:
        #    logger.error(u"Could not create comicdir : " + str(comlocation))
        filechecker.validateAndCreateDirectory(comlocation, True)

    comicIssues = gcdinfo['totalissues']

    #let's download the image...
    if not os.path.exists(mylar.CACHE_DIR):
        #let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
        except OSError:
            logger.error(u"Could not create cache dir : " + str(mylar.CACHE_DIR))

    coverfile = os.path.join(mylar.CACHE_DIR, str(gcomicid) + ".jpg")

    #try:
    urllib.urlretrieve(str(ComicImage), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = os.path.join('cache', str(gcomicid) + ".jpg")
            #this is for Firefox when outside the LAN...it works, but I don't know how to implement it
            #without breaking the normal flow for inside the LAN (above)
            #ComicImage = "http://" + str(mylar.HTTP_HOST) + ":" + str(mylar.HTTP_PORT) + "/cache/" + str(comi$
            logger.info(u"Successfully retrieved cover for " + ComicName)
            #if the comic cover local is checked, save a cover.jpg to the series folder.
            if mylar.COMIC_COVER_LOCAL:
                comiclocal = os.path.join(str(comlocation), "cover.jpg")
                shutil.copy(ComicImage, comiclocal)
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")

    #if comic['ComicVersion'].isdigit():
    #    comicVol = "v" + comic['ComicVersion']
    #else:
    #    comicVol = None

    controlValueDict = {"ComicID": gcomicid}
    newValueDict = {"ComicName": ComicName,
                    "ComicSortName": sortname,
                    "ComicYear": ComicYear,
                    "Total": comicIssues,
                    "ComicLocation": comlocation,
                    #"ComicVersion": comicVol,
                    "ComicImage": ComicImage,
                    #"ComicPublisher": comic['ComicPublisher'],
                    #"ComicPublished": comicPublished,
                    "DateAdded": helpers.today(),
                    "Status": "Loading"}

    myDB.upsert("comics", newValueDict, controlValueDict)

    #comicsort here...
    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(sequence='update')

    logger.info(u"Successfully retrieved issue details for " + ComicName)
    n = 0
    iscnt = int(comicIssues)
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    #print ("total issues:" + str(iscnt))
    #---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + ComicName)
    bb = 0
    while (bb <= iscnt):
        #---NEW.code
        try:
            gcdval = gcdinfo['gcdchoice'][bb]
            #print ("gcdval: " + str(gcdval))
        except IndexError:
            #account for gcd variation here
            if gcdinfo['gcdvariation'] == 'gcd':
                #print ("gcd-variation accounted for.")
                #the CV-path issue encoding (issis) is not available in this import path
                issdate = '0000-00-00'
            break
        if 'nn' in str(gcdval['GCDIssue']):
            #no number detected - GN, TP or the like
            logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
            updater.no_searchresults(comicid)
            return
        elif '.' in str(gcdval['GCDIssue']):
            issst = str(gcdval['GCDIssue']).find('.')
            issb4dec = str(gcdval['GCDIssue'])[:issst]
            #if the length of decimal is only 1 digit, assume it's a tenth
            decis = str(gcdval['GCDIssue'])[issst + 1:]
            if len(decis) == 1:
                decisval = int(decis) * 10
                issaftdec = str(decisval)
            if len(decis) == 2:
                decisval = int(decis)
                issaftdec = str(decisval)
            if int(issaftdec) == 0:
                issaftdec = "00"
            gcd_issue = issb4dec + "." + issaftdec
            gcdis = (int(issb4dec) * 1000) + decisval
        else:
            gcdis = int(str(gcdval['GCDIssue'])) * 1000
            gcd_issue = str(gcdval['GCDIssue'])

        #get the latest issue / date using the date.
        int_issnum = int(gcdis / 1000)
        issdate = str(gcdval['GCDDate'])
        issid = "G" + str(gcdval['IssueID'])
        if gcdval['GCDDate'] > latestdate:
            latestiss = str(gcd_issue)
            latestdate = str(gcdval['GCDDate'])
        #print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate))
        #---END.NEW.

        # check if the issue already exists
        iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()

        #adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
        if "?" in str(issdate):
            issdate = "0000-00-00"

        controlValueDict = {"IssueID": issid}
        newValueDict = {"ComicID": gcomicid,
                        "ComicName": ComicName,
                        "Issue_Number": gcd_issue,
                        "IssueDate": issdate,
                        "Int_IssueNumber": int_issnum
                        }
        #print ("issueid:" + str(controlValueDict))
        #print ("values:" + str(newValueDict))

        # Only add DateAdded if the issue is not already in the database
        if iss_exists is None:
            newValueDict['DateAdded'] = helpers.today()

        if mylar.AUTOWANT_ALL:
            newValueDict['Status'] = "Wanted"
        elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
            newValueDict['Status'] = "Wanted"
        else:
            newValueDict['Status'] = "Skipped"

        # Don't change the status if the issue is already in the database
        if iss_exists:
            #print ("Existing status : " + str(iss_exists['Status']))
            newValueDict['Status'] = iss_exists['Status']

        myDB.upsert("issues", newValueDict, controlValueDict)
        bb += 1

    # logger.debug(u"Updating comic cache for " + ComicName)
    # cache.getThumb(ComicID=issue['issueid'])

    # logger.debug(u"Updating cache for: " + ComicName)
    # cache.getThumb(ComicIDcomicid)

    controlValueStat = {"ComicID": gcomicid}
    newValueStat = {"Status": "Active",
                    "LatestIssue": latestiss,
                    "LatestDate": latestdate,
                    "LastUpdated": helpers.now()
                    }
    myDB.upsert("comics", newValueStat, controlValueStat)

    if mylar.CVINFO:
        if not os.path.exists(os.path.join(comlocation, "cvinfo")):
            with open(os.path.join(comlocation, "cvinfo"), "w") as text_file:
                text_file.write("http://www.comicvine.com/volume/49-" + str(comicid))

    logger.info(u"Updating complete for: " + ComicName)

    #move the files...if imported is not empty (meaning it's not from the mass importer.)
    if imported is None or imported == 'None':
        pass
    else:
        if mylar.IMP_MOVE:
            logger.info("Mass import - Move files")
            moveit.movefiles(gcomicid, comlocation, ogcname)
        else:
            logger.info("Mass import - Moving not Enabled. Setting Archived Status for import.")
            moveit.archivefiles(gcomicid, ogcname)

        #check for existing files...
        updater.forceRescan(gcomicid)

    if pullupd is None:
        # let's check the pullist for anything at this time as well since we're here.
        if mylar.AUTOWANT_UPCOMING and 'Present' in ComicPublished:
            logger.info(u"Checking this week's pullist for new issues of " + ComicName)
            updater.newpullcheck(ComicName, gcomicid)

        #here we grab issues that have been marked as wanted above...
        results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid])
        if results:
            logger.info(u"Attempting to grab wanted issues for : " + ComicName)
            for result in results:
                foundNZB = "none"
                if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
                    foundNZB = search.searchforissue(result['IssueID'])
                    if foundNZB == "yes":
                        updater.foundsearch(result['ComicID'], result['IssueID'])
        else:
            logger.info(u"No issues marked as wanted for " + ComicName)

        logger.info(u"Finished grabbing what I could.")

    return
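# Both import paths above expand mylar.FOLDER_FORMAT by swapping the $-tokens
# ($Series, $Publisher, $Year, ...) for entries in the values dict via
# helpers.replace_all. Presumably that amounts to a sequential string
# substitution over the dict; a minimal sketch of the idea, not necessarily
# mylar's actual implementation:
def _replace_all(text, dic):
    # replace every occurrence of each token with its value, e.g.
    # _replace_all('$Series ($Year)', {'$Series': 'Batman', '$Year': '2011'})
    # -> 'Batman (2011)'
    for token, value in dic.items():
        text = text.replace(token, value)
    return text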