def _delComic(self, **kwargs):
    if 'id' not in kwargs:
        self.data = self._failureResponse('Missing parameter: id')
        return
    self.id = kwargs['id']
    myDB = db.DBConnection()
    # parameterized queries rather than string concatenation into the SQL
    myDB.action('DELETE FROM comics WHERE ComicID=?', [self.id])
    myDB.action('DELETE FROM issues WHERE ComicID=?', [self.id])
    myDB.action('DELETE FROM upcoming WHERE ComicID=?', [self.id])

def _pauseComic(self, **kwargs):
    if 'id' not in kwargs:
        self.data = self._failureResponse('Missing parameter: id')
        return
    self.id = kwargs['id']
    myDB = db.DBConnection()
    controlValueDict = {'ComicID': self.id}
    newValueDict = {'Status': 'Paused'}
    myDB.upsert("comics", newValueDict, controlValueDict)

def _dic_from_query(self, query):
    myDB = db.DBConnection()
    rows = myDB.select(query)
    rows_as_dic = []
    for row in rows:
        row_as_dic = dict(zip(row.keys(), row))
        rows_as_dic.append(row_as_dic)
    return rows_as_dic

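# Usage sketch (illustrative only, not part of the module): assumes an
# instance whose DB contains a populated comics table.
#
#   rows = self._dic_from_query("SELECT ComicID, ComicName FROM comics")
#   for row in rows:
#       print(row['ComicName'])
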
def no_searchresults(ComicID):
    # when there's a mismatch between CV & GCD - change the status to
    # something other than 'Loaded'
    myDB = db.DBConnection()
    controlValue = {"ComicID": ComicID}
    newValue = {"Status": "Error",
                "LatestDate": "Error",
                "LatestIssue": "Error"}
    myDB.upsert("comics", newValue, controlValue)

def _resumeComic(self, **kwargs):
    if 'id' not in kwargs:
        self.data = 'Missing parameter: id'
        return
    self.id = kwargs['id']
    myDB = db.DBConnection()
    controlValueDict = {'ComicID': self.id}
    newValueDict = {'Status': 'Active'}
    myDB.upsert("comics", newValueDict, controlValueDict)

def _unqueueIssue(self, **kwargs):
    if 'id' not in kwargs:
        self.data = self._failureResponse('Missing parameter: id')
        return
    self.id = kwargs['id']
    myDB = db.DBConnection()
    controlValueDict = {'IssueID': self.id}
    newValueDict = {'Status': 'Skipped'}
    myDB.upsert("issues", newValueDict, controlValueDict)

def _StoryArcs(self, **kwargs):
    index = 0
    if 'index' in kwargs:
        index = int(kwargs['index'])
    myDB = db.DBConnection()
    links = []
    entries = []
    arcs = []
    storyArcs = mylar.helpers.listStoryArcs()
    for arc in storyArcs:
        issuecount = 0
        arcname = ''
        updated = '0000-00-00'
        arclist = myDB.select("SELECT * from storyarcs WHERE StoryArcID=?", (arc,))
        for issue in arclist:
            if issue['Status'] == 'Downloaded':
                issuecount += 1
                arcname = issue['StoryArc']
                if issue['IssueDate'] > updated:
                    updated = issue['IssueDate']
        if issuecount > 0:
            arcs.append({'StoryArcName': arcname,
                         'StoryArcID': arc,
                         'IssueCount': issuecount,
                         'updated': updated})
    newlist = sorted(arcs, key=itemgetter('StoryArcName'))
    subset = newlist[index:(index + self.PAGE_SIZE)]
    for arc in subset:
        entries.append(
            {
                'title': '%s (%s)' % (arc['StoryArcName'], arc['IssueCount']),
                'id': escape('storyarc:%s' % (arc['StoryArcID'])),
                'updated': arc['updated'],
                'content': '%s (%s)' % (arc['StoryArcName'], arc['IssueCount']),
                'href': '%s?cmd=StoryArc&arcid=%s' % (self.opdsroot, quote_plus(arc['StoryArcID'])),
                'kind': 'acquisition',
                'rel': 'subsection',
            }
        )
    feed = {}
    feed['title'] = 'Mylar OPDS - Story Arcs'
    feed['id'] = 'StoryArcs'
    feed['updated'] = mylar.helpers.now()
    links.append(getLink(href=self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
    links.append(getLink(href='%s?cmd=StoryArcs' % self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='self'))
    if len(arcs) > (index + self.PAGE_SIZE):
        links.append(
            getLink(href='%s?cmd=StoryArcs&index=%s' % (self.opdsroot, index + self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
    if index >= self.PAGE_SIZE:
        links.append(
            getLink(href='%s?cmd=StoryArcs&index=%s' % (self.opdsroot, index - self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
    feed['links'] = links
    feed['entries'] = entries
    self.data = feed
    return

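# A minimal sketch of the paging rule the OPDS feeds above follow: a 'next'
# link is offered while entries remain past the current window, and a
# 'previous' link once at least one full page has been consumed. The helper
# name and return shape are illustrative assumptions, not part of the module.
def _page_offsets(total, index, page_size):
    next_index = index + page_size if total > (index + page_size) else None
    prev_index = index - page_size if index >= page_size else None
    return next_index, prev_index

# e.g. _page_offsets(75, 30, 30) -> (60, 0): one page ahead, one behind.
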
def _queueIssue(self, **kwargs):
    if 'id' not in kwargs:
        self.data = self._error_with_message('Missing parameter: id')
        return
    self.id = kwargs['id']
    myDB = db.DBConnection()
    controlValueDict = {'IssueID': self.id}
    newValueDict = {'Status': 'Wanted'}
    myDB.upsert("issues", newValueDict, controlValueDict)
    search.searchforissue(self.id)

def weekly_update(ComicName, IssueNumber, CStatus, CID, futurepull=None, altissuenumber=None):
    if futurepull:
        logger.fdebug('future_update of table : ' + str(ComicName) + ' #:' + str(IssueNumber) + ' to a status of ' + str(CStatus))
    else:
        logger.fdebug('weekly_update of table : ' + str(ComicName) + ' #:' + str(IssueNumber) + ' to a status of ' + str(CStatus))
    if altissuenumber:
        logger.fdebug('weekly_update of table : ' + str(ComicName) + ' (Alternate Issue #):' + str(altissuenumber) + ' to a status of ' + str(CStatus))
    # update the status in the weekly table.
    # Issue is included to stop false hits on series that have multiple releases in a week;
    # CStatus updates the status flags on the Pullist screen.
    myDB = db.DBConnection()
    if futurepull is None:
        issuecheck = myDB.action("SELECT * FROM weekly WHERE COMIC=? AND ISSUE=?",
                                 [ComicName, IssueNumber]).fetchone()
    else:
        issuecheck = myDB.action("SELECT * FROM future WHERE COMIC=? AND ISSUE=?",
                                 [ComicName, IssueNumber]).fetchone()
    if issuecheck is not None:
        controlValue = {"COMIC": str(ComicName), "ISSUE": str(IssueNumber)}
        if CStatus:
            newValue = {"STATUS": CStatus, "ComicID": CID}
        else:
            if mylar.AUTOWANT_UPCOMING:
                newValue = {"STATUS": "Wanted"}
            else:
                newValue = {"STATUS": "Skipped"}
        if futurepull is None:
            myDB.upsert("weekly", newValue, controlValue)
        else:
            logger.info('checking ' + str(issuecheck['ComicID']) + ' status of : ' + str(CStatus))
            if issuecheck['ComicID'] is not None and CStatus is not None:
                newValue = {"STATUS": "Wanted",
                            "ComicID": issuecheck['ComicID']}
            logger.info('updating value: ' + str(newValue))
            logger.info('updating control: ' + str(controlValue))
            myDB.upsert("future", newValue, controlValue)

def markFailed(self):
    # use this to forcibly mark a single issue as Failed (ie. a search result is
    # sent to a client, but the response is a 404 or similar, so the download
    # can't be initiated).
    module = '[FAILED-DOWNLOAD]'
    myDB = db.DBConnection()
    logger.info(module + ' Marking as a Failed Download.')
    logger.fdebug(module + ' nzb_name: ' + self.nzb_name)
    logger.fdebug(module + ' issueid: ' + str(self.issueid))
    logger.fdebug(module + ' nzb_id: ' + str(self.id))
    logger.fdebug(module + ' prov: ' + self.prov)
    logger.fdebug(module + ' oneoffinfo: ' + str(self.oneoffinfo))
    if self.oneoffinfo:
        ComicName = self.oneoffinfo['ComicName']
        IssueNumber = self.oneoffinfo['IssueNumber']
    else:
        if 'annual' in self.nzb_name.lower():
            logger.info(module + ' Annual detected.')
            annchk = "yes"
            issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [self.issueid]).fetchone()
        else:
            issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [self.issueid]).fetchone()
        ctrlVal = {"IssueID": self.issueid}
        Vals = {"Status": 'Failed'}
        myDB.upsert("issues", Vals, ctrlVal)
        ComicName = issuenzb['ComicName']
        IssueNumber = issuenzb['Issue_Number']
    ctrlVal = {"ID": self.id,
               "Provider": self.prov,
               "NZBName": self.nzb_name}
    Vals = {"Status": 'Failed',
            "ComicName": ComicName,
            "Issue_Number": IssueNumber,
            "IssueID": self.issueid,
            "ComicID": self.comicid,
            "DateFailed": helpers.now()}
    myDB.upsert("failed", Vals, ctrlVal)
    logger.info(module + ' Successfully marked as Failed.')

def is_exists(comicid):
    myDB = db.DBConnection()
    # See if the comic is already in the database
    comiclist = myDB.select('SELECT ComicID, ComicName from comics WHERE ComicID=?', [comicid])
    if any(comicid in x for x in comiclist):
        logger.info(comiclist[0][1] + u" is already in the database.")
        return True
    else:
        return False

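# Usage sketch (illustrative; assumes a populated comics table and an
# already-tracked ComicID):
#
#   if is_exists('1234'):
#       logger.info('ComicID 1234 already tracked - skipping the add.')
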
def archivefiles(comicid, ogcname):
    myDB = db.DBConnection()
    # if move files isn't enabled, set all found comics to Archived status
    result = myDB.select("SELECT * FROM importresults WHERE ComicName=?", [ogcname])
    if result:
        ogdir = result[0]['Location']
        origdir = os.path.join(os.path.dirname(ogdir))
        # send to the rescanner with archive mode turned on
        updater.forceRescan(comicid, archive=origdir)

def nzbdbsearch(seriesname, issue, comicid=None, nzbprov=None):
    myDB = db.DBConnection()
    seriesname_alt = None
    if comicid is None or comicid == 'None':
        pass
    else:
        snm = myDB.action("SELECT * FROM comics WHERE comicid=?", [comicid]).fetchone()
        if snm is None:
            logger.info('Invalid ComicID of ' + str(comicid) + '. Aborting search.')
            return
        else:
            seriesname = snm['ComicName']
            seriesname_alt = snm['AlternateSearch']

    nsearch_seriesname = re.sub(r'[\'\!\@\#\$\%\:\;\/\\=\?\.\s]', '%', seriesname)
    formatrem_seriesname = re.sub(r'[\'\!\@\#\$\%\:\;\/\\=\?\.]', '', seriesname)
    nsearch = nsearch_seriesname + "%"
    nresults = myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site != 'comicBT' AND Site != 'KAT'", [nsearch])
    if not nresults:
        logger.fdebug('nzb search returned no results for ' + seriesname)
        if seriesname_alt is None:
            logger.fdebug('no nzb Alternate name given. Aborting search.')
            return "no results"
        else:
            chkthealt = seriesname_alt.split('##')
            if len(chkthealt) == 0:
                AS_Alternate = seriesname_alt
            for calt in chkthealt:
                AS_Alternate = re.sub('##', '', calt)
                nresults += myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site != 'comicBT' AND Site != 'KAT'", [AS_Alternate])
            if not nresults:
                logger.fdebug('nzb alternate name search returned no results.')
                return "no results"
    nzbtheinfo = []
    nzbinfo = {}
    for nzb in nresults:
        # no need to parse here, just compile and throw it back....
        nzbtheinfo.append({
            'title': nzb['Title'],
            'link': nzb['Link'],
            'pubdate': nzb['Pubdate'],
            'site': nzb['Site'],
            'length': nzb['Size']})
        logger.fdebug("entered info for " + nzb['Title'])
    nzbinfo['entries'] = nzbtheinfo
    return nzbinfo

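# Doctest-style illustration of the LIKE-pattern sanitization above; the
# sample series name is an assumption.
#
#   >>> import re
#   >>> re.sub(r'[\'\!\@\#\$\%\:\;\/\\=\?\.\s]', '%', "Jack of Fables") + "%"
#   'Jack%of%Fables%'
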
def loaditup(comicname, comicid, issue, chktype):
    myDB = db.DBConnection()
    issue_number = helpers.issuedigits(issue)
    if chktype == 'annual':
        typedisplay = 'annual issue'
        logger.fdebug('[' + comicname + '] trying to locate ' + str(typedisplay) + ' ' + str(issue) + ' to do comparative issue analysis for pull-list')
        issueload = myDB.action('SELECT * FROM annuals WHERE ComicID=? AND Int_IssueNumber=?', [comicid, issue_number]).fetchone()
    else:
        typedisplay = 'issue'
        logger.fdebug('[' + comicname + '] trying to locate ' + str(typedisplay) + ' ' + str(issue) + ' to do comparative issue analysis for pull-list')
        issueload = myDB.action('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?', [comicid, issue_number]).fetchone()

    if issueload is None:
        logger.fdebug('No results matched for Issue number - either this is a NEW issue with no data yet, or something is wrong')
        return 'no results'

    dataissue = []
    releasedate = issueload['ReleaseDate']
    storedate = issueload['IssueDate']
    status = issueload['Status']
    if releasedate == '0000-00-00':
        logger.fdebug('Store date of 0000-00-00 returned for ' + str(typedisplay) + ' # ' + str(issue) + '. Refreshing series to see if valid date present')
        mismatch = 'no'
        issuerecheck = mylar.importer.addComictoDB(comicid, mismatch, calledfrom='weekly', issuechk=issue_number, issuetype=chktype)
        if issuerecheck is not None:
            for il in issuerecheck:
                # this is only one record..
                releasedate = il['IssueDate']
                storedate = il['ReleaseDate']
                status = il['Status']
                logger.fdebug('issue-recheck releasedate is : ' + str(releasedate))
                logger.fdebug('issue-recheck storedate of : ' + str(storedate))

    if releasedate is not None and releasedate != "None" and releasedate != "":
        logger.fdebug('Returning Release Date for ' + str(typedisplay) + ' # ' + str(issue) + ' of ' + str(releasedate))
        thedate = re.sub("[^0-9]", "", releasedate)  # convert date to numerics only (should be in yyyymmdd)
    else:
        logger.fdebug('Returning Publication Date for issue ' + str(typedisplay) + ' # ' + str(issue) + ' of ' + str(storedate))
        if storedate is None or storedate == "None" or storedate == "":
            logger.fdebug('no issue data available - both release date & store date. Returning no results')
            return 'no results'
        thedate = re.sub("[^0-9]", "", storedate)  # convert date to numerics only (should be in yyyymmdd)

    dataissue.append({"issuedate": thedate, "status": status})
    return dataissue

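# Doctest-style illustration of the date normalization used above (strip
# everything but digits so dates compare as yyyymmdd):
#
#   >>> import re
#   >>> re.sub("[^0-9]", "", "2019-03-27")
#   '20190327'
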
def series_folder_collision_detection(self, comlocation, comicid, booktype, comicyear, volume):
    myDB = db.DBConnection()
    # parameterize the LIKE clause rather than concatenating the path into the SQL
    chk = myDB.select('SELECT * FROM comics WHERE ComicLocation LIKE ? AND ComicID != ?',
                      ['%' + comlocation + '%', comicid])
    tryit = None
    if chk:
        for ck in chk:
            comloc = ck['ComicLocation']
            if comloc == comlocation:
                logger.info('[SERIES_FOLDER_COLLISION_DETECTION] %s already exists for %s (%s).' % (ck['ComicLocation'], ck['ComicName'], ck['ComicYear']))
                tmp_ff = os.path.basename(mylar.CONFIG.FOLDER_FORMAT)
                if ck['ComicYear'] != comicyear:
                    volumeyear = True
                else:
                    volumeyear = False
                if '$Type' not in tmp_ff and booktype != ck['Type']:
                    logger.fdebug('[SERIES_FOLDER_COLLISION_DETECTION] Trying to rename using BookType declaration.')
                    new_format = '%s [%s]' % (tmp_ff, '$Type')
                elif '$Volume' not in tmp_ff:
                    logger.fdebug('[SERIES_FOLDER_COLLISION_DETECTION] Trying to rename using Volume declaration.')
                    # use volume instead of ck['ComicVersion'] since volume has already
                    # had changes applied in the other module
                    volume_choice = '$VolumeY'
                    if volumeyear is False:
                        if volume is None:
                            volume_choice = '$VolumeY'
                        else:
                            volume_choice = '$VolumeN'
                    t_name = tmp_ff.find('$Series')
                    if t_name != -1:
                        new_format = '%s %s %s' % (tmp_ff[:t_name + len('$Series')], volume_choice, tmp_ff[t_name + len('$Series') + 1:])
                self.comic = {'ComicPublisher': ck['ComicPublisher'],
                              'PublisherImprint': ck['PublisherImprint'],
                              'Corrected_Type': ck['Corrected_Type'],
                              'Type': booktype,
                              'ComicYear': comicyear,
                              'ComicName': ck['ComicName'],
                              'ComicLocation': ck['ComicLocation'],
                              'ComicVersion': volume}
                update_loc = {'temppath': mylar.CONFIG.DESTINATION_DIR,
                              'tempff': True,
                              'tempformat': new_format,
                              'comicid': ck['ComicID']}
                tryit = self.folder_create(update_loc=update_loc)
                break
    logger.fdebug('tryit_response: %s' % (tryit,))
    return tryit

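# Doctest-style illustration of the folder-format surgery above: the volume
# token is spliced in immediately after '$Series'. The sample format string is
# an assumption.
#
#   >>> tmp_ff = '$Series ($Year)'
#   >>> t_name = tmp_ff.find('$Series')
#   >>> '%s %s %s' % (tmp_ff[:t_name + len('$Series')], '$VolumeN', tmp_ff[t_name + len('$Series') + 1:])
#   '$Series $VolumeN ($Year)'
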
def foundsearch(ComicID, IssueID, down=None):
    # When doing a Force Search (Wanted tab), the resulting search calls this to update.
    # This is all redundant code that forceRescan already does.
    # It should be redone at some point so that instead of rescanning the entire
    # series directory, it just scans for the issue that was downloaded and
    # changes the status to Snatched accordingly. The have count is not
    # incremented at this stage since nothing is downloaded yet - the .nzb has
    # just been snatched and sent to SAB.
    myDB = db.DBConnection()
    comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
    issue = myDB.action('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
    CYear = issue['IssueDate'][:4]
    if down is None:
        # update the status to Snatched (so it won't keep on re-downloading!)
        logger.fdebug("updating status to snatched")
        controlValue = {"IssueID": IssueID}
        newValue = {"Status": "Snatched"}
        myDB.upsert("issues", newValue, controlValue)
        # update the snatched DB
        snatchedupdate = {"IssueID": IssueID, "Status": "Snatched"}
        newsnatchValues = {"ComicName": comic['ComicName'],
                           "ComicID": ComicID,
                           "Issue_Number": issue['Issue_Number'],
                           "DateAdded": helpers.now(),
                           "Status": "Snatched"}
        myDB.upsert("snatched", newsnatchValues, snatchedupdate)
    else:
        snatchedupdate = {"IssueID": IssueID, "Status": "Downloaded"}
        newsnatchValues = {"ComicName": comic['ComicName'],
                           "ComicID": ComicID,
                           "Issue_Number": issue['Issue_Number'],
                           "DateAdded": helpers.now(),
                           "Status": "Downloaded"}
        myDB.upsert("snatched", newsnatchValues, snatchedupdate)
    logger.info(u"Updating now complete for " + comic['ComicName'] + " issue: " + str(issue['Issue_Number']))
    return

def movefiles(comicid, comlocation, ogcname, imported=None):
    myDB = db.DBConnection()
    logger.fdebug('comlocation is : ' + str(comlocation))
    logger.fdebug('original comicname is : ' + str(ogcname))
    impres = myDB.select("SELECT * from importresults WHERE ComicName=?", [ogcname])
    if impres:
        for impr in impres:
            srcimp = impr['ComicLocation']
            orig_filename = impr['ComicFilename']
            orig_iss = impr['impID'].rfind('-')
            orig_iss = impr['impID'][orig_iss + 1:]
            logger.fdebug("Issue :" + str(orig_iss))
            # before moving, check to see if Rename to Mylar structure is enabled.
            if mylar.IMP_RENAME and mylar.FILE_FORMAT != '':
                logger.fdebug("Renaming files according to configuration details : " + str(mylar.FILE_FORMAT))
                renameit = helpers.rename_param(comicid, impr['ComicName'], orig_iss, orig_filename)
                nfilename = renameit['nfilename']
                dstimp = os.path.join(comlocation, nfilename)
            else:
                logger.fdebug("Renaming files not enabled, keeping original filename(s)")
                dstimp = os.path.join(comlocation, orig_filename)
            logger.info("moving " + str(srcimp) + " ... to " + str(dstimp))
            try:
                shutil.move(srcimp, dstimp)
            except (OSError, IOError):
                logger.error("Failed to move files - check directories and manually re-run.")
        logger.fdebug("all files moved.")
    # now that everything's moved / renamed, remove it from importresults or mark as completed.
    results = myDB.select("SELECT * from importresults WHERE ComicName=?", [ogcname])
    if results:
        for result in results:
            controlValue = {"impID": result['impid']}
            newValue = {"Status": "Imported"}
            myDB.upsert("importresults", newValue, controlValue)
    return

def rssdbupdate(feeddata, i, type):
    rsschktime = 15
    myDB = db.DBConnection()
    # add the entries into the db to save on searches
    # and to build up the IDs
    x = 1
    while x <= i:
        try:
            dataval = feeddata[x]
        except IndexError:
            logger.fdebug('reached the end of populating. Exiting the process.')
            break
        # remove the passkey so it doesn't end up in the db
        if type == 'torrent':
            newlink = dataval['link'][:(dataval['link'].find('&passkey'))]
            newVal = {"Link": newlink,
                      "Pubdate": dataval['pubdate'],
                      "Site": dataval['site'],
                      "Size": dataval['size']}
            ctrlVal = {"Title": dataval['title']}
        else:
            newlink = dataval['Link']
            newVal = {"Link": newlink,
                      "Pubdate": dataval['Pubdate'],
                      "Site": dataval['Site'],
                      "Size": dataval['Size']}
            ctrlVal = {"Title": dataval['Title']}
        myDB.upsert("rssdb", newVal, ctrlVal)
        x += 1
    logger.fdebug('Completed adding new data to RSS DB. Next add in ' + str(mylar.RSS_CHECKINTERVAL) + ' minutes')
    return

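# Doctest-style illustration of the passkey stripping above; the tracker URL
# is hypothetical. Note that if '&passkey' is absent, find() returns -1 and
# the slice silently drops the final character.
#
#   >>> link = 'https://tracker.example/get.php?id=123&passkey=abcdef'
#   >>> link[:link.find('&passkey')]
#   'https://tracker.example/get.php?id=123'
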
def _Publishers(self, **kwargs):
    index = 0
    if 'index' in kwargs:
        index = int(kwargs['index'])
    myDB = db.DBConnection()
    feed = {}
    feed['title'] = 'Mylar OPDS - Publishers'
    feed['id'] = 'Publishers'
    feed['updated'] = mylar.helpers.now()
    links = []
    entries = []
    links.append(getLink(href=self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
    links.append(getLink(href='%s?cmd=Publishers' % self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='self'))
    publishers = myDB.select("SELECT ComicPublisher from comics GROUP BY ComicPublisher")
    comics = mylar.helpers.havetotals()
    for publisher in publishers:
        lastupdated = '0000-00-00'
        totaltitles = 0
        for comic in comics:
            if comic['ComicPublisher'] == publisher['ComicPublisher'] and comic['haveissues'] > 0:
                totaltitles += 1
                if comic['DateAdded'] > lastupdated:
                    lastupdated = comic['DateAdded']
        if totaltitles > 0:
            entries.append(
                {
                    'title': escape('%s (%s)' % (publisher['ComicPublisher'], totaltitles)),
                    'id': escape('publisher:%s' % publisher['ComicPublisher']),
                    'updated': lastupdated,
                    'content': escape('%s (%s)' % (publisher['ComicPublisher'], totaltitles)),
                    'href': '%s?cmd=Publisher&pubid=%s' % (self.opdsroot, quote_plus(publisher['ComicPublisher'])),
                    'kind': 'navigation',
                    'rel': 'subsection',
                }
            )
    if len(entries) > (index + self.PAGE_SIZE):
        links.append(
            getLink(href='%s?cmd=Publishers&index=%s' % (self.opdsroot, index + self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
    if index >= self.PAGE_SIZE:
        links.append(
            getLink(href='%s?cmd=Publishers&index=%s' % (self.opdsroot, index - self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
    feed['links'] = links
    feed['entries'] = entries[index:(index + self.PAGE_SIZE)]
    self.data = feed
    return

def weekly_update(ComicName, IssueNumber, CStatus, CID):
    # update the status in the weekly table.
    # Issue is included to stop false hits on series that have multiple releases in a week;
    # CStatus updates the status flags on the Pullist screen.
    myDB = db.DBConnection()
    issuecheck = myDB.action("SELECT * FROM weekly WHERE COMIC=? AND ISSUE=?",
                             [ComicName, IssueNumber]).fetchone()
    if issuecheck is not None:
        controlValue = {"COMIC": str(ComicName), "ISSUE": str(IssueNumber)}
        if CStatus:
            newValue = {"STATUS": CStatus, "ComicID": CID}
        else:
            if mylar.AUTOWANT_UPCOMING:
                newValue = {"STATUS": "Wanted"}
            else:
                newValue = {"STATUS": "Skipped"}
        myDB.upsert("weekly", newValue, controlValue)

def failed_check(self):
    # ID = ID passed by search upon a match, when preparing to send it to the client to download.
    # ID is provider dependent, so the same file will have a different ID for every provider.
    module = '[FAILED_DOWNLOAD_CHECKER]'
    myDB = db.DBConnection()
    # Querying on NZBName alone would return downloads regardless of provider.
    # This makes sure the files being downloaded are distinguished by provider.
    # A later improvement might be to break it down by provider so that Mylar
    # will attempt to download the same issue from different providers (albeit
    # it shouldn't matter - if it's broke, it's broke).
    logger.info('prov : ' + str(self.prov) + '[' + str(self.id) + ']')
    if 'indexerguid' in self.id:
        # if this is from nzbhydra, rejig the id line so that the searchid is
        # removed, since it's always unique to the search.
        st = self.id.find('searchid:')
        end = self.id.find(',', st)
        self.id = '%' + self.id[:st] + '%' + self.id[end + 1:len(self.id) - 1] + '%'
        chk_fail = myDB.selectone('SELECT * FROM failed WHERE ID LIKE ?', [self.id]).fetchone()
    else:
        chk_fail = myDB.selectone('SELECT * FROM failed WHERE ID=?', [self.id]).fetchone()
    if chk_fail is None:
        logger.info(module + ' Successfully marked this download as Good for downloadable content')
        return 'Good'
    else:
        if chk_fail['status'] == 'Good':
            logger.info(module + ' result has a status of GOOD - which means it does not currently exist in the failed download list.')
            return chk_fail['status']
        elif chk_fail['status'] == 'Failed':
            logger.info(module + ' result has a status of FAIL which indicates it is not a good choice to download.')
            logger.info(module + ' continuing search for another download.')
            return chk_fail['status']
        elif chk_fail['status'] == 'Retry':
            logger.info(module + ' result has a status of RETRY which indicates it was a failed download that was retried.')
            return chk_fail['status']
        elif chk_fail['status'] == 'Retrysame':
            logger.info(module + ' result has a status of RETRYSAME which indicates it was a failed download that retried the initial download.')
            return chk_fail['status']
        else:
            logger.info(module + ' result has a status of ' + chk_fail['status'] + '. I am not sure what to do now.')
            return "nope"

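# Doctest-style illustration of the nzbhydra ID rejig above: the per-search
# 'searchid:<value>,' token is cut out and the remainder is wrapped in SQL
# LIKE wildcards. The ID layout shown is a hypothetical example; the final
# character is deliberately dropped by the len()-1 slice.
#
#   >>> rid = 'indexerguid:abc,searchid:99,prov:xx.'
#   >>> st = rid.find('searchid:')
#   >>> end = rid.find(',', st)
#   >>> '%' + rid[:st] + '%' + rid[end + 1:len(rid) - 1] + '%'
#   '%indexerguid:abc,%prov:xx%'
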
def pullit(forcecheck=None):
    myDB = db.DBConnection()
    popit = myDB.select("SELECT count(*) FROM sqlite_master WHERE name='weekly' AND type='table'")
    if popit:
        try:
            pull_date = myDB.action("SELECT SHIPDATE from weekly").fetchone()
            logger.info(u"Weekly pull list present - checking if it's up-to-date..")
            if pull_date is None:
                pulldate = '00000000'
            else:
                pulldate = pull_date['SHIPDATE']
        except (sqlite3.OperationalError, TypeError) as msg:
            conn = sqlite3.connect(mylar.DB_FILE)
            c = conn.cursor()
            logger.info(u"Error Retrieving weekly pull list - attempting to adjust")
            c.execute('DROP TABLE weekly')
            c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE text, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text)')
            pulldate = '00000000'
            logger.fdebug(u"Table re-created, trying to populate")

def _Publisher(self, **kwargs):
    index = 0
    if 'index' in kwargs:
        index = int(kwargs['index'])
    myDB = db.DBConnection()
    if 'pubid' not in kwargs:
        self.data = self._error_with_message('No Publisher Provided')
        return
    links = []
    entries = []
    allcomics = mylar.helpers.havetotals()
    for comic in allcomics:
        if comic['ComicPublisher'] == kwargs['pubid'] and comic['haveissues'] > 0:
            entries.append(
                {
                    'title': escape('%s (%s)' % (comic['ComicName'], comic['ComicYear'])),
                    'id': escape('comic:%s (%s)' % (comic['ComicName'], comic['ComicYear'])),
                    'updated': comic['DateAdded'],
                    'content': escape('%s (%s)' % (comic['ComicName'], comic['ComicYear'])),
                    'href': '%s?cmd=Comic&comicid=%s' % (self.opdsroot, quote_plus(comic['ComicID'])),
                    'kind': 'acquisition',
                    'rel': 'subsection',
                }
            )
    feed = {}
    pubname = '%s (%s)' % (escape(kwargs['pubid']), len(entries))
    feed['title'] = 'Mylar OPDS - %s' % (pubname)
    feed['id'] = 'publisher:%s' % escape(kwargs['pubid'])
    feed['updated'] = mylar.helpers.now()
    links.append(getLink(href=self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
    links.append(getLink(href='%s?cmd=Publishers' % self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='self'))
    if len(entries) > (index + self.PAGE_SIZE):
        links.append(
            getLink(href='%s?cmd=Publisher&pubid=%s&index=%s' % (self.opdsroot, quote_plus(kwargs['pubid']), index + self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
    if index >= self.PAGE_SIZE:
        links.append(
            getLink(href='%s?cmd=Publisher&pubid=%s&index=%s' % (self.opdsroot, quote_plus(kwargs['pubid']), index - self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
    feed['links'] = links
    feed['entries'] = entries[index:(index + self.PAGE_SIZE)]
    self.data = feed
    return

def rssdbupdate(feeddata, i, type):
    rsschktime = 15
    myDB = db.DBConnection()
    # add the entries into the db to save on searches
    # and to build up the IDs
    for dataval in feeddata:
        if type == 'torrent':
            # only the torrent IDs are stored now.
            if dataval['site'] == '32P':
                newlink = dataval['link']
            else:
                # store the hash/id from KAT
                newlink = os.path.basename(re.sub('.torrent', '', dataval['link'][:dataval['link'].find('?title')]))
            newVal = {"Link": newlink,
                      "Pubdate": dataval['pubdate'],
                      "Site": dataval['site'],
                      "Size": dataval['size']}
            ctrlVal = {"Title": dataval['title']}
        else:
            newlink = dataval['Link']
            newVal = {"Link": newlink,
                      "Pubdate": dataval['Pubdate'],
                      "Site": dataval['Site'],
                      "Size": dataval['Size']}
            ctrlVal = {"Title": dataval['Title']}
        myDB.upsert("rssdb", newVal, ctrlVal)
    logger.fdebug('Completed adding new data to RSS DB. Next add in ' + str(mylar.RSS_CHECKINTERVAL) + ' minutes')
    return

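# Doctest-style illustration of the torrent-link normalization above:
# everything from '?title' on is cut, the '.torrent' suffix is removed, and
# the basename (the hash/id) is kept. The URL is hypothetical.
#
#   >>> import os, re
#   >>> link = 'https://kat.example/file/ABCDEF0123.torrent?title=foo'
#   >>> os.path.basename(re.sub('.torrent', '', link[:link.find('?title')]))
#   'ABCDEF0123'
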
def __init__(self, comic=None, issue=None, ComicID=None, IssueID=None):
    self.myDB = db.DBConnection()
    if ComicID is not None:
        self.comicid = ComicID
        self.comic = self.myDB.selectone('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
    elif comic is not None:
        self.comic = comic
        self.comicid = None
    else:
        self.comic = None
        self.comicid = None
    if IssueID is not None:
        self.issueid = IssueID
        self.issue = self.myDB.select('SELECT * FROM issues WHERE IssueID=?', [IssueID])
    elif issue is not None:
        self.issue = issue
        self.issueid = None
    else:
        self.issue = None
        self.issueid = None

def loaditup(comicname, comicid, issue):
    myDB = db.DBConnection()
    issue_number = helpers.issuedigits(issue)
    logger.fdebug('[' + comicname + '] trying to locate issue ' + str(issue) + ' to do comparative issue analysis for pull-list')
    issueload = myDB.action('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?',
                            [comicid, issue_number]).fetchone()
    if issueload is None:
        logger.fdebug('No results matched for Issue number - either this is a NEW series with no data yet, or something is wrong')
        return 'no results'
    if issueload['ReleaseDate'] is not None and issueload['ReleaseDate'] != 'None':
        logger.fdebug('Returning Release Date for issue # ' + str(issue) + ' of ' + str(issueload['ReleaseDate']))
        return issueload['ReleaseDate']
    else:
        logger.fdebug('Returning Publication Date for issue # ' + str(issue) + ' of ' + str(issueload['PublicationDate']))
        return issueload['PublicationDate']

def __init__(self, comic=None, issue=None, ComicID=None, IssueID=None, arcID=None):
    self.myDB = db.DBConnection()
    self.weekly = None
    if ComicID is not None:
        self.comicid = ComicID
        self.comic = self.myDB.selectone('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
        if not self.comic:
            self.weekly = self.myDB.selectone('SELECT * FROM weekly WHERE ComicID=? AND IssueID=?', [ComicID, IssueID]).fetchone()
            if not self.weekly:
                self.comic = None
    elif comic is not None:
        self.comic = comic
        self.comicid = None
    else:
        self.comic = None
        self.comicid = None
    if IssueID is not None:
        self.issueid = IssueID
        self.issue = self.myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
        if not self.issue:
            if mylar.CONFIG.ANNUALS_ON:
                self.issue = self.myDB.selectone('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone()
            if not self.issue:
                self.issue = None
    elif issue is not None:
        self.issue = issue
        self.issueid = None
    else:
        self.issue = None
        self.issueid = None
    if arcID is not None:
        self.arcid = arcID
        self.arc = self.myDB.selectone('SELECT * FROM storyarcs WHERE IssueArcID=?', [arcID]).fetchone()
    else:
        self.arc = None
        self.arcid = None

def _AllTitles(self, **kwargs):
    index = 0
    if 'index' in kwargs:
        index = int(kwargs['index'])
    myDB = db.DBConnection()
    feed = {}
    feed['title'] = 'Mylar OPDS - All Titles'
    feed['id'] = 'AllTitles'
    feed['updated'] = mylar.helpers.now()
    links = []
    entries = []
    links.append(getLink(href=self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
    links.append(getLink(href='%s?cmd=AllTitles' % self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='self'))
    comics = mylar.helpers.havetotals()
    for comic in comics:
        if comic['haveissues'] > 0:
            entries.append(
                {
                    'title': escape('%s (%s) (comicID: %s)' % (comic['ComicName'], comic['ComicYear'], comic['ComicID'])),
                    'id': escape('comic:%s (%s) [%s]' % (comic['ComicName'], comic['ComicYear'], comic['ComicID'])),
                    'updated': comic['DateAdded'],
                    'content': escape('%s (%s)' % (comic['ComicName'], comic['ComicYear'])),
                    'href': '%s?cmd=Comic&comicid=%s' % (self.opdsroot, quote_plus(comic['ComicID'])),
                    'kind': 'acquisition',
                    'rel': 'subsection',
                }
            )
    if len(entries) > (index + self.PAGE_SIZE):
        links.append(
            getLink(href='%s?cmd=AllTitles&index=%s' % (self.opdsroot, index + self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
    if index >= self.PAGE_SIZE:
        links.append(
            getLink(href='%s?cmd=AllTitles&index=%s' % (self.opdsroot, index - self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
    feed['links'] = links
    feed['entries'] = entries[index:(index + self.PAGE_SIZE)]
    self.data = feed
    return

def nzblog(IssueID, NZBName, ComicName, SARC=None, IssueArcID=None):
    myDB = db.DBConnection()
    newValue = {"NZBName": NZBName}
    if IssueID is None or IssueID == 'None':
        # if IssueID is None, it's a one-off download from the pull-list.
        # give it a generic ID above the last one so it doesn't throw an error later.
        print("SARC detected as: " + str(SARC))
        if mylar.HIGHCOUNT == 0:
            IssueID = '900000'
        else:
            IssueID = int(mylar.HIGHCOUNT) + 1
        if SARC:
            IssueID = 'S' + str(IssueArcID)
            newValue['SARC'] = SARC
    controlValue = {"IssueID": IssueID}
    myDB.upsert("nzblog", newValue, controlValue)

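# Illustration of the one-off ID fallback above (values assumed): with
# mylar.HIGHCOUNT at 0, the log row gets the sentinel IssueID '900000'; with
# HIGHCOUNT at 90210, it becomes 90211; and for a story-arc grab the IssueID
# is overridden to 'S' + IssueArcID, e.g. 'S76283'.
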
def markasRead(self, IssueID=None, IssueArcID=None):
    myDB = db.DBConnection()
    if IssueID:
        issue = myDB.selectone('SELECT * from readlist WHERE IssueID=?', [IssueID]).fetchone()
        if issue['Status'] == 'Read':
            NewVal = {"Status": "Added"}
        else:
            NewVal = {"Status": "Read"}
        NewVal['StatusChange'] = helpers.today()
        CtrlVal = {"IssueID": IssueID}
        myDB.upsert("readlist", NewVal, CtrlVal)
        logger.info(self.module + ' Marked ' + issue['ComicName'] + ' #' + str(issue['Issue_Number']) + ' as Read.')
    elif IssueArcID:
        issue = myDB.selectone('SELECT * from readinglist WHERE IssueArcID=?', [IssueArcID]).fetchone()
        if issue['Status'] == 'Read':
            NewVal = {"Status": "Added"}
        else:
            NewVal = {"Status": "Read"}
        NewVal['StatusChange'] = helpers.today()
        CtrlVal = {"IssueArcID": IssueArcID}
        myDB.upsert("readinglist", NewVal, CtrlVal)
        logger.info(self.module + ' Marked ' + issue['ComicName'] + ' #' + str(issue['IssueNumber']) + ' as Read.')
    else:
        logger.info(self.module + ' Could not mark anything as read, no IssueID or IssueArcID passed')
    return