    def _Recent(self, **kwargs):
        """Build the 'New Arrivals' OPDS acquisition feed from recently snatched issues."""
        index = 0
        if 'index' in kwargs:
            index = int(kwargs['index'])
        myDB = db.DBConnection()
        links = []
        entries = []
        recents = self._dic_from_query('SELECT * from snatched WHERE Status = "Post-Processed" OR Status = "Downloaded" order by DateAdded DESC LIMIT 120')
        if index <= len(recents):
            number = 1
            subset = recents[index:(index + self.PAGE_SIZE)]
            for issue in subset:
                issuebook = myDB.fetch('SELECT * from issues WHERE IssueID = ?', (issue['IssueID'],)).fetchone()
                if not issuebook:
                    issuebook = myDB.fetch('SELECT * from annuals WHERE IssueID = ?', (issue['IssueID'],)).fetchone()
                comic = myDB.fetch('SELECT * from comics WHERE ComicID = ?', (issue['ComicID'],)).fetchone()
                updated = issue['DateAdded']
                image = None
                thumbnail = None
                if issuebook:
                    if 'ReleaseComicID' not in issuebook.keys():
                        if issuebook['DateAdded'] is None:
                            title = escape('%03d: %s #%s - %s (In stores %s)' % (index + number, issuebook['ComicName'], issuebook['Issue_Number'], issuebook['IssueName'], issuebook['ReleaseDate']))
                            image = issuebook['ImageURL_ALT']
                            thumbnail = issuebook['ImageURL']
                        else:
                            title = escape('%03d: %s #%s - %s (Added to Mylar %s, in stores %s)' % (index + number, issuebook['ComicName'], issuebook['Issue_Number'], issuebook['IssueName'], issuebook['DateAdded'], issuebook['ReleaseDate']))
                            image = issuebook['ImageURL_ALT']
                            thumbnail = issuebook['ImageURL']
                    else:
                        title = escape('%03d: %s Annual %s - %s (In stores %s)' % (index + number, issuebook['ComicName'], issuebook['Issue_Number'], issuebook['IssueName'], issuebook['ReleaseDate']))
                    # logger.info("%s - %s" % (comic['ComicLocation'], issuebook['Location']))
                    number += 1
                    if not issuebook['Location']:
                        continue
                    location = issuebook['Location']
                    fileloc = os.path.join(comic['ComicLocation'], issuebook['Location'])
                    metainfo = None
                    if mylar.CONFIG.OPDS_METAINFO:
                        issuedetails = mylar.helpers.IssueDetails(fileloc)
                        if issuedetails is not None:
                            metainfo = issuedetails.get('metadata', None)
                    if not metainfo:
                        metainfo = [{'writer': None, 'summary': ''}]
                    cb, _ = open_archive(fileloc)
                    if cb is None:
                        self.data = self._error_with_message('Can\'t open archive')
                        pse_count = 0  # Or just skip the issue?
                    else:
                        pse_count = page_count(cb)
                    entries.append(
                        {
                            'title': title,
                            'id': escape('comic:%s (%s) - %s' % (issuebook['ComicName'], comic['ComicYear'], issuebook['Issue_Number'])),
                            'updated': updated,
                            'content': escape('%s' % (metainfo[0]['summary'])),
                            'href': '%s?cmd=Issue&issueid=%s&file=%s' % (self.opdsroot, quote_plus(issuebook['IssueID']), quote_plus(location)),
                            'stream': '%s?cmd=Stream&issueid=%s&file=%s' % (self.opdsroot, quote_plus(issuebook['IssueID']), quote_plus(location)),
                            'pse_count': pse_count,
                            'kind': 'acquisition',
                            'rel': 'file',
                            'author': metainfo[0]['writer'],
                            'image': image,
                            'thumbnail': thumbnail,
                        }
                    )
        feed = {}
        feed['title'] = 'Mylar OPDS - New Arrivals'
        feed['id'] = escape('New Arrivals')
        feed['updated'] = mylar.helpers.now()
        links.append(getLink(href=self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
        links.append(getLink(href='%s?cmd=Recent' % (self.opdsroot), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='self'))
        if len(recents) > (index + self.PAGE_SIZE):
            links.append(
                getLink(href='%s?cmd=Recent&index=%s' % (self.opdsroot, index + self.PAGE_SIZE),
                        type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
        if index >= self.PAGE_SIZE:
            links.append(
                getLink(href='%s?cmd=Recent&index=%s' % (self.opdsroot, index - self.PAGE_SIZE),
                        type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
        feed['links'] = links
        feed['entries'] = entries
        self.data = feed
        return
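
    # Illustrative only (not part of the original module): assuming the OPDS handler
    # is mounted at /opds, a client request such as
    #   /opds?cmd=Recent&index=25
    # is dispatched to _Recent(index='25') and returns the second page of recently
    # post-processed issues when PAGE_SIZE is 25, with 'next'/'previous' links
    # advancing the index in PAGE_SIZE steps.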
    def _StoryArc(self, **kwargs):
        """Build an OPDS acquisition feed for a story arc, ordered by ReadingOrder."""
        index = 0
        if 'index' in kwargs:
            index = int(kwargs['index'])
        myDB = db.DBConnection()
        if 'arcid' not in kwargs:
            self.data = self._error_with_message('No ArcID Provided')
            return
        links = []
        entries = []
        arclist = self._dic_from_query("SELECT * from storyarcs WHERE StoryArcID='" + kwargs['arcid'] + "' ORDER BY ReadingOrder")
        newarclist = []
        arcname = ''
        for book in arclist:
            arcname = book['StoryArc']
            fileexists = False
            issue = {}
            issue['ReadingOrder'] = book['ReadingOrder']
            issue['Title'] = '%s #%s' % (book['ComicName'], book['IssueNumber'])
            issue['IssueID'] = book['IssueID']
            issue['fileloc'] = ''
            if book['Location']:
                issue['fileloc'] = book['Location']
                fileexists = True
                issue['filename'] = os.path.split(book['Location'])[1]
                issue['image'] = None
                issue['thumbnail'] = None
                issue['updated'] = book['IssueDate']
            else:
                bookentry = myDB.selectone("SELECT * from issues WHERE IssueID=?", (book['IssueID'],)).fetchone()
                if bookentry:
                    if bookentry['Location']:
                        comic = myDB.selectone("SELECT * from comics WHERE ComicID=?", (bookentry['ComicID'],)).fetchone()
                        fileexists = True
                        issue['fileloc'] = os.path.join(comic['ComicLocation'], bookentry['Location'])
                        issue['filename'] = bookentry['Location']
                        issue['image'] = bookentry['ImageURL_ALT']
                        issue['thumbnail'] = bookentry['ImageURL']
                        if bookentry['DateAdded']:
                            issue['updated'] = bookentry['DateAdded']
                        else:
                            issue['updated'] = bookentry['IssueDate']
                else:
                    annualentry = myDB.selectone("SELECT * from annuals WHERE IssueID=?", (book['IssueID'],)).fetchone()
                    if annualentry:
                        if annualentry['Location']:
                            comic = myDB.selectone("SELECT * from comics WHERE ComicID=?", (annualentry['ComicID'],)).fetchone()
                            fileexists = True
                            issue['fileloc'] = os.path.join(comic['ComicLocation'], annualentry['Location'])
                            issue['filename'] = annualentry['Location']
                            issue['image'] = None
                            issue['thumbnail'] = None
                            issue['updated'] = annualentry['IssueDate']
                    else:
                        if book['Location']:
                            fileexists = True
                            issue['fileloc'] = book['Location']
                            issue['filename'] = os.path.split(book['Location'])[1]
                            issue['image'] = None
                            issue['thumbnail'] = None
                            issue['updated'] = book['IssueDate']
            if not os.path.isfile(issue['fileloc']):
                fileexists = False
            if fileexists:
                newarclist.append(issue)
        if len(newarclist) > 0:
            if index <= len(newarclist):
                subset = newarclist[index:(index + self.PAGE_SIZE)]
                for issue in subset:
                    metainfo = None
                    if mylar.CONFIG.OPDS_METAINFO:
                        issuedetails = mylar.helpers.IssueDetails(issue['fileloc'])
                        if issuedetails is not None:
                            metainfo = issuedetails.get('metadata', None)
                    if not metainfo:
                        metainfo = [{'writer': None, 'summary': ''}]
                    # the full path was already resolved when newarclist was built
                    fileloc = issue['fileloc']
                    cb, _ = open_archive(fileloc)
                    if cb is None:
                        self.data = self._error_with_message('Can\'t open archive')
                        pse_count = 0  # Or just skip the issue?
                    else:
                        pse_count = page_count(cb)
                    entries.append(
                        {
                            'title': escape('%s - %s' % (issue['ReadingOrder'], issue['Title'])),
                            'id': escape('comic:%s' % issue['IssueID']),
                            'updated': issue['updated'],
                            'content': escape('%s' % (metainfo[0]['summary'])),
                            'href': '%s?cmd=Issue&issueid=%s&file=%s' % (self.opdsroot, quote_plus(issue['IssueID']), quote_plus(issue['filename'])),
                            'stream': '%s?cmd=Stream&issueid=%s&file=%s' % (self.opdsroot, quote_plus(issue['IssueID']), quote_plus(issue['filename'])),
                            'pse_count': pse_count,
                            'kind': 'acquisition',
                            'rel': 'file',
                            'author': metainfo[0]['writer'],
                            'image': issue['image'],
                            'thumbnail': issue['thumbnail'],
                        }
                    )
        feed = {}
        feed['title'] = 'Mylar OPDS - %s' % escape(arcname)
        feed['id'] = escape('storyarc:%s' % kwargs['arcid'])
        feed['updated'] = mylar.helpers.now()
        links.append(getLink(href=self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
        links.append(getLink(href='%s?cmd=StoryArc&arcid=%s' % (self.opdsroot, quote_plus(kwargs['arcid'])), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='self'))
        if len(newarclist) > (index + self.PAGE_SIZE):
            links.append(
                getLink(href='%s?cmd=StoryArc&arcid=%s&index=%s' % (self.opdsroot, quote_plus(kwargs['arcid']), index + self.PAGE_SIZE),
                        type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
        if index >= self.PAGE_SIZE:
            links.append(
                getLink(href='%s?cmd=StoryArc&arcid=%s&index=%s' % (self.opdsroot, quote_plus(kwargs['arcid']), index - self.PAGE_SIZE),
                        type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
        feed['links'] = links
        feed['entries'] = entries
        self.data = feed
        return
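
    # Illustrative only: assuming the OPDS root is mounted at /opds, a request such as
    #   /opds?cmd=StoryArc&arcid=<StoryArcID>
    # maps to _StoryArc(arcid=...) and lists the arc's issues that exist on disk, in
    # ReadingOrder, paginated in steps of PAGE_SIZE via the 'next'/'previous' links.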
    def _Comic(self, **kwargs):
        """Build an OPDS acquisition feed for a single comic series (issues plus annuals)."""
        index = 0
        if 'index' in kwargs:
            index = int(kwargs['index'])
        myDB = db.DBConnection()
        if 'comicid' not in kwargs:
            self.data = self._error_with_message('No ComicID Provided')
            return
        links = []
        entries = []
        comic = myDB.selectone('SELECT * from comics where ComicID=?', (kwargs['comicid'],)).fetchone()
        if not comic:
            self.data = self._error_with_message('Comic Not Found')
            return
        issues = self._dic_from_query('SELECT * from issues WHERE ComicID="' + kwargs['comicid'] + '" order by Int_IssueNumber DESC')
        if mylar.CONFIG.ANNUALS_ON:
            annuals = self._dic_from_query('SELECT * FROM annuals WHERE ComicID="' + kwargs['comicid'] + '"')
        else:
            annuals = []
        for annual in annuals:
            issues.append(annual)
        issues = [x for x in issues if x['Location']]
        if index <= len(issues):
            subset = issues[index:(index + self.PAGE_SIZE)]
            for issue in subset:
                if 'DateAdded' in issue and issue['DateAdded']:
                    updated = issue['DateAdded']
                else:
                    updated = issue['ReleaseDate']
                image = None
                thumbnail = None
                if 'ReleaseComicID' not in issue:
                    title = escape('%s (%s) #%s - %s' % (issue['ComicName'], comic['ComicYear'], issue['Issue_Number'], issue['IssueName']))
                    image = issue['ImageURL_ALT']
                    thumbnail = issue['ImageURL']
                else:
                    title = escape('Annual %s - %s' % (issue['Issue_Number'], issue['IssueName']))
                fileloc = os.path.join(comic['ComicLocation'], issue['Location'])
                if not os.path.isfile(fileloc):
                    logger.debug("Missing File: %s" % (fileloc))
                    continue
                metainfo = None
                if mylar.CONFIG.OPDS_METAINFO:
                    issuedetails = mylar.helpers.IssueDetails(fileloc)
                    if issuedetails is not None:
                        metainfo = issuedetails.get('metadata', None)
                if not metainfo:
                    metainfo = [{'writer': None, 'summary': ''}]
                cb, _ = open_archive(fileloc)
                if cb is None:
                    self.data = self._error_with_message('Can\'t open archive')
                    pse_count = 0  # Or just skip the issue?
                else:
                    pse_count = page_count(cb)
                entries.append(
                    {
                        'title': title,
                        'id': escape('comic:%s (%s) [%s] - %s' % (issue['ComicName'], comic['ComicYear'], comic['ComicID'], issue['Issue_Number'])),
                        'updated': updated,
                        'content': escape('%s' % (metainfo[0]['summary'])),
                        'href': '%s?cmd=Issue&issueid=%s&file=%s' % (self.opdsroot, quote_plus(issue['IssueID']), quote_plus(issue['Location'])),
                        'stream': '%s?cmd=Stream&issueid=%s&file=%s' % (self.opdsroot, quote_plus(issue['IssueID']), quote_plus(issue['Location'])),
                        'pse_count': pse_count,
                        'kind': 'acquisition',
                        'rel': 'file',
                        'author': metainfo[0]['writer'],
                        'image': image,
                        'thumbnail': thumbnail,
                    }
                )
        feed = {}
        comicname = '%s' % (escape(comic['ComicName']))
        feed['title'] = 'Mylar OPDS - %s' % (comicname)
        feed['id'] = escape('comic:%s (%s)' % (comic['ComicName'], comic['ComicYear']))
        feed['updated'] = comic['DateAdded']
        links.append(getLink(href=self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
        links.append(getLink(href='%s?cmd=Comic&comicid=%s' % (self.opdsroot, quote_plus(kwargs['comicid'])), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='self'))
        if len(issues) > (index + self.PAGE_SIZE):
            links.append(
                getLink(href='%s?cmd=Comic&comicid=%s&index=%s' % (self.opdsroot, quote_plus(kwargs['comicid']), index + self.PAGE_SIZE),
                        type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
        if index >= self.PAGE_SIZE:
            links.append(
                getLink(href='%s?cmd=Comic&comicid=%s&index=%s' % (self.opdsroot, quote_plus(kwargs['comicid']), index - self.PAGE_SIZE),
                        type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))
        feed['links'] = links
        feed['entries'] = entries
        self.data = feed
        return
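
    # Illustrative only: assuming the OPDS root is mounted at /opds, a request such as
    #   /opds?cmd=Comic&comicid=<ComicID>&index=0
    # maps to _Comic(comicid=..., index='0'); each entry's 'href' points back at
    # cmd=Issue for download and 'stream' at cmd=Stream for clients that support
    # page streaming (pse_count is the page count reported for that extension).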