def GET(self, mode='xml'):
    """Serve the aggregator root navigation catalog.

    mode -- output flavor: 'html' renders the HTML view, anything else
            (default 'xml') renders the OPDS Atom feed.
    Returns the serialized catalog as a string; sets the Content-Type
    response header as a side effect.
    """
    now = catalog.getCurrentDate()
    root = catalog.Catalog(
        title=pubInfo['name'] + ' Aggregator',
        urn=pubInfo['urnroot'],
        url=pubInfo['opdsroot'],
        datestr=now,
        author=pubInfo['name'],
        authorUri=pubInfo['uri'],
    )

    # (link target, urn suffix, entry title, entry description)
    sections = [
        ('alpha.' + mode, ':titles:all',
         'Alphabetical By Title', 'Alphabetical list of all titles.'),
        ('providers.' + mode, ':providers:all',
         'By Provider', 'Listing of all publishers and sellers.'),
    ]
    for target, urnSuffix, title, description in sections:
        navLink = catalog.Link(url=target, type=bookserver.catalog.Link.opds)
        root.addEntry(catalog.Entry({
            'title': title,
            'urn': pubInfo['urnroot'] + urnSuffix,
            'updated': now,
            'content': description,
        }, links=[navLink]))

    # Advertise the shared OpenSearch description document.
    osDescriptionDoc = 'http://bookserver.archive.org/aggregator/opensearch.xml'
    root.addOpenSearch(catalog.OpenSearch(osDescriptionDoc))

    if 'html' == mode:
        web.header('Content-Type', 'text/html')
        renderer = output.ArchiveCatalogToHtml(root, device=getDevice())
    else:
        web.header('Content-Type', pubInfo['mimetype'])
        renderer = output.CatalogToAtom(root)
    return renderer.toString()
def GET(self, mode='xml'):
    """Serve the aggregator root navigation catalog.

    mode -- output flavor: 'html' renders the HTML view, anything else
            (default 'xml') renders the OPDS Atom feed.
    Returns the serialized catalog as a string; sets the Content-Type
    response header as a side effect.

    Improvements over the previous version: the repeated Link/Entry
    construction is factored into one loop over a data table, and the
    long-dead commented-out 'By Device' section has been removed.
    """
    datestr = catalog.getCurrentDate()
    c = catalog.Catalog(
        title=pubInfo['name'] + ' Aggregator',
        urn=pubInfo['urnroot'],
        url=pubInfo['opdsroot'],
        datestr=datestr,
        author=pubInfo['name'],
        authorUri=pubInfo['uri'],
    )

    # One navigation entry per (url, urn suffix, title, content) row.
    for url, urnSuffix, title, content in (
        ('alpha.' + mode, ':titles:all',
         'Alphabetical By Title', 'Alphabetical list of all titles.'),
        ('providers.' + mode, ':providers:all',
         'By Provider', 'Listing of all publishers and sellers.'),
    ):
        l = catalog.Link(url=url, type=bookserver.catalog.Link.opds)
        e = catalog.Entry({
            'title': title,
            'urn': pubInfo['urnroot'] + urnSuffix,
            'updated': datestr,
            'content': content,
        }, links=[l])
        c.addEntry(e)

    # Advertise the shared OpenSearch description document.
    osDescriptionDoc = 'http://bookserver.archive.org/aggregator/opensearch.xml'
    c.addOpenSearch(catalog.OpenSearch(osDescriptionDoc))

    if 'html' == mode:
        web.header('Content-Type', 'text/html')
        r = output.ArchiveCatalogToHtml(c, device=getDevice())
    else:
        web.header('Content-Type', pubInfo['mimetype'])
        r = output.CatalogToAtom(c)
    return r.toString()
def GET(self, extension):
    """Serve the 'all titles' A-Z navigation catalog.

    extension -- output flavor: 'xml' renders the OPDS Atom feed,
                 anything else renders the HTML view.
    Returns the serialized catalog as a string; sets the Content-Type
    response header (looked up in the module-level `types` map) as a
    side effect.
    """
    # IA is continuously scanning books. Since this OPDS file is constructed
    # from search engine results, let's change the updated date every midnight.
    # TODO: create a version of /alpha.xml with the correct updated dates,
    # and cache it for an hour to ease load on solr.
    datestr = catalog.getCurrentDate()
    c = catalog.Catalog(
        title=pubInfo['name'] + ' Aggregator - All Titles',
        urn=pubInfo['urnroot'] + ':titles:all',
        # BUGFIX: the self URL previously hardcoded '/alpha.xml' even when
        # the HTML flavor was requested; reflect the actual extension.
        url=pubInfo['opdsroot'] + '/alpha.' + extension,
        datestr=datestr,
        author=pubInfo['name'],
        authorUri=pubInfo['uri'],
    )
    # One navigation entry per initial letter, linking to page 0 of that
    # letter's listing.
    for letter in string.ascii_uppercase:
        lower = letter.lower()
        l = catalog.Link(url=self.alphaURL(extension, lower, 0),
                         type=bookserver.catalog.Link.opds)
        e = catalog.Entry({
            'title': 'Titles: ' + letter,
            'urn': pubInfo['urnroot'] + ':titles:' + lower,
            'updated': datestr,
            'content': 'Titles starting with ' + letter,
        }, links=[l])
        c.addEntry(e)
    osDescriptionDoc = 'http://bookserver.archive.org/aggregator/opensearch.xml'
    c.addOpenSearch(catalog.OpenSearch(osDescriptionDoc))
    web.header('Content-Type', types[extension])
    if 'xml' == extension:
        r = output.CatalogToAtom(c)
    else:
        r = output.ArchiveCatalogToHtml(c, device=getDevice())
    return r.toString()
def GET(self, mode):
    """Serve the 'all providers' navigation catalog.

    mode -- output flavor: 'xml' renders the OPDS Atom feed, anything
            else renders the HTML view.
    Returns the serialized catalog as a string; sets the Content-Type
    response header as a side effect.
    """
    # TODO: get correct updated dates
    now = catalog.getCurrentDate()
    feed = catalog.Catalog(
        title=pubInfo['name'] + ' Aggregator - All Providers',
        urn=pubInfo['urnroot'] + ':providers:all',
        url=pubInfo['opdsroot'] + '/providers.' + mode,
        datestr=now,
        author=pubInfo['name'],
        authorUri=pubInfo['uri'],
    )

    # HTML links carry an explicit extension.
    # $$$ should do URL mapping in output side?
    suffix = '.html' if 'html' == mode else ''

    for providerId, providerName in providers.items():
        navLink = catalog.Link(url='provider/' + providerId + '/0' + suffix,
                               type=bookserver.catalog.Link.opds)
        feed.addEntry(catalog.Entry({
            'title': providerName,
            'urn': pubInfo['urnroot'] + ':providers:' + providerId,
            'updated': now,
            'content': 'All Titles for provider ' + providerId,
        }, links=[navLink]))

    osDescriptionDoc = 'http://bookserver.archive.org/aggregator/opensearch.xml'
    feed.addOpenSearch(catalog.OpenSearch(osDescriptionDoc))

    web.header('Content-Type', types[mode])
    if 'xml' == mode:
        renderer = output.CatalogToAtom(feed)
    else:
        renderer = output.ArchiveCatalogToHtml(feed, device=getDevice())
    return renderer.toString()
def GET(self, mode):
    """Serve the 'all providers' navigation catalog.

    mode -- output flavor: 'xml' renders the OPDS Atom feed, anything
            else renders the HTML view.
    Returns the serialized catalog as a string; sets the Content-Type
    response header as a side effect.
    """
    # TODO: get correct updated dates
    datestr = catalog.getCurrentDate()
    c = catalog.Catalog(
        title=pubInfo['name'] + ' Aggregator - All Providers',
        urn=pubInfo['urnroot'] + ':providers:all',
        url=pubInfo['opdsroot'] + '/providers.' + mode,
        datestr=datestr,
        author=pubInfo['name'],
        authorUri=pubInfo['uri'],
    )
    for provider in providers:
        if 'html' != mode:
            ext = ''
        else:
            # $$$ should do URL mapping in output side?
            ext = '.html'
        entryLink = catalog.Link(
            url='provider/' + provider + '/0' + ext,
            type=bookserver.catalog.Link.opds,
        )
        entry = catalog.Entry(
            {
                'title': providers[provider],
                'urn': pubInfo['urnroot'] + ':providers:' + provider,
                'updated': datestr,
                'content': 'All Titles for provider ' + provider,
            },
            links=[entryLink],
        )
        c.addEntry(entry)
    osDoc = 'http://bookserver.archive.org/aggregator/opensearch.xml'
    c.addOpenSearch(catalog.OpenSearch(osDoc))
    web.header('Content-Type', types[mode])
    if 'xml' != mode:
        result = output.ArchiveCatalogToHtml(c, device=getDevice())
    else:
        result = output.CatalogToAtom(c)
    return result.toString()
def GET(self, extension):
    """Serve the 'all titles' A-Z navigation catalog.

    extension -- output flavor: 'xml' renders the OPDS Atom feed,
                 anything else renders the HTML view.
    Returns the serialized catalog as a string; sets the Content-Type
    response header (looked up in the module-level `types` map) as a
    side effect.
    """
    # IA is continuously scanning books. Since this OPDS file is constructed
    # from search engine results, let's change the updated date every midnight.
    # TODO: create a version of /alpha.xml with the correct updated dates,
    # and cache it for an hour to ease load on solr.
    datestr = catalog.getCurrentDate()
    c = catalog.Catalog(
        title=pubInfo['name'] + ' Aggregator - All Titles',
        urn=pubInfo['urnroot'] + ':titles:all',
        # BUGFIX: the self URL previously hardcoded '/alpha.xml' even when
        # the HTML flavor was requested; reflect the actual extension.
        url=pubInfo['opdsroot'] + '/alpha.' + extension,
        datestr=datestr,
        author=pubInfo['name'],
        authorUri=pubInfo['uri'],
    )
    # One navigation entry per initial letter, linking to page 0 of that
    # letter's listing.
    for letter in string.ascii_uppercase:
        lower = letter.lower()
        l = catalog.Link(url=self.alphaURL(extension, lower, 0),
                         type=bookserver.catalog.Link.opds)
        e = catalog.Entry({
            'title': 'Titles: ' + letter,
            'urn': pubInfo['urnroot'] + ':titles:' + lower,
            'updated': datestr,
            'content': 'Titles starting with ' + letter,
        }, links=[l])
        c.addEntry(e)
    osDescriptionDoc = 'http://bookserver.archive.org/aggregator/opensearch.xml'
    c.addOpenSearch(catalog.OpenSearch(osDescriptionDoc))
    web.header('Content-Type', types[extension])
    if 'xml' == extension:
        r = output.CatalogToAtom(c)
    else:
        r = output.ArchiveCatalogToHtml(c, device=getDevice())
    return r.toString()