def test_get_coverstore_url(monkeypatch):
    """get_coverstore_url falls back to the public host, honors config,
    and always strips a trailing slash."""
    # No coverstore_url configured: the public default is returned.
    assert h.get_coverstore_url() == "http://covers.openlibrary.org"

    from infogami import config

    monkeypatch.setattr(config, "coverstore_url", "http://0.0.0.0:8090", raising=False)
    assert h.get_coverstore_url() == "http://0.0.0.0:8090"

    # make sure trailing / is always stripped
    monkeypatch.setattr(config, "coverstore_url", "http://0.0.0.0:8090/", raising=False)
    assert h.get_coverstore_url() == "http://0.0.0.0:8090"
def test_get_coverstore_url(monkeypatch):
    """Check default coverstore URL, config override, and slash stripping."""
    assert h.get_coverstore_url() == "http://covers.openlibrary.org"

    from infogami import config

    # Override via infogami config; raising=False because the attribute
    # may not exist yet on a fresh config object.
    monkeypatch.setattr(config, "coverstore_url", "http://0.0.0.0:8090", raising=False)
    assert h.get_coverstore_url() == "http://0.0.0.0:8090"

    # make sure trailing / is always stripped
    monkeypatch.setattr(config, "coverstore_url", "http://0.0.0.0:8090/", raising=False)
    assert h.get_coverstore_url() == "http://0.0.0.0:8090"
def process_doc(doc):
    """Convert a raw solr work document into a display-ready dict.

    :param doc: dict-like solr document; must contain 'key'. May also
        contain 'title', paired 'author_key'/'author_name' lists,
        'cover_edition_key', and an 'ia' list of archive.org identifiers.
    :return: dict with 'url' and 'title', plus optional 'authors',
        'cover_url', and 'read_url' entries.
    :raises KeyError: if 'key' is missing from doc.
    """
    d = {}
    d['url'] = "/works/" + doc['key']
    d['title'] = doc.get('title', '')

    if 'author_key' in doc and 'author_name' in doc:
        # solr returns the two lists in parallel; zip pairs them up.
        d['authors'] = [
            {"key": akey, "name": name}
            for akey, name in zip(doc['author_key'], doc['author_name'])
        ]

    if 'cover_edition_key' in doc:
        d['cover_url'] = (
            h.get_coverstore_url() + "/b/olid/%s-M.jpg" % doc['cover_edition_key']
        )

    # Fix: the original read doc['ia'][0] unconditionally, raising
    # KeyError/IndexError for works with no scanned copy. Guard like
    # every other optional field.
    if doc.get('ia'):
        d['read_url'] = "//archive.org/stream/" + doc['ia'][0]

    return d
def info(self):
    """Fetch this image's metadata from the coverstore service.

    Returns a web.storage dict with 'created' parsed into a datetime and
    'author' resolved to a site object (or left falsy), or None when the
    coverstore cannot be reached.
    """
    url = f"{h.get_coverstore_url()}/{self.category}/id/{self.id}.json"
    # Protocol-relative config values can't be fetched directly.
    if url.startswith("//"):
        url = "http:" + url

    try:
        raw = urllib.request.urlopen(url).read()
        d = simplejson.loads(raw)
        d['created'] = h.parse_datetime(d['created'])
        # The service serializes a missing author as the string 'None'.
        if d['author'] == 'None':
            d['author'] = None
        if d['author']:
            d['author'] = self._site.get(d['author'])
        return web.storage(d)
    except IOError:
        # coverstore is down
        return None
def process_doc(doc):
    """Convert a raw solr work document into a display-ready dict.

    :param doc: dict-like solr document; must contain 'key'. May also
        contain 'title', paired 'author_key'/'author_name' lists,
        'cover_edition_key', and an 'ia' list of archive.org identifiers.
    :return: dict with 'url' and 'title', plus optional 'authors',
        'cover_url', and 'read_url' entries.
    :raises KeyError: if 'key' is missing from doc.
    """
    d = {}
    d['url'] = "/works/" + doc['key']
    d['title'] = doc.get('title', '')

    if 'author_key' in doc and 'author_name' in doc:
        # solr returns the two lists in parallel; zip pairs them up.
        d['authors'] = [
            {"key": akey, "name": name}
            for akey, name in zip(doc['author_key'], doc['author_name'])
        ]

    if 'cover_edition_key' in doc:
        d['cover_url'] = (
            h.get_coverstore_url() + "/b/olid/%s-M.jpg" % doc['cover_edition_key']
        )

    # Fix: the original read doc['ia'][0] unconditionally, raising
    # KeyError/IndexError for works with no scanned copy. Guard like
    # every other optional field.
    if doc.get('ia'):
        d['read_url'] = "http://www.archive.org/stream/" + doc['ia'][0]

    return d
def process_doc(doc):
    """Convert a raw solr work document into a display-ready dict.

    Normalizes the work key to the '/works/...' form, then builds url,
    title, optional authors, optional cover image URL, and an optional
    archive.org read URL.

    :param doc: dict-like solr document; must contain 'key'.
    :return: dict with 'url' and 'title', plus optional 'authors',
        'cover_url', and 'read_url' entries.
    :raises KeyError: if 'key' is missing from doc.
    """
    d = {}

    key = doc['key']
    # New solr stores the key as /works/OLxxxW
    if not key.startswith("/works/"):
        key = "/works/" + key
    d['url'] = key
    d['title'] = doc.get('title', '')

    if 'author_key' in doc and 'author_name' in doc:
        # solr returns the two lists in parallel; zip pairs them up.
        d['authors'] = [
            {"key": akey, "name": name}
            for akey, name in zip(doc['author_key'], doc['author_name'])
        ]

    if 'cover_edition_key' in doc:
        d['cover_url'] = (
            h.get_coverstore_url() + "/b/olid/%s-M.jpg" % doc['cover_edition_key']
        )

    # Fix: the original read doc['ia'][0] unconditionally, raising
    # KeyError/IndexError for works with no scanned copy. Guard like
    # every other optional field.
    if doc.get('ia'):
        d['read_url'] = "//archive.org/stream/" + doc['ia'][0]

    return d
def process_doc(doc):
    """Convert a raw solr work document into a display-ready dict.

    Normalizes the work key to the '/works/...' form, then builds url,
    title, optional authors, optional cover image URL, and an optional
    archive.org read URL.

    :param doc: dict-like solr document; must contain 'key'.
    :return: dict with 'url' and 'title', plus optional 'authors',
        'cover_url', and 'read_url' entries.
    :raises KeyError: if 'key' is missing from doc.
    """
    d = {}

    key = doc["key"]
    # New solr stores the key as /works/OLxxxW
    if not key.startswith("/works/"):
        key = "/works/" + key
    d["url"] = key
    d["title"] = doc.get("title", "")

    if "author_key" in doc and "author_name" in doc:
        # solr returns the two lists in parallel; zip pairs them up.
        d["authors"] = [
            {"key": akey, "name": name}
            for akey, name in zip(doc["author_key"], doc["author_name"])
        ]

    if "cover_edition_key" in doc:
        d["cover_url"] = (
            h.get_coverstore_url() + "/b/olid/%s-M.jpg" % doc["cover_edition_key"]
        )

    # Fix: the original read doc["ia"][0] unconditionally, raising
    # KeyError/IndexError for works with no scanned copy. Guard like
    # every other optional field.
    if doc.get("ia"):
        d["read_url"] = "//archive.org/stream/" + doc["ia"][0]

    return d
def process_doc(doc):
    """Convert a raw solr work document into a display-ready dict.

    Normalizes the work key to the '/works/...' form (tolerating a
    missing 'key'), then builds url, title, optional authors, optional
    cover image URL, and an optional archive.org read URL.

    :param doc: dict-like solr document.
    :return: dict with 'url' and 'title', plus optional 'authors',
        'cover_url', and 'read_url' entries.
    """
    d = {}

    key = doc.get('key', '')
    # New solr stores the key as /works/OLxxxW
    if not key.startswith("/works/"):
        key = "/works/" + key
    d['url'] = key
    d['title'] = doc.get('title', '')

    if 'author_key' in doc and 'author_name' in doc:
        # solr returns the two lists in parallel; zip pairs them up.
        d['authors'] = [
            {"key": akey, "name": name}
            for akey, name in zip(doc['author_key'], doc['author_name'])
        ]

    if 'cover_edition_key' in doc:
        d['cover_url'] = (
            h.get_coverstore_url() + "/b/olid/%s-M.jpg" % doc['cover_edition_key']
        )

    # Fix: the original read doc['ia'][0] unconditionally, raising
    # KeyError/IndexError for works with no scanned copy. Guard like
    # every other optional field.
    if doc.get('ia'):
        d['read_url'] = "//archive.org/stream/" + doc['ia'][0]

    return d
def format_work_data(work):
    """Return a copy of a solr work document augmented for display.

    Starts from a shallow copy of ``work`` (all original fields are
    preserved), normalizes the key into 'url', and adds optional
    'authors', 'cover_url', and 'read_url' entries.

    :param work: dict-like solr work document.
    :return: new dict; ``work`` itself is not mutated.
    """
    d = dict(work)

    key = work.get('key', '')
    # New solr stores the key as /works/OLxxxW
    if not key.startswith("/works/"):
        key = "/works/" + key
    d['url'] = key
    d['title'] = work.get('title', '')

    if 'author_key' in work and 'author_name' in work:
        # solr returns the two lists in parallel; zip pairs them up.
        d['authors'] = [
            {"key": akey, "name": name}
            for akey, name in zip(work['author_key'], work['author_name'])
        ]

    if 'cover_edition_key' in work:
        d['cover_url'] = (
            h.get_coverstore_url() + "/b/olid/%s-M.jpg" % work['cover_edition_key']
        )

    # Fix: the original read work['ia'][0] unconditionally, raising
    # KeyError/IndexError for works with no scanned copy. Guard like
    # every other optional field.
    if work.get('ia'):
        d['read_url'] = "//archive.org/stream/" + work['ia'][0]

    return d
def format_work_data(work):
    """Return a copy of a solr work document augmented for display.

    Starts from a shallow copy of ``work`` (all original fields are
    preserved), normalizes the key into 'url', and adds optional
    'authors', 'cover_url', and 'read_url' entries.

    :param work: dict-like solr work document.
    :return: new dict; ``work`` itself is not mutated.
    """
    d = dict(work)

    key = work.get('key', '')
    # New solr stores the key as /works/OLxxxW
    if not key.startswith("/works/"):
        key = "/works/" + key
    d['url'] = key
    d['title'] = work.get('title', '')

    if 'author_key' in work and 'author_name' in work:
        # solr returns the two lists in parallel; zip pairs them up.
        d['authors'] = [
            {"key": akey, "name": name}
            for akey, name in zip(work['author_key'], work['author_name'])
        ]

    if 'cover_edition_key' in work:
        d['cover_url'] = (
            h.get_coverstore_url() + "/b/olid/%s-M.jpg" % work['cover_edition_key']
        )

    # Fix: the original read work['ia'][0] unconditionally, raising
    # KeyError/IndexError for works with no scanned copy. Guard like
    # every other optional field.
    if work.get('ia'):
        d['read_url'] = "//archive.org/stream/" + work['ia'][0]

    return d
def url(self, size="M"):
    """Return the coverstore image URL for this item at the given size.

    size is one of the coverstore size codes (e.g. "S", "M", "L");
    it is upper-cased before being placed in the URL.
    """
    base = h.get_coverstore_url()
    return "{}/{}/id/{}-{}.jpg".format(base, self.category, self.id, size.upper())