def POST():  # pylint: disable=invalid-name,missing-docstring
    """Import the user's Goodreads 'to-read' shelf into the database.

    Reads POST parameters from ``web.data()``, fetches every page of the
    shelf list, loads each page via ``page_loop``, then runs ``clean_data``
    and returns a plain-text "200 OK" status page.
    """
    new_data, per_page, api_key, user_id = page_vars(web.data())
    defaults = Defaults('https://www.goodreads.com', api_key, per_page, ['to-read'])
    data_file = defaults.get_list_url(user_id)
    # The first request also tells us how many pages there are and which
    # page this document is.
    xml_data = read_url(data_file)
    total_pages, document_page = get_total_pages(xml_data)
    if total_pages > 0:
        for page_num in range(total_pages):
            # if this is page 1, we can assume we already have the data
            if page_num == 0 and document_page == 1:
                page_loop(xml_data, DB_NAME, new_data)
            else:
                # Append the page number with the correct query separator.
                separator = "&"
                if "?" not in data_file:
                    separator = "?"
                page_string = "%spage=%s" % (separator, (page_num + 1))
                # Only fetch pages we don't already hold.
                if document_page != (page_num + 1):
                    xml_data = read_url(data_file + page_string)
                    page_loop(xml_data, DB_NAME, False)
                # NOTE(review): when document_page != 1, the page equal to
                # document_page is never passed to page_loop in this branch
                # — confirm that is intentional.
    clean_data(DB_NAME, defaults)
    msg = "200 OK"
    LOGGER.info(msg)
    return RENDERPLAIN.status(msg=msg)
def test_is_test(self, monkeypatch):
    """is_test() reports False when the environment lacks the test marker."""
    # Instance is constructed but not otherwise used; is_test() is called
    # on the class.
    instance = Defaults('http://FAKE.GLTD', 'FOO_KEY')
    # Replace the process environment wholesale for this test.
    # NOTE(review): 'WEBPY_END' looks like a typo for 'WEBPY_ENV' — confirm
    # against Defaults.is_test before changing it.
    monkeypatch.setattr('os.environ', {'WEBPY_END': 'foo'})
    assert Defaults.is_test() is False
def test_get_search_url_nouri(self):
    """Without a URI template, get_search_url falls back to /search."""
    api_url = Defaults('http://FAKE.GLTD').get_search_url(54321)
    _, _, path, query, _ = urlsplit(api_url)
    assert path == "/search"
    assert query == "q=54321&format=xml&key=None"
def test_get_book_url_nouri(self):
    """Default book URL carries the id in the path and the key in the query."""
    api_url = Defaults('http://FAKE.GLTD').get_book_url(12345)
    split = urlsplit(api_url)
    assert split.path == "/book/show/12345.xml"
    assert split.query == "key=None"
def test_get_book_url(self):
    """A custom URI template is filled with the book id and the API key."""
    defaults = Defaults('http://FAKE.GLTD')
    template = 'http://FAKE.GLTD/FAKER?id=%s&key=%s'
    api_url = defaults.get_book_url(12345, template)
    params = parse_qs(urlsplit(api_url).query)
    assert params['id'][0] == '12345'
    assert params['key'][0] == 'None'
def test_get_search_url(self):
    """A custom URI template is filled with the ISBN and the API key."""
    defaults = Defaults('http://FAKE.GLTD')
    template = 'http://FAKE.GLTD/FAKER?isbn=%s&key=%s'
    api_url = defaults.get_search_url(54321, template)
    params = parse_qs(urlsplit(api_url).query)
    assert params['isbn'][0] == '54321'
    assert params['key'][0] == 'None'
def test_get_list_url_noperpage(self):
    """When per_page is None, the constructor's per_page (42) is used."""
    defaults = Defaults('http://FAKE.GLTD', None, 42)
    template = 'http://FAKE.GLTD/FAKER?user_id=%s&key=%s&shelf=%s&per_page=%s'
    api_url = defaults.get_list_url(98765, ['foo-shelf'], None, template)
    params = parse_qs(urlsplit(api_url).query)
    expected = {
        'user_id': '98765',
        'key': 'None',
        'shelf': 'foo-shelf',
        'per_page': '42',
    }
    for name, value in expected.items():
        assert params[name][0] == value
def test_get_list_url_nouri(self):
    """Without a URI template, the default list URL is built from the user id."""
    defaults = Defaults('http://FAKE.GLTD/')
    api_url = defaults.get_list_url(98765, ['foo-shelf'], 9, None)
    split = urlsplit(api_url)
    params = parse_qs(split.query)
    assert '98765.xml' in split.path
    assert params['key'][0] == 'None'
    assert params['shelf'][0] == 'foo-shelf'
    assert params['per_page'][0] == '9'
def POST(page):  # pylint: disable=invalid-name,missing-docstring
    """Handle admin POSTs: manual book edits or shelf listing; otherwise
    fall through to the corresponding GET page."""
    if page == 'advanced':
        # Apply manual edits submitted from the advanced admin form.
        db_file = os.path.abspath(DB_NAME)
        manually_update_books(from_post(web.data()), db_file)
    elif page == 'getshelves':
        # Fetch the user's shelf list from Goodreads and return it as JSON.
        _, _, api_key, _ = page_vars(web.data())
        defaults = Defaults('https://www.goodreads.com', api_key, None, ['to-read'])
        shelf_list = get_shelf_list(read_url(defaults.get_shelf_url()))
        web.header('Content-Type', 'application/json', unique=True)
        return json.dumps(shelf_list)
    return Admin.GET(page)
def GET():  # pylint: disable=invalid-name,missing-docstring
    """Run clean_data against the database and return a plain 200 status."""
    api_key, _ = query_vars(web.input())
    settings = Defaults('https://www.goodreads.com', api_key, None, ['to-read'])
    clean_data(DB_NAME, settings)
    msg = "200 OK"
    LOGGER.info(msg)
    return RENDERPLAIN.status(msg=msg)
# pylint: skip-file
import xml.etree.ElementTree as ElementTree

from sorter.lib.book_utils import get_by_id, get_by_isbn
from tests.utils.get_element import get_element, get_file_as_string
from sorter.lib.defaults import Defaults

# BUG FIX: the arguments were shifted — Defaults('FOO_KEY', 1, ['BAR-SHELF'])
# passed the API key as the base URI, the per-page count as the key, and the
# shelf list as per_page.  Every other call site uses the
# (uri, key, per_page, shelves) order, so match it here.
defaults = Defaults('http://FAKE.GLTD', 'FOO_KEY', 1, ['BAR-SHELF'])


class TestBookUtils(object):
    """Tests for sorter.lib.book_utils with the HTTP fetch stubbed out."""

    def test_get_by_id(self, monkeypatch):
        """get_by_id returns raw XML whose book/id matches the fixture."""
        id = get_element('tests/fixtures/book_by_id.xml', 'book/id')
        # Stub the network call to return the fixture file's contents.
        monkeypatch.setattr('sorter.lib.book_utils.read_url',
                            lambda *args: get_file_as_string('tests/fixtures/book_by_id.xml'))
        foo = get_by_id(1234, defaults)
        root = ElementTree.fromstring(foo)
        _id = root.find('book/id').text
        assert id == _id

    def test_get_by_isbn(self, monkeypatch):
        """get_by_isbn returns raw XML parsed from the ISBN-search fixture."""
        id = get_element('tests/fixtures/book_by_isbn.xml', 'search/results/work/id')
        # Stub the network call to return the fixture file's contents.
        monkeypatch.setattr('sorter.lib.book_utils.read_url',
                            lambda *args: get_file_as_string('tests/fixtures/book_by_isbn.xml'))
        foo = get_by_isbn(54321, defaults)
        root = ElementTree.fromstring(foo)
        # NOTE(review): no assertion follows in the visible source — this
        # test appears truncated; confirm the intended check on `root`/`id`.
from sorter.lib.asset_handler import asset
from sorter.lib.page_utils import page_loop, page_vars, query_vars, from_post
from sorter.lib.defaults import Defaults

# Module-wide logger for the web application.
LOGGER = sorter_logger(__name__)

# web.py routing table: URL pattern followed by its handler class name.
URLS = ('/', 'Index',
        '/import', 'Import',
        '/(assets/.+)', 'Assets',
        '/admin', 'Admin',
        '/admin/(.+)', 'Admin',
        '/clean', 'Clean')
APP = web.application(URLS, globals())

# RENDER wraps pages in the layout template; RENDERPLAIN renders bare pages.
RENDER = web.template.render('templates/', base='layout')
RENDERPLAIN = web.template.render('templates/')

# Empty database name under test; the real on-disk file otherwise.
DB_NAME = ""
if not Defaults.is_test():
    DB_NAME = 'data/sorter.db'  # pragma: no cover


class Index(object):  # pylint: disable=too-few-public-methods,missing-docstring
    @staticmethod
    def GET():  # pylint: disable=invalid-name,missing-docstring
        """Render the index page with ranked books, or with None when the
        database file does not exist yet."""
        db_file = os.path.abspath(DB_NAME)
        if os.path.isfile(db_file):
            data = get_books(db_file)
            books = rank(data)
        else:
            books = None
        return RENDER.index(books)
def test_getset_key(self):
    """get_key() returns the API key given to the constructor."""
    assert Defaults('FAKER.GTLD', 'FOO_KEY').get_key() == 'FOO_KEY'
def test_get_shelf_url(self):
    """get_shelf_url() appends the shelf-list endpoint and the API key."""
    expected = 'http://FAKER.GLTD/shelf/list.xml?key=FOO_KEY'
    assert Defaults('http://FAKER.GLTD', 'FOO_KEY').get_shelf_url() == expected