def POST():  # pylint: disable=invalid-name,missing-docstring
    """Import the user's Goodreads list: fetch every page of the shelf
    feed, load each page into the local DB, then run cleanup.

    Responds with a plain-text "200 OK" status page.
    """
    new_data, per_page, api_key, user_id = page_vars(web.data())
    defaults = Defaults('https://www.goodreads.com', api_key, per_page, ['to-read'])
    list_url = defaults.get_list_url(user_id)
    xml_data = read_url(list_url)
    total_pages, document_page = get_total_pages(xml_data)
    if total_pages > 0:
        for offset in range(total_pages):
            current = offset + 1
            if offset == 0 and document_page == 1:
                # The first request above already returned page 1 — reuse it.
                page_loop(xml_data, DB_NAME, new_data)
            else:
                joiner = "?" if "?" not in list_url else "&"
                suffix = "%spage=%s" % (joiner, current)
                if document_page != current:
                    # Only hit the network when we don't already hold this page.
                    xml_data = read_url(list_url + suffix)
                page_loop(xml_data, DB_NAME, False)
        # NOTE(review): reconstructed placement — cleanup runs once after all
        # pages are loaded; confirm against the original file's indentation.
        clean_data(DB_NAME, defaults)
    msg = "200 OK"
    LOGGER.info(msg)
    return RENDERPLAIN.status(msg=msg)
def test_read_url(self, monkeypatch):
    """read_url should hand back the body of a successful response."""
    # Patch order is irrelevant — both fakes are installed before the call.
    monkeypatch.setattr("sorter.lib.request_data.LOGGER", fake_logger())
    monkeypatch.setattr("urllib2.urlopen", lambda _unused: fake_urllib())
    assert read_url("fakeurl") == "fake body"
def get_by_id(book_id, defaults):
    """Fetch the raw XML record for a single book.

    book_id  -- Goodreads book identifier
    defaults -- object that builds the book-lookup URL via get_book_url()
    Returns the XML string from read_url().
    """
    return read_url(defaults.get_book_url(book_id))
def get_by_isbn(isbn, defaults):
    """Fetch raw search XML for a book looked up by search term.

    isbn     -- expected to be an ISBN or ISBN13; other terms have
                limited support
    defaults -- object that builds the search URL via get_search_url()
    Returns the XML string from read_url().
    """
    return read_url(defaults.get_search_url(isbn))
def test_read_url_404(self, monkeypatch):
    """read_url returns None and logs the failure when the request 404s."""
    faker = fake_logger()
    monkeypatch.setattr("sorter.lib.request_data.urllib2", fake_urllib(True))
    monkeypatch.setattr("sorter.lib.request_data.LOGGER", faker)
    body = read_url("fakeurl")
    # PEP 8 / E711: compare against None with `is`, not `==`.
    assert body is None
    assert faker.msg == 'Four Oh Four'
def POST(page):  # pylint: disable=invalid-name,missing-docstring
    """Dispatch admin POST actions.

    'advanced'   -- apply manual book edits from the posted form data
    'getshelves' -- return the user's shelf list as a JSON response
    Any other page (including 'advanced' fall-through) renders via Admin.GET.
    """
    if page == 'advanced':
        payload = from_post(web.data())
        manually_update_books(payload, os.path.abspath(DB_NAME))
    elif page == 'getshelves':
        _, _, api_key, _ = page_vars(web.data())
        cfg = Defaults('https://www.goodreads.com', api_key, None, ['to-read'])
        shelves = get_shelf_list(read_url(cfg.get_shelf_url()))
        web.header('Content-Type', 'application/json', unique=True)
        return json.dumps(shelves)
    return Admin.GET(page)