def test_associate_tag_is_written_to_url(self):
    """The associate tag given to the constructor must show up in built URLs."""
    expected_tag = "ABC12345"
    api = API("XXX", "XXX", "de", associate_tag=expected_tag)
    url = api._build_url(Operation="ItemSearch", SearchIndex="Books")
    query = parse_qs(urlparse(url)[4])
    assert query["AssociateTag"][0] == expected_tag
def test_associate_tag_is_written_to_url(self):
    """Check that the configured associate tag ends up in the request URL."""
    tag = 'ABC12345'
    built = API('XXX', 'XXX', 'de', associate_tag=tag)._build_url(
        Operation='ItemSearch', SearchIndex='Books')
    params = parse_qs(urlparse(built)[4])
    assert params['AssociateTag'][0] == tag
def __init__(self, access_key_id, secret_access_key, locale,
             cachedir=DEFAULT_CACHE_DIR, **kwargs):
    """
    :param cachedir: Path to directory containing cached responses.
    """
    API.__init__(self, access_key_id, secret_access_key, locale, **kwargs)
    self.cache = cachedir
    if self.cache and not os.path.isdir(self.cache):
        # use makedirs so a nested cache path (e.g. ~/.cache/app/xml) can be
        # created in one go; os.mkdir fails when a parent directory is missing
        os.makedirs(self.cache)
def pytest_funcarg__api(request):
    """Build an API instance wired to the local test server fixture."""
    local_server = request.getfuncargvalue("server")
    api = API("XXX", "XXX", "uk")  # locale does not matter here!
    api.REQUESTS_PER_SECOND = 10000  # just for here!
    api.host = "%s:%s" % local_server.server_address
    # for tests generated by pytest_generate_tests
    if hasattr(request, "param"):
        api.VERSION = request.param["version"]
    return api
def pytest_funcarg__api(request):
    """Return an API object that talks to the in-process test server."""
    srv = request.getfuncargvalue('server')
    api = API('XXX', 'XXX', 'uk')  # locale does not matter here!
    api.host = '%s:%s' % srv.server_address
    if hasattr(request, 'param'):
        # for tests generated by pytest_generate_tests
        api.VERSION = request.param['version']
    api.REQUESTS_PER_SECOND = 10000  # just for here!
    return api
def __init__(self, *args, **kwargs):
    """
    :param cachedir: Path to directory containing cached responses.
    """
    self.cache = kwargs.pop("cachedir", DEFAULT_CACHE_DIR)
    API.__init__(self, *args, **kwargs)
    if self.cache and not os.path.isdir(self.cache):
        # create intermediate directories too; plain os.mkdir raises OSError
        # when the parent of the cache directory does not exist yet
        os.makedirs(self.cache)
def create_cart(asin, quantity=1):
    """Create an Amazon cart holding *quantity* units of *asin* and return
    its purchase URL.

    :raises ValidationError: if the cart cannot be created for this item.
    """
    api = API(settings.AMAZON_AWS_KEY, settings.AMAZON_SECRET_KEY,
              settings.AMAZON_API_LOCALE, settings.AMAZON_ASSOCIATE_TAG)
    cart = api.cart_create({asin: quantity})
    try:
        return unicode(cart.Cart.PurchaseURL)
    # BUG FIX: ``except ValueError, InvalidCartItem:`` caught only ValueError
    # and merely bound it to the name InvalidCartItem; a tuple is required to
    # actually catch both exception types.
    except (ValueError, InvalidCartItem):
        raise ValidationError()
def __init__(self, *args, **kwargs):
    """
    :param cachedir: Path to directory containing cached responses.
    :param cachetime: Maximum age of cached responses; ``False`` means
        cache indefinitely.
    """
    self.cache = kwargs.pop('cachedir', DEFAULT_CACHE_DIR)
    self.cachetime = kwargs.pop('cachetime', False)  # i.e. indefinite
    API.__init__(self, *args, **kwargs)
    if self.cache and not os.path.isdir(self.cache):
        # os.makedirs also creates missing parent directories, unlike os.mkdir
        os.makedirs(self.cache)
def create_cart(asin, quantity=1):
    """Create a cart for *asin* and return its purchase URL.

    :raises ValidationError: when the Amazon call rejects the item.
    """
    api = API(
        settings.AMAZON_AWS_KEY, settings.AMAZON_SECRET_KEY,
        settings.AMAZON_API_LOCALE, settings.AMAZON_ASSOCIATE_TAG)
    cart = api.cart_create({asin: quantity})
    try:
        return unicode(cart.Cart.PurchaseURL)
    # BUG FIX: the old ``except ValueError, InvalidCartItem:`` was Python 2
    # binding syntax — it caught ValueError only and shadowed the
    # InvalidCartItem exception class. Catch both via a tuple.
    except (ValueError, InvalidCartItem):
        raise ValidationError()
def getAmazonContent(query):
    """Search Amazon (uk locale) for *query* and return a list of product
    dicts with keys ``url``, ``title``, ``img`` and ``price``.

    Products lacking an offer summary or a primary image get ``None`` for
    the corresponding field.
    """
    api = API(AWS_KEY, SECRET_KEY, 'uk', associate_tag='giracake-21',
              processor=minidom_response_parser)
    node = api.item_search('All', Keywords=query, ResponseGroup="Medium")
    products = node.getElementsByTagName('Item')
    data = []
    for product in products:
        # BUG FIX: price/image used to leak from the previous iteration (or
        # be undefined for the very first product) whenever a product had no
        # OfferSummary / primary image set; reset them for every product.
        price = None
        image = None
        for summaryNode in product.getElementsByTagName('OfferSummary'):
            for priceNode in summaryNode.getElementsByTagName('LowestNewPrice'):
                formattedPrice = priceNode.getElementsByTagName('FormattedPrice')
                price = formattedPrice[0].childNodes[0].nodeValue
        for imageSetsNode in product.getElementsByTagName('ImageSets'):
            for imageSetNode in imageSetsNode.getElementsByTagName('ImageSet'):
                # only the primary image set is of interest
                if imageSetNode.attributes["Category"].value != "primary":
                    continue
                mediumImage = imageSetNode.getElementsByTagName('LargeImage')
                url = mediumImage[0].getElementsByTagName('URL')[0]
                image = url.childNodes[0].nodeValue
        prod = {}
        prod['url'] = getText(
            product.getElementsByTagName('DetailPageURL')[0].childNodes)
        prod['title'] = getText(
            product.getElementsByTagName('Title')[0].childNodes)
        prod['img'] = image
        prod['price'] = price
        data += [prod]
    return data
def test_init_with_config_file_and_parameters(self, configfiles):
    """API accepts credentials from a config file combined with explicit
    keyword parameters (here: ``locale``, which the file does not set)."""
    configfiles.add_file(
        """
        [Credentials]
        access_key = ABCDEFGH12345
        secret_key = abcdegf43""",
        '~/.amazon-product-api')
    # must not raise: keys come from the file, locale from the parameter
    API(locale='de')
def _fetch(self, url):
    """
    Retrieves XML response from Amazon. In case of a timeout, it will try
    :const:`~RetryAPI.TRIES`` times before raising an error.

    Non-timeout URL errors are re-raised immediately.
    """
    attempts = 0
    delay = self.DELAY
    while True:
        try:
            attempts += 1
            return API._fetch(self, url)
        # NOTE(review): Python 2-only except syntax
        except urllib2.URLError, e:
            # if a timeout occurred
            # wait for some time before trying again
            reason = getattr(e, 'reason', None)
            if isinstance(reason, socket.timeout) and attempts < self.TRIES:
                time.sleep(delay)
                # exponential back-off between consecutive retries
                delay *= self.BACKOFF
                continue
            # otherwise reraise the original error
            raise
def _fetch(self, url):
    """
    Retrieves XML response from Amazon. In case of a timeout, it will try
    :const:`~RetryAPI.TRIES`` times before raising an error.

    Non-timeout URL errors are re-raised immediately.
    """
    attempts = 0
    delay = self.DELAY
    while True:
        try:
            attempts += 1
            return API._fetch(self, url)
        except URLError:
            e = sys.exc_info()[1]  # Python 2/3 compatible
            # if a timeout occurred
            # wait for some time before trying again
            reason = getattr(e, 'reason', None)
            if isinstance(reason, socket.timeout) and attempts < self.TRIES:
                time.sleep(delay)
                # exponential back-off between consecutive retries
                delay *= self.BACKOFF
                continue
            # otherwise reraise the original error
            raise
def test_init_with_config_file(self, configfiles):
    """API can be constructed with no arguments at all when keys and
    locale are all provided by a config file."""
    configfiles.add_file(
        """
        [Credentials]
        access_key = ABCDEFGH12345
        secret_key = abcdegf43
        locale = de""",
        '~/.amazon-product-api')
    # everything is read from the file above; must not raise
    api = API()
def __init__(self, locale='de'):
    """Build the explorer window and populate it with the root browse
    nodes for *locale*.  Credentials are read from the config file
    (``API(locale=...)``)."""
    gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
    self.set_title("BrowseNode Explorer")
    self.set_size_request(400, 200)
    self.connect("delete_event", self.on_delete)
    self.locale = locale
    self.api = API(locale=self.locale)
    # create a TreeStore with one string column to use as the model
    self.treestore = gtk.TreeStore(int, str)
    # create the TreeView using treestore
    self.treeview = gtk.TreeView(self.treestore)
    # add column id
    renderer = gtk.CellRendererText()
    column = gtk.TreeViewColumn('id', renderer, text=0)
    self.treeview.append_column(column)
    # add column name
    renderer = gtk.CellRendererText()
    column = gtk.TreeViewColumn('name', renderer, text=1)
    column.set_sort_column_id(1)  # Allow sorting on the column
    self.treeview.append_column(column)
    # make it clickable
    self.treeview.add_events(gtk.gdk.BUTTON_PRESS_MASK)
    self.treeview.connect('button_press_event', self.on_tree_click)
    scrolled = gtk.ScrolledWindow()
    scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scrolled.add(self.treeview)
    self.add(scrolled)
    self.show_all()
    # populate with root nodes
    # but avoid duplicated node ids
    node_ids = set(NODE_IDS[self.locale].values())
    for name, id in NODE_IDS[self.locale].items():
        if id in node_ids:
            self.treestore.append(None, [id, name])
            node_ids.remove(id)
def search():
    """Handle the product search form.

    Looks for matching products in the local library first; when nothing is
    found, redirects to an Amazon-backed search.  For local hits, a
    similarity lookup is attempted for each product that has an ASIN.
    """
    form = SearchForm()
    if form.validate_on_submit():
        referrer = form.referrer.data
        query = form.query.data
        results = model.session.query(model.Library).filter(
            model.Library.product_desc.like('%' + query + '%')).all()
        if not results:
            # redirect to new page with amazon search results
            return redirect(
                url_for('amazon_bottlenose2', query=query, referrer=referrer))
        else:
            # NOTE(review): similar_products ends up holding the lookup for
            # the LAST result only — confirm this is intended.
            for i in results:
                similar_products = None
                if i.product.asin:
                    asin = i.product.asin
                    api = API(AWS_KEY, SECRET_KEY, 'us', ASSOC_TAG)
                    try:
                        similar_root = api.similarity_lookup(
                            asin, ResponseGroup='Large')
                        nspace = similar_root.nsmap.get(None, '')
                        similar_products = similar_root.xpath(
                            '//aws:Items/aws:Item',
                            namespaces={'aws': nspace})
                    # BUG FIX: a bare ``except:`` also swallowed SystemExit
                    # and KeyboardInterrupt; keep the best-effort behaviour
                    # but only for ordinary errors.
                    except Exception:
                        similar_products = None
            # render page with search results
            if referrer == 'dashboard':
                return render_template('results.html', results=results,
                                       similar_products=similar_products)
            else:
                form = AddProductForm()
                return render_template('add_product_results.html',
                                       results=results,
                                       similar_products=similar_products,
                                       form=form)
    # else:
    #     flash("Invalid Search")
    return render_template("search.html", form=form)
def amazon_search():
    """Render a fixed Amazon item search (page 2 of 'tent marmot')."""
    api = API(AWS_KEY, SECRET_KEY, 'us', ASSOC_TAG)
    result = api.item_search('All',
                             ResponseGroup='Large',
                             AssociateTag='boitba-20',
                             Keywords='tent marmot',
                             ItemPage=2)
    # item_search returns pages not items; .results is the total hit count
    total_results = result.results
    return render_template("amazon_search.html",
                           node=result,
                           total_results=total_results)
def amazon(query):
    """Demo view: run several Amazon lookups and render similar products."""
    api = API(AWS_KEY, SECRET_KEY, 'us', ASSOC_TAG)
    similar_root = api.similarity_lookup('B0058U6DQC', ResponseGroup='Large')
    # NOTE: product_root is overwritten immediately; both calls are kept to
    # preserve the original sequence of web-service requests.
    product_root = api.item_lookup('B0058U6DQC', ResponseGroup='Large')
    product_root = api.item_search(title='unicorn', ResponseGroup='Large')
    more_products = api.item_search('Books', Publisher='Galileo Press')
    xpath_ns = {'aws': similar_root.nsmap.get(None, '')}
    similar_products = similar_root.xpath('//aws:Items/aws:Item',
                                          namespaces=xpath_ns)
    return render_template("amazon.html",
                           similar_products=similar_products,
                           more_products=more_products,
                           query=query)
def setup_class(cls):
    """ Collect all XML files stored. """
    # TODO: Skip tests if no XML files are found?
    cls.test_files = []
    for version_dir in TESTABLE_API_VERSIONS:
        # note: 'dir' was renamed to avoid shadowing the builtin
        for fname in os.listdir(os.path.join(XML_TEST_DIR, version_dir)):
            if fname.lower().endswith('.xml'):
                cls.test_files.append(
                    os.path.join(XML_TEST_DIR, version_dir, fname))
    cls.api = API(cls.ACCESS_KEY, cls.SECRET_KEY, 'us')
def amazon_search():
    """Search Amazon for 'tent marmot' (result page 2) and render it."""
    api = API(AWS_KEY, SECRET_KEY, 'us', ASSOC_TAG)
    page = api.item_search('All', ResponseGroup='Large',
                           AssociateTag='boitba-20', Keywords='tent marmot',
                           ItemPage=2)
    # item_search returns pages not items; .results is the total hit count
    return render_template("amazon_search.html", node=page,
                           total_results=page.results)
def search():
    """Handle the product search form: query the local library, falling
    back to an Amazon-backed search when nothing matches; attempt a
    similarity lookup for local hits that carry an ASIN."""
    form = SearchForm()
    if form.validate_on_submit():
        referrer = form.referrer.data
        query = form.query.data
        results = model.session.query(model.Library).filter(model.Library.product_desc.like('%'+ query + '%')).all()
        if not results:
            #redirect to new page with amazon search results
            return redirect(url_for('amazon_bottlenose2', query=query, referrer=referrer))
        else:
            # NOTE(review): similar_products keeps only the value of the
            # last iteration — confirm that is intended.
            for i in results:
                similar_products = None
                if i.product.asin:
                    asin = i.product.asin
                    api = API(AWS_KEY, SECRET_KEY, 'us', ASSOC_TAG)
                    try:
                        similar_root = api.similarity_lookup(asin, ResponseGroup='Large')
                        #~ from lxml import etree
                        #~ print etree.tostring(root, pretty_print=True)
                        nspace = similar_root.nsmap.get(None, '')
                        similar_products = similar_root.xpath('//aws:Items/aws:Item',
                            namespaces={'aws' : nspace})
                    # NOTE(review): bare except also swallows SystemExit /
                    # KeyboardInterrupt; consider ``except Exception:``
                    except:
                        similar_products = None
            #render page with search results
            if referrer == 'dashboard':
                return render_template('results.html', results=results,
                    similar_products=similar_products)
            else:
                form = AddProductForm()
                return render_template('add_product_results.html', results=results,
                    similar_products=similar_products, form=form)
    # else:
    #     flash("Invalid Search")
    return render_template("search.html", form=form)
def __init__(self, locale='de'):
    """Build the explorer window and populate it with the root browse
    nodes for *locale*; credentials come from module-level AWS keys."""
    gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
    self.set_title("BrowseNode Explorer")
    self.set_size_request(400, 200)
    self.connect("delete_event", self.on_delete)
    self.locale = locale
    self.api = API(AWS_KEY, SECRET_KEY, self.locale)
    # create a TreeStore with one string column to use as the model
    self.treestore = gtk.TreeStore(int, str)
    # create the TreeView using treestore
    self.treeview = gtk.TreeView(self.treestore)
    # add column id
    renderer = gtk.CellRendererText()
    column = gtk.TreeViewColumn('id', renderer, text=0)
    self.treeview.append_column(column)
    # add column name
    renderer = gtk.CellRendererText()
    column = gtk.TreeViewColumn('name', renderer, text=1)
    column.set_sort_column_id(1)  # Allow sorting on the column
    self.treeview.append_column(column)
    # make it clickable
    self.treeview.add_events(gtk.gdk.BUTTON_PRESS_MASK)
    self.treeview.connect('button_press_event', self.on_tree_click)
    scrolled = gtk.ScrolledWindow()
    scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scrolled.add(self.treeview)
    self.add(scrolled)
    self.show_all()
    # populate with root nodes
    # but avoid duplicated node ids
    node_ids = set(NODE_IDS[self.locale].values())
    for name, id in NODE_IDS[self.locale].items():
        if id in node_ids:
            self.treestore.append(None, [id, name])
            node_ids.remove(id)
def _fetch(self, url):
    """Return the response for *url*, serving it from the on-disk cache
    when possible and caching fresh responses pretty-printed."""
    if not self.cache:
        # caching disabled: go straight to Amazon
        return API._fetch(self, url)
    path = os.path.join(self.cache, "%s.xml" % self.get_hash(url))
    # if response was fetched previously, use that one
    if os.path.isfile(path):
        return open(path)
    # fetch original response from Amazon and store a formatted copy,
    # rewound so the caller can read it like a fresh response
    resp = API._fetch(self, url)
    fp = open(path, "w+")
    fp.write(etree.tostring(etree.parse(resp), pretty_print=True))
    fp.seek(0)
    return fp
def _fetch(self, url):
    """Fetch *url*, reading from / writing to the local XML cache."""
    cached_path = None
    if self.cache:
        cached_path = os.path.join(self.cache, '%s.xml' % self.get_hash(url))
        # if response was fetched previously, use that one
        if os.path.isfile(cached_path):
            return open(cached_path)
    # fetch original response from Amazon
    resp = API._fetch(self, url)
    if cached_path is None:
        return resp
    # store a pretty-printed copy and rewind it for the caller
    fp = open(cached_path, 'w+')
    fp.write(etree.tostring(etree.parse(resp), pretty_print=True))
    fp.seek(0)
    return fp
def _fetch(self, url):
    """Retrieve *url*, caching the XML response on disk (pretty-printed
    via minidom) so repeated requests hit the network only once."""
    if self.cache:
        path = os.path.join(self.cache, '%s.xml' % self.get_hash(url))
        # if response was fetched previously, use that one
        if os.path.isfile(path):
            return open(path)
    # fetch original response from Amazon
    resp = API._fetch(self, url)
    if self.cache:
        fp = open(path, 'w+')
        # pretty-print with minidom so cached files are human-readable
        node = xml.dom.minidom.parseString(resp.read())
        fp.write(node.toprettyxml())
        #from lxml import etree
        #fp.write(etree.tostring(etree.parse(resp), pretty_print=True))
        # rewind so the caller can read the cached copy like a response
        fp.seek(0)
        return fp
    return resp
def amazon(query):
    """Run a handful of Amazon lookups and render the similar products."""
    api = API(AWS_KEY, SECRET_KEY, 'us', ASSOC_TAG)
    similar_root = api.similarity_lookup('B0058U6DQC', ResponseGroup='Large')
    # both calls kept (second overwrites the first) so the request
    # sequence against the web service stays unchanged
    product_root = api.item_lookup('B0058U6DQC', ResponseGroup='Large')
    product_root = api.item_search(title='unicorn', ResponseGroup='Large')
    more_products = api.item_search('Books', Publisher='Galileo Press')
    default_ns = similar_root.nsmap.get(None, '')
    similar_products = similar_root.xpath(
        '//aws:Items/aws:Item', namespaces={'aws': default_ns})
    return render_template("amazon.html", similar_products=similar_products,
                           more_products=more_products, query=query)
def __init__(self, *args, **kwargs):
    """Initialise like :class:`API`; batch mode starts switched off."""
    API.__init__(self, *args, **kwargs)
    # flag toggled elsewhere when requests are collected into batches
    self.batch_mode = False
"""
Find similar items to "Small Favor: A Novel of the Dresden Files"
(ASIN 0451462009).
"""
from amazonproduct.api import API
from config import AWS_KEY, SECRET_KEY

if __name__ == '__main__':
    api = API(AWS_KEY, SECRET_KEY, 'us')
    root = api.similarity_lookup('0451462009')
    #~ from lxml import etree
    #~ print etree.tostring(root, pretty_print=True)
    # default namespace of the response, needed for the XPath lookup below
    nspace = root.nsmap.get(None, '')
    books = root.xpath('//aws:Items/aws:Item',
        namespaces={'aws' : nspace})
    # Python 2 print statements: trailing commas keep output on one line
    for book in books:
        print 'ASIN %-10s' % book.ASIN,
        print unicode(book.ItemAttributes.Author), ':',
        print unicode(book.ItemAttributes.Title)
class SoupProcessor(BaseProcessor):

    """
    Custom response parser using BeautifulSoup to parse the returned XML.
    """

    def parse(self, fp):
        """Parse the file-like XML response *fp* into a soup, raising
        :class:`AWSError` for the first service error found."""
        soup = BeautifulSoup.BeautifulSoup(fp.read())
        # parse errors
        for error in soup.findAll('error'):
            code = error.find('code').text
            msg = error.find('message').text
            raise AWSError(code, msg)
        return soup

if __name__ == '__main__':
    # Don't forget to create file ~/.amazon-product-api
    # with your credentials (see docs for details)
    api = API(locale='us', processor=SoupProcessor())
    result = api.item_lookup('0718155157')
    print result
    # ...
    # now do something with it!
class BrowseNodeExplorer (gtk.Window):

    """
    Gtk explorer for Amazon BrowseNodes.
    """

    def on_delete(self, widget, event, data=None):
        # closes the window and quits.
        gtk.main_quit()
        return False

    def on_tree_click(self, widget, event, data=None):
        """On double click, look up the browse node of the selected row."""
        # if double click
        if event.type == gtk.gdk._2BUTTON_PRESS:
            # get data from highlighted selection
            treeselection = self.treeview.get_selection()
            model, iter = treeselection.get_selected()
            name_of_data = self.treestore.get_value(iter, 0)
            # and fetch selected node
            self.fetch_nodes(name_of_data)

    def __init__(self, locale='de'):
        """Build the window and populate it with root nodes for *locale*."""
        gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
        self.set_title("BrowseNode Explorer")
        self.set_size_request(400, 200)
        self.connect("delete_event", self.on_delete)
        self.locale = locale
        self.api = API(AWS_KEY, SECRET_KEY, self.locale)
        # create a TreeStore with one string column to use as the model
        self.treestore = gtk.TreeStore(int, str)
        # create the TreeView using treestore
        self.treeview = gtk.TreeView(self.treestore)
        # add column id
        renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn('id', renderer, text=0)
        self.treeview.append_column(column)
        # add column name
        renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn('name', renderer, text=1)
        column.set_sort_column_id(1)  # Allow sorting on the column
        self.treeview.append_column(column)
        # make it clickable
        self.treeview.add_events(gtk.gdk.BUTTON_PRESS_MASK)
        self.treeview.connect('button_press_event', self.on_tree_click)
        scrolled = gtk.ScrolledWindow()
        scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrolled.add(self.treeview)
        self.add(scrolled)
        self.show_all()
        # populate with root nodes
        # but avoid duplicated node ids
        node_ids = set(NODE_IDS[self.locale].values())
        for name, id in NODE_IDS[self.locale].items():
            if id in node_ids:
                self.treestore.append(None, [id, name])
                node_ids.remove(id)

    def _find_row(self, node_id):
        """Depth-first search of the treestore for the row whose first
        column equals *node_id*; returns the row or None."""
        def match_func(row, data):
            # data is a tuple containing column number, key
            column, key = data
            return row[column] == key
        def search(rows, func, data):
            if not rows:
                return None
            for row in rows:
                if func(row, data):
                    return row
                result = search(row.iterchildren(), func, data)
                if result:
                    return result
            return None
        return search(self.treestore, match_func, (0, node_id))

    def fetch_nodes(self, node_id):
        """
        Fetches a BrowseNode from Amazon.
        """
        # fetch matching row from treestore
        row = self._find_row(node_id)
        # fetch Amazon data
        node = self.api.browse_node_lookup(node_id).BrowseNodes.BrowseNode
        id = node.BrowseNodeId.pyval
        name = node.Name.pyval
        is_root = hasattr(node, 'IsCategoryRoot') and node.IsCategoryRoot.pyval == 1
        #~ from lxml import etree
        #~ print etree.tostring(node, pretty_print=True)
        #try:
        #    parents = dict((parent.BrowseNodeId.pyval, parent.Name.pyval)
        #        for parent in node.Ancestors.BrowseNode)
        #except AttributeError:
        #    parents = {}
        #
        #piter = None
        #for parent_id, parent_name in parents.items():
        #    piter = self.treestore.append(None, [parent_id, parent_name])
        #
        #iter = self.treestore.append(piter, [id, name])
        # replace node name
        if is_root:
            row[1] = node.Ancestors.BrowseNode.Name.text
        # nodes without children raise AttributeError on .Children
        try:
            children = dict((child.BrowseNodeId.pyval, child.Name.pyval)
                for child in node.Children.BrowseNode)
        except AttributeError:
            children = {}
        for child_id, child_name in children.items():
            self.treestore.append(row.iter, [child_id, child_name])
        # expand nodes of just added
        self.treeview.expand_row(tuple(row.path), True)

    def main(self):
        gtk.main()
def __init__(self, aws_access_key, aws_secret_key, affiliate_key):
    """Create the underlying amazonproduct client (US locale only)."""
    locale = 'us'
    self.api = API(aws_access_key, aws_secret_key, locale, affiliate_key)
def pytest_funcarg__api(request):
    """
    Initialises API for each test call (formerly done with
    ``setup_method()``).
    """
    server = request.getfuncargvalue('server')
    url_reg = re.compile(r'^http://(?P<host>[\w\-\.]+)(?P<path>/onca/xml.*)$')
    # the following parameters are injected by pytest_generate_tests
    locale = request.param.locale
    version = request.param.version
    xml_response = request.param.xml_response
    processor = TESTABLE_PROCESSORS[request.param.processor]
    api = API(locale=locale, processor=processor)
    api.VERSION = version
    api.REQUESTS_PER_SECOND = 10000  # just for here!
    def counter(fnc):
        """
        Wrapper function for ``_fetch`` which

        1. keeps track of the times has been called and adjusts the path to
           the corresponding XML response
        2. Fetches any response that has not been cached from the live
           servers
        """
        api._count = 0
        def wrapped(url):
            api._count += 1
            # subsequent calls of the same test use numbered response files
            path = xml_response
            if api._count > 1:
                root, ext = os.path.splitext(path)
                path = '%s-%i%s' % (root, api._count, ext)
            try:
                if request.config.option.fetch == 'all':
                    raise ResponseRequired
                try:
                    content = open(path, 'r').read()
                    # If the XML response has been previously fetched compare
                    # request arguments in order to see if there are any changes
                    cached_params = utils.arguments_from_cached_xml(content)
                    current_params = utils.arguments_from_url(url)
                    if cached_params != current_params:
                        raise ArgumentMismatch
                except IOError:
                    if request.config.option.fetch in ('outdated', 'missing'):
                        raise ResponseRequired
                    raise pytest.skip('No cached XML response found!')
                except ArgumentMismatch:
                    if request.config.option.fetch == 'outdated':
                        raise ResponseRequired
                    msg = ('Cached arguments in %s differ from the ones '
                           'currently tested against!\ncached=%r\ncurrent=%r'
                           % (path, cached_params, current_params))
                    return pytest.skip(msg)
                except AttributeError:
                    # XML for error messages have no Argument elements!
                    pass
            except ResponseRequired:
                # fetch XML via urllib2 rather than directly via
                # lxml.etree.parse() to avoid, for instance, problems with HTTP
                # 403 errors
                resp = requests.get(url, headers={'User-Agent': USER_AGENT})
                root = lxml.etree.fromstring(resp.text)
                # overwrite sensitive information in XML document.
                for arg in root.xpath('//aws:Argument',
                        namespaces={'aws': root.nsmap[None]}):
                    if arg.get('Name') in ('Signature', 'AWSAccessKeyId',
                            'AssociateTag'):
                        arg.set('Value', 'X' * 15)
                content = lxml.etree.tostring(root, pretty_print=True)
                # complain loudly about missing credentials
                # UNLESS it was actually on purpose!
                if (six.b('MissingClientTokenId') in content
                        and getattr(request.function, 'refetch', True)):
                    raise pytest.fail(
                        'Cannot fetch XML response without credentials!')
                if not os.path.exists(os.path.dirname(path)):
                    os.mkdir(os.path.dirname(path))
                open(path, 'wb').write(content)
            # We simply exchange the real host with the local one now!
            # Note: Although strictly speaking it does not matter which URL is
            # called exactly, to appeal to one's sense of correctness, let's
            # keep at least the correct path!
            url = url_reg.sub(r'%s\g<path>' % server.url, url)
            server.serve_content(content)
            return fnc(url)
        return wrapped
    api._fetch = counter(api._fetch)
    return api
# -*- coding: utf-8 -*- from amazonproduct.api import API from amazonproduct.errors import AWSError from amazonproduct.processors import BaseProcessor import BeautifulSoup class SoupProcessor(BaseProcessor): def parse(self, fp): soup = BeautifulSoup.BeautifulSoup(fp.read()) for error in soup.findAll('error'): code = error.find('code').text msg = error.find('message').text raise AWSError(code, msg) return soup if __name__ == '__main__': api = API(locale='jp', processor=SoupProcessor()) result = api.item_lookup('B00LCL7A3G') print result
class BrowseNodeExplorer(gtk.Window):

    """
    Gtk explorer for Amazon BrowseNodes.
    """

    def on_delete(self, widget, event, data=None):
        # closes the window and quits.
        gtk.main_quit()
        return False

    def on_tree_click(self, widget, event, data=None):
        """On double click, look up the browse node of the selected row."""
        # if double click
        if event.type == gtk.gdk._2BUTTON_PRESS:
            # get data from highlighted selection
            treeselection = self.treeview.get_selection()
            model, iter = treeselection.get_selected()
            name_of_data = self.treestore.get_value(iter, 0)
            # and fetch selected node
            self.fetch_nodes(name_of_data)

    def __init__(self, locale='de'):
        """Build the window; credentials come from the config file."""
        gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
        self.set_title("BrowseNode Explorer")
        self.set_size_request(400, 200)
        self.connect("delete_event", self.on_delete)
        self.locale = locale
        self.api = API(locale=self.locale)
        # create a TreeStore with one string column to use as the model
        self.treestore = gtk.TreeStore(int, str)
        # create the TreeView using treestore
        self.treeview = gtk.TreeView(self.treestore)
        # add column id
        renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn('id', renderer, text=0)
        self.treeview.append_column(column)
        # add column name
        renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn('name', renderer, text=1)
        column.set_sort_column_id(1)  # Allow sorting on the column
        self.treeview.append_column(column)
        # make it clickable
        self.treeview.add_events(gtk.gdk.BUTTON_PRESS_MASK)
        self.treeview.connect('button_press_event', self.on_tree_click)
        scrolled = gtk.ScrolledWindow()
        scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrolled.add(self.treeview)
        self.add(scrolled)
        self.show_all()
        # populate with root nodes
        # but avoid duplicated node ids
        node_ids = set(NODE_IDS[self.locale].values())
        for name, id in NODE_IDS[self.locale].items():
            if id in node_ids:
                self.treestore.append(None, [id, name])
                node_ids.remove(id)

    def _find_row(self, node_id):
        """Depth-first search for the row whose id column is *node_id*."""
        def match_func(row, data):
            # data is a tuple containing column number, key
            column, key = data
            return row[column] == key
        def search(rows, func, data):
            if not rows:
                return None
            for row in rows:
                if func(row, data):
                    return row
                result = search(row.iterchildren(), func, data)
                if result:
                    return result
            return None
        return search(self.treestore, match_func, (0, node_id))

    def fetch_nodes(self, node_id):
        """
        Fetches a BrowseNode from Amazon.
        """
        # fetch matching row from treestore
        row = self._find_row(node_id)
        # fetch Amazon data
        node = self.api.browse_node_lookup(node_id).BrowseNodes.BrowseNode
        id = node.BrowseNodeId.pyval
        name = node.Name.pyval
        is_root = hasattr(node, 'IsCategoryRoot') and node.IsCategoryRoot.pyval == 1
        #~ from lxml import etree
        #~ print etree.tostring(node, pretty_print=True)
        #try:
        #    parents = dict((parent.BrowseNodeId.pyval, parent.Name.pyval)
        #        for parent in node.Ancestors.BrowseNode)
        #except AttributeError:
        #    parents = {}
        #
        #piter = None
        #for parent_id, parent_name in parents.items():
        #    piter = self.treestore.append(None, [parent_id, parent_name])
        #
        #iter = self.treestore.append(piter, [id, name])
        # replace node name
        if is_root:
            row[1] = node.Ancestors.BrowseNode.Name.text
        # nodes without children raise AttributeError on .Children
        try:
            children = dict((child.BrowseNodeId.pyval, child.Name.pyval)
                for child in node.Children.BrowseNode)
        except AttributeError:
            children = {}
        for child_id, child_name in children.items():
            self.treestore.append(row.iter, [child_id, child_name])
        # expand nodes of just added
        self.treeview.expand_row(tuple(row.path), True)

    def main(self):
        gtk.main()
from pymongo import Connection

from amazonproduct.api import API
from config import AWS_KEY, SECRET_KEY

if __name__ == '__main__':
    # element names to copy from each <Item> into the Mongo document
    item_attr = ['Brand', 'Manufacturer', 'Model', 'OperatingSystem',
        'ProductGroup', 'ProductTypeName', 'ReleaseDate', 'Title']
    item_img = ['SmallImage', 'MediumImage', 'LargeImage']
    item_offer = ['SalesRank', 'TotalNew']
    item_price = ['LowestNewPrice', 'LowestRefurbishedPrice', 'LowestUsedPrice']
    img_attr = ['Height', 'URL', 'Width']
    connection = Connection()
    db = connection.linux_laptops
    api = API(AWS_KEY, SECRET_KEY, 'us', associate_tag='notebux-20')
    paginator = api.item_search('Electronics', Keywords='linux laptop',
        ResponseGroup='Medium')
    # paginator yields one XML root per result page
    for root in paginator:
        nspace = root.nsmap.get(None, '')
        items = root.xpath('//aws:Items/aws:Item', namespaces={'aws' : nspace})
        for i in items:
            doc = {}
            try:
                doc['ASIN'] = str(i.ASIN)
                doc['URL'] = str(i.DetailPageURL)
                # Images
                doc['Images'] = {}
                # NOTE(review): snippet is truncated here in this source —
                # the body of the following loop is missing.
                for a in item_img:
# element names to copy from each <Item> into the Mongo document
item_attr = [
    'Brand', 'Manufacturer', 'Model', 'OperatingSystem',
    'ProductGroup', 'ProductTypeName', 'ReleaseDate', 'Title'
]
item_img = ['SmallImage', 'MediumImage', 'LargeImage']
item_offer = ['SalesRank', 'TotalNew']
item_price = [
    'LowestNewPrice', 'LowestRefurbishedPrice', 'LowestUsedPrice'
]
img_attr = ['Height', 'URL', 'Width']
connection = Connection()
db = connection.linux_laptops
api = API(AWS_KEY, SECRET_KEY, 'us', associate_tag='notebux-20')
paginator = api.item_search('Electronics', Keywords='linux laptop',
                            ResponseGroup='Medium')
# paginator yields one XML root per result page
for root in paginator:
    nspace = root.nsmap.get(None, '')
    items = root.xpath('//aws:Items/aws:Item', namespaces={'aws': nspace})
    for i in items:
        doc = {}
        try:
            doc['ASIN'] = str(i.ASIN)
            doc['URL'] = str(i.DetailPageURL)
            # Images
            # NOTE(review): snippet is truncated here in this source.
def pytest_funcarg__api(request):
    """
    Initialises API for each test call (formerly done with
    ``setup_method()``).
    """
    server = request.getfuncargvalue('server')
    url_reg = re.compile(r'^http://(?P<host>[\w\-\.]+)(?P<path>/onca/xml.*)$')
    # the following parameters are injected by pytest_generate_tests
    locale = request.param['locale']
    version = request.param['version']
    xml_response = request.param['xml_response']
    processor = TESTABLE_PROCESSORS[request.param['processor']]
    # a processor class (rather than an instance) must be instantiated first
    if isinstance(processor, type):
        processor = processor()
    api = API(locale=locale, processor=processor)
    api.VERSION = version
    api.REQUESTS_PER_SECOND = 10000  # just for here!
    def counter(fnc):
        """
        Wrapper function for ``_fetch`` which

        1. keeps track of the times has been called and adjusts the path to
           the corresponding XML response
        2. Fetches any response that has not been cached from the live
           servers
        """
        api._count = 0
        def wrapped(url):
            api._count += 1
            # subsequent calls of the same test use numbered response files
            path = xml_response
            if api._count > 1:
                root, ext = os.path.splitext(path)
                path = '%s-%i%s' % (root, api._count, ext)
            try:
                if request.config.option.fetch == 'all':
                    raise ResponseRequired
                try:
                    content = open(path, 'r').read()
                    # If the XML response has been previously fetched compare
                    # request arguments in order to see if there are any changes
                    cached_params = utils.arguments_from_cached_xml(content)
                    current_params = utils.arguments_from_url(url)
                    if cached_params != current_params:
                        raise ArgumentMismatch
                except IOError:
                    if request.config.option.fetch in ('outdated', 'missing'):
                        raise ResponseRequired
                    raise pytest.skip('No cached XML response found!')
                except ArgumentMismatch:
                    if request.config.option.fetch == 'outdated':
                        raise ResponseRequired
                    return pytest.skip('Cached arguments in %s differ from the '
                        'ones currently tested against!' % path)
                        #'\ncached=%r\ncurrent=%r' % (path,
                        #cached_params, current_params))
                except AttributeError:
                    # XML for error messages have no Argument elements!
                    pass
            except ResponseRequired:
                # fetch XML via urllib2 rather than directly via
                # lxml.etree.parse() to avoid, for instance, problems with HTTP
                # 403 errors
                try:
                    req = urllib2.Request(url, headers={'User-Agent': USER_AGENT})
                    xml = urllib2.urlopen(req).read()
                except urllib2.HTTPError, e:
                    # error documents still carry the XML body we want
                    xml = e.read()
                root = lxml.etree.fromstring(xml)
                # overwrite sensitive information in XML document.
                for arg in root.xpath('//aws:Argument',
                        namespaces={'aws': root.nsmap[None]}):
                    if arg.get('Name') in ('Signature', 'AWSAccessKeyId',
                            'AssociateTag'):
                        arg.set('Value', 'X'*15)
                content = lxml.etree.tostring(root, pretty_print=True)
                # complain loudly about missing credentials
                # UNLESS it was actually on purpose!
                if ('MissingClientTokenId' in content
                        and getattr(request.function, 'refetch', True)):
                    raise pytest.fail('Cannot fetch XML response without credentials!')
                if not os.path.exists(os.path.dirname(path)):
                    os.mkdir(os.path.dirname(path))
                open(path, 'wb').write(content)
            # We simply exchange the real host with the local one now!
            # Note: Although strictly speaking it does not matter which URL is
            # called exactly, to appeal to one's sense of correctness, let's
            # keep at least the correct path!
            url = url_reg.sub(r'%s\g<path>' % server.url, url)
            server.serve_content(content)
            return fnc(url)
        return wrapped
        # NOTE(review): snippet appears truncated here — the fixture never
        # wraps api._fetch nor returns api in this source.
class AmazonProductSearch(): def __init__(self, aws_access_key, aws_secret_key, affiliate_key): self.api = API(aws_access_key, aws_secret_key, 'us', affiliate_key) def _create_product(self, result): product = {} product['ASIN'] = str(result.ASIN) product['Amount'] = str(result.OfferSummary.LowestNewPrice.Amount) product['Binding'] = str(result.ItemAttributes.Binding) product['DetailPageURL'] = str(result.DetailPageURL) return product def item_search(self, title, actor, expected_running_time, ResponseGroup='OfferFull, Medium', release_year=None): ''' This method searches Amazon for DVD, Blu-ray, and/or Amazon Instant Video listings of the movie that we are searching. Actor and running time are included in the query to improve the accuracy of the results returned by Amazon. Args: title: Title of the movie actor: Top billing actor for the movie expected_running_time: Total running time for the movie ResponseGroup: See response group in amazon product search api documentation ''' try: results = self.api.item_search('DVD', actor=actor, Keywords=title, ResponseGroup=ResponseGroup) except Exception as e: print e return [] rv = [] max_release_year_diff = 1 max_running_time_diff = 5 bindings_seen = {"DVD": False, "Blu-ray": False, "Amazon Instant Video": False} for result in results: etree.tostring(result, pretty_print=True) try: if all(bindings_seen.values()): return rv result_binding = str(result.ItemAttributes.Binding) result_release_year = int(str(result.ItemAttributes.ReleaseDate).split('-')[0]) result_running_time = int(result.ItemAttributes.RunningTime) result_title = str(result.ItemAttributes.Title) if fuzz.partial_ratio(title.lower(), result_title.lower()) < 100: continue if release_year is not None and abs(release_year - result_release_year) > max_release_year_diff: continue if abs(expected_running_time - result_running_time) > max_running_time_diff: continue if result_binding not in bindings_seen.keys(): continue if bindings_seen.get(result_binding): continue 
rv.append(self._create_product(result)) bindings_seen[result_binding] = True except Exception as e: print e continue return rv
""" Get all books published by "Galileo Press". """ from amazonproduct.api import API import lxml if __name__ == '__main__': # Don't forget to create file ~/.amazon-product-api # with your credentials (see docs for details) api = API(locale='de') result = api.item_search('Books', Publisher='Galileo Press', ResponseGroup='Large') # extract paging information total_results = result.results total_pages = len(result) # or result.pages for book in result: print 'page %d of %d' % (result.current, total_pages) #~ from lxml import etree #~ print etree.tostring(book, pretty_print=True) print book.ASIN, print unicode(book.ItemAttributes.Author), ':', print unicode(book.ItemAttributes.Title),
# NOTE(review): hardcoded AWS credentials committed to source -- these keys
# should be revoked and loaded from a config file or environment variables.
aws_key = '1QTKKA77QPEB2MGMBYR2'
aws_secret = '643VohcwGOqct4C2AeLOHbmN4uR6Is2Gby8+4cX4'

# Default namespace of the AWS ECommerceService response documents,
# keyed as 'xmlns' for use with the XPath calls below.
result_ns = {
    'xmlns': 'http://webservices.amazon.com/AWSECommerceService/2011-08-01'
}

def response_parser(fp):
    """Parse a raw XML response body into a DOM via ``fns.parse_xml``."""
    dom = fns.parse_xml(fp.read())
    return dom

# API client bound to the US locale with the 'springpartner-20' associate
# tag, using the custom parser above instead of the default processor.
client = API(aws_key, aws_secret, 'us', 'springpartner-20',
    processor=response_parser)

def get_items(response):
    """Return all ``Item`` elements from a parsed response document."""
    return response.xpath('//xmlns:Item', namespaces=result_ns)

def get_price(item):
    """
    Returns the amazon price for an item.
    """
    # NOTE(review): this definition is truncated here -- its body continues
    # past the end of the visible source.
    result = item.xpath('//xmlns:LowestNewPrice/xmlns:FormattedPrice',
        namespaces=result_ns)
    if len(result) > 0:
""" Custom response parser using BeautifulSoup to parse the returned XML. """ def parse(self, fp): soup = BeautifulSoup.BeautifulSoup(fp.read()) # parse errors for error in soup.findAll('error'): code = error.find('code').text msg = error.find('message').text raise AWSError(code, msg) return soup if __name__ == '__main__': # Don't forget to create file ~/.amazon-product-api # with your credentials (see docs for details) api = API(locale='us', processor=SoupProcessor()) result = api.item_lookup('0718155157') print result # ... # now do something with it!
# -*- coding: utf-8 -*- from amazonproduct.api import API from amazonproduct.errors import AWSError from amazonproduct.processors import BaseProcessor import BeautifulSoup class SoupProcessor(BaseProcessor): def parse(self, fp): soup = BeautifulSoup.BeautifulStoneSoup(fp.read()) for error in soup.findAll('error'): code = error.find('code').text msg = error.find('message').text raise AWSError(code, msg) return soup if __name__ == '__main__': api = API(locale='jp', processor=SoupProcessor()) result = api.item_search('Apparel', Condition='All', Availability='Available', Keywords='Shirt') print result
import sys from amazonproduct.api import API if __name__ == '__main__': if len(sys.argv[1:]) == 0: print __doc__ print 'Usage: %s ISBN' % sys.argv[0] sys.exit(1) for isbn in sys.argv[1:]: isbn = isbn.replace('-', '') # Don't forget to create file ~/.amazon-product-api # with your credentials (see docs for details) api = API(locale='us') for root in api.item_lookup(isbn, IdType='ISBN', SearchIndex='Books', ResponseGroup='EditorialReview'): nspace = root.nsmap.get(None, '') reviews = root.xpath('//aws:EditorialReview', namespaces={'aws': nspace}) for review in reviews: print unicode(review.Source) print '-' * 40 print unicode(review.Content)
""" Find similar items to "Small Favor: A Novel of the Dresden Files" (ASIN 0451462009). """ from amazonproduct.api import API if __name__ == '__main__': # Don't forget to create file ~/.amazon-product-api # with your credentials (see docs for details) api = API(locale='us') root = api.similarity_lookup('0451462009') #~ from lxml import etree #~ print etree.tostring(root, pretty_print=True) nspace = root.nsmap.get(None, '') books = root.xpath('//aws:Items/aws:Item', namespaces={'aws': nspace}) for book in books: print 'ASIN %-10s' % book.ASIN, print unicode(book.ItemAttributes.Author), ':', print unicode(book.ItemAttributes.Title)
Get all editorial reviews for books with the specified ISBNs. """ import sys from amazonproduct.api import API if __name__ == '__main__': if len(sys.argv[1:]) == 0: print __doc__ print 'Usage: %s ISBN' % sys.argv[0] sys.exit(1) for isbn in sys.argv[1:]: isbn = isbn.replace('-', '') # Don't forget to create file ~/.amazon-product-api # with your credentials (see docs for details) api = API(locale='us') for root in api.item_lookup(isbn, IdType='ISBN', SearchIndex='Books', ResponseGroup='EditorialReview'): nspace = root.nsmap.get(None, '') reviews = root.xpath('//aws:EditorialReview', namespaces={'aws' : nspace}) for review in reviews: print unicode(review.Source) print '-' * 40 print unicode(review.Content)
def buy_items(item_list, quantity_list=None, category_list=None):
    """
    Build a remote Amazon cart for the given items.

    Args:
        item_list: search terms, one per item to buy
        quantity_list: how many of each item to buy (defaults to 1 each)
        category_list: Amazon search index per item (defaults to 'All')

    Returns:
        Tuple of (item_and_quantity, item_names, item_prices, subtotal,
        purchase_url) where the first three map AWS product ids to the
        quantity, display name and price of each cart item, and
        purchase_url is the URL the user opens to buy the cart contents.
    """
    # Instantiate Amazon API object
    amazon = API(locale='us')
    # Default: buy 1 of each item, search in the general 'All' category
    if quantity_list is None:
        quantity_list = [1 for item in item_list]
    if category_list is None:
        category_list = ['All' for item in item_list]
    item_and_quantity = dict()
    item_names = dict()
    item_prices = dict()
    # Search for each item on Amazon and map its AWS product id to the
    # quantity to buy -- the format cart_create() expects.
    for item, quantity, category in zip(item_list, quantity_list,
                                        category_list):
        result = search_item_and_price(amazon, item, category)
        if result is not None:
            item_and_quantity[result['aws_id']] = quantity
    # Create remote cart of items to buy. ID and HMAC are used to track and
    # reference the created cart; the purchase URL is what the user uses to
    # purchase the desired items.
    cart = amazon.cart_create(item_and_quantity)
    cart_id = cart.Cart.CartId
    cart_hmac = cart.Cart.HMAC
    purchase_url = cart.Cart.PurchaseURL.text
    # Get contents of cart
    cart_get = amazon.cart_get(cart_id, cart_hmac)
    # NOTE(review): the positional getchildren() indices below depend on the
    # exact element order of the CartGet response -- confirm against the
    # Product Advertising API response schema.
    subtotal = cart_get.Cart.SubTotal.getchildren()[2].text
    cart_contents = cart_get.Cart.CartItems.getchildren()[1:]
    # Get the prices and names of items in the cart. Since adding items to
    # the cart does not (seem to) let the programmer decide which offer
    # listing to add, read back what actually ended up in the cart.
    for cart_item in cart_contents:
        item_info = cart_item.getchildren()
        aws_id = item_info[1].text
        item_prices[aws_id] = item_info[7].getchildren()[-1].text
        if isinstance(item_info[4].text, unicode):
            item_names[aws_id] = item_info[4].text.encode('ascii', 'ignore')
        else:
            # Assume it is already a plain byte string
            item_names[aws_id] = item_info[4].text
    return item_and_quantity, item_names, item_prices, subtotal, purchase_url
from amazonproduct.api import API
from amazonproduct import errors

# Directory of this script; the XSLT stylesheet lives next to it.
_here = os.path.dirname(os.path.abspath(__file__))
XSLT = os.path.join(_here, 'xml2json.xslt')

def jsonify(node):
    """
    Transform an XML node into its JSON representation using the
    xml2json XSLT stylesheet and return it as a unicode string.
    """
    xslt_root = etree.parse(XSLT)
    transform = etree.XSLT(xslt_root)
    result = transform(node)
    return unicode(result)

if __name__ == '__main__':
    api = API(locale='de')
    collection = []
    # Interactive loop: prompt for EANs and look each one up.
    # NOTE(review): the loop is truncated here -- the remainder (including
    # its termination handling) lies past the end of the visible source.
    while True:
        try:
            ean = raw_input('EAN? ')
            resp = api.item_lookup(ean, SearchIndex='All', IdType='EAN',
                ResponseGroup='Large')
            items = resp.Items.Item
            for item in items:
                attrs = item.ItemAttributes
                director = None
                # Director may occur once or several times per item
                if hasattr(attrs, 'Director'):
                    if len(attrs.Director) > 1:
                        director = [p.text for p in attrs.Director]
                    else:
                        director = attrs.Director.text
def test_responsegroups_as_list(self):
    """Multiple response groups passed as a list must be joined into one
    comma-separated ``ResponseGroup`` URL parameter."""
    groups = ['ItemAttributes', 'Images']
    api = API(locale='de')
    built = api._build_url(ResponseGroup=groups)
    query = parse_qs(urlparse(built)[4])
    assert query['ResponseGroup'][0] == 'ItemAttributes,Images'
from xml.dom.minidom import parse def minidom_response_parser(fp): """ Custom response parser using xml.dom.minidom.parse instead of lxml.objectify. """ root = parse(fp) # parse errors for error in root.getElementsByTagName('Error'): code = error.getElementsByTagName('Code')[0].firstChild.nodeValue msg = error.getElementsByTagName('Message')[0].firstChild.nodeValue raise AWSError(code, msg) return root if __name__ == '__main__': api = API(AWS_KEY, SECRET_KEY, 'us', processor=minidom_response_parser) root = api.item_lookup('0718155157') print root.toprettyxml() # ... # now do something with it!
# NOTE(review): this chunk starts mid-way through a parser.add_option()
# call; its opening arguments lie before the visible source.
        help='ID is an Universal Product Code (UPC).')
parser.add_option('--sku', action='store_const', dest='id_type', const=SKU,
        help='ID is an Stock Keeping Unit (SKU).')
parser.add_option('--locale', choices=HOSTS.keys(), dest='locale',
        help='Amazon locale to use [default: %default].', default='de')
parser.add_option('-q', '--quiet', action='store_false', dest='verbose',
        help='Suppress output.')

(options, ids) = parser.parse_args(sys.argv[1:])
if len(ids) == 0:
    parser.error('No IDs specified!')

# Don't forget to create file ~/.amazon-product-api
# with your credentials (see docs for details)
api = API(locale=options.locale)

# common lookup parameters
params = {
    'ResponseGroup' : 'Images',
    'SearchIndex' : 'All',
    'IdType' : options.id_type,
}
# When IdType equals ASIN, SearchIndex cannot be present.
if options.id_type == ASIN:
    del params['SearchIndex']

for id in ids:
    # normalise IDs typed with dashes (e.g. ISBNs)
    # NOTE(review): the loop body is truncated here -- it continues past
    # the end of the visible source. ``id`` also shadows the builtin.
    id = id.replace('-', '')