class AmazonMovies(object):
    """Resolve a list of movie titles to Amazon DVD ASINs.

    NOTE(review): get_similarproducts() and get_titles(), called from
    __init__, are not defined in this snippet -- presumably elsewhere
    in the file.
    """

    def __init__(self, titles):
        # Title-cleaning regexes (bracketed annotations, media-format
        # suffixes, edition markers, trailing whitespace). Compiled here
        # but not used by the methods visible in this snippet.
        self._pattern1 = re.compile(r"(\[.*\]|\(.*\)|【.*】|<.*>|(.*)|〔.*〕)")
        self._pattern2 = re.compile(r"(DVD|Blu-ray|ブルーレイ|枚組).*")
        self._pattern3 = re.compile(r"\s.*(MovieNEX|2D|3D|エディション|ディスク|特別(編|版)).*")
        self._pattern4 = re.compile(r"\s$")
        self._api = API(cfg=amazon_keys.config)
        self._input_movies = self.get_movie_dict(titles)
        self.movies_dict = self.get_similarproducts(self._input_movies)
        self.movies = self.get_titles(self.movies_dict)

    def get_movie_dict(self, titles):
        # Pair every title with its looked-up ASIN.
        tmp_list = []
        for title in titles:
            tmp_list.append({'title': title, 'asin': self.get_asin(title)})
        return tmp_list

    def get_asin(self, title):
        # Return the ASIN of the first DVD search hit for *title*,
        # or u"" when nothing is found or the request fails.
        time.sleep(2)  # an interval of ~1.8s seems enough to avoid the API rate limit?
        asin = u""
        try:
            for items in self._api.item_search('DVD', Keywords=title, limit=1):
                for item in items:
                    asin = unicode(item.ASIN)
                    break
                break
        except AWSError, e:
            print("code:%s message:%s" % (e.code, e.message))
        return asin
def search_on_amazon(asin, album, artist):
    '''
    Tries to locate the url of album by artist on amazon
    Returns '' if it can't be found
    '''
    from amazonproduct import API

    # All three module-level credentials must be configured.
    if not AMAZON_KEY or not AMAZON_SECRET or not AMAZON_ASSOCIATE_TAG:
        return ''
    api = API(AMAZON_KEY, AMAZON_SECRET, 'us')
    try:
        if asin:
            # Direct lookup by ASIN first -- cheapest and most precise.
            node = api.item_lookup(asin, AssociateTag=AMAZON_ASSOCIATE_TAG)
            for item in node.Items:
                attributes = item.Item.ItemAttributes
                if attributes.ProductGroup == 'Music':
                    url = item.Item.DetailPageURL
                    if url:
                        return url.text
        # Fall back to a fuzzy keyword search over MP3 downloads.
        node = api.item_search('MP3Downloads',
                               Keywords=album + ' ' + artist,
                               AssociateTag=AMAZON_ASSOCIATE_TAG)
        for item in node.Items:
            attributes = item.Item.ItemAttributes
            if matching.match(artist, str(attributes.Creator)) \
                    and matching.match(album, str(attributes.Title)) \
                    and attributes.ProductGroup == 'Digital Music Album':
                url = item.Item.DetailPageURL
                if url:
                    return url.text
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit
        # and KeyboardInterrupt. Keep the best-effort contract (return '').
        pass
    return ''
def joo_amazon(username, KEYWORDS): items_list4 = [] client = MongoClient('ds063186.mlab.com', 63186) client.credentials.authenticate('shakedinero', 'a/c57821688') db = client.credentials cursor = db.amazon.find() for i in cursor: x = i config = { "access_key": str(x['access_key']), "secret_key": str(x['secret_key']), "associate_tag": str(x['associate_tag']), "locale": str(x['locale']) } api = API(cfg=config) items = api.item_search('All', Keywords=KEYWORDS, ResponseGroup='Large') for i in items: try: title = i.ItemAttributes.Title item_url = i.DetailPageURL img = i.MediumImage.URL price = i.OfferSummary.LowestNewPrice.FormattedPrice shipping = '-' x = '{"title":"' + title + '","url":"' + item_url + '","image":"' + img + '","price":"' + price + '","shipping":"' + shipping + '","web":"Amazon"}' j = json.loads(x) items_list4.append(j) except: continue command = "db_results.results." + username + ".insert_many(items_list4)" try: exec command except: print "No Amazon Results" return items_list4
def lookup_price(searchTerm): AWS_KEY = 'AKIAIILUNE5IYH7BDF2A' SECRET_KEY = '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' ASSOC_TAG = 'camerarecomm-20' api = API(AWS_KEY, SECRET_KEY, 'us', ASSOC_TAG) price = -1 title = '' try: results = api.item_search('Electronics', Keywords=searchTerm, BrowseNode='281052', ResponseGroup='Large', ItemPage=1) if results is not None: for cam in results: try: #asin = cam.Items.Item.ASIN title = cam.Items.Item.ItemAttributes.Title.text price = cam.Items.Item.ItemAttributes.ListPrice.FormattedPrice.text # print title, price break except: price = -1 title = '' except: print 'Item not found' return price, title
class AmazonChecker(object):
    """Checks whether a title is available on Amazon Instant Video."""

    def __init__(self):
        # NOTE(review): credentials are empty here -- they must be filled
        # in (or loaded from configuration) for real requests to succeed.
        AWS_KEY = ''
        SECRET_KEY = ''
        ASSOCIATE_TAG = 'stream0a-20'
        self.api = API(AWS_KEY, SECRET_KEY, 'us', ASSOCIATE_TAG)

    def availability(self, needle):
        """Return a one-element availability descriptor for *needle*.

        BrowseNode 16261631 is presumed to be the Amazon Instant Video
        node -- TODO confirm against the current browse-node list.
        (Removed a large block of dead commented-out exploration code.)
        """
        try:
            data = self.api.item_search("Video", Title=needle, BrowseNode="16261631")
        except NoExactMatchesFound:
            return [{"service": "amazon-instant", "available": False}]
        return [{"service": "amazon-instant", "available": True}]
def search(title=''):
    """Amazon quick search function: print the ASIN of every matching book.

    NOTE(review): ``publisher`` is not defined in this snippet -- it is
    presumably a module-level value; confirm before use. Credentials come
    from the module-level ``LOG`` mapping.
    """
    api = API(LOG['AWS_KEY'], LOG['SECRET_KEY'], LOG['LOCAL'], LOG['ASSOC_TAG'])
    node = api.item_search('Books', Title=title, Publisher=publisher)
    for page in node:
        for book in page.Items.Item:
            print '%s' % (book.ASIN)
def amazon_product_search(keyword, storing_class, store, search_index="All", nb_items=10):
    """Search Amazon for *keyword* and persist up to *nb_items* results.

    Each raw <Item> element is stored via storing_class get_or_create keyed
    on (store, ASIN); entries whose ``.item`` is None are collected and
    returned. Returns None when nothing matches, or when the network call
    fails outside DEBUG mode.
    """
    api = API(settings.AWS_PRODUCT_ACCESS_KEY_ID,
              settings.AWS_PRODUCT_SECRET_ACCESS_KEY,
              settings.AWS_LOCALE)
    try:
        node = api.item_search(search_index, Keywords=keyword,
                               ResponseGroup="Large",
                               AssociateTag=settings.AWS_ASSOCIATE_TAG)
    except NoExactMatchesFound:
        return None
    except URLError:
        if settings.DEBUG:
            raise
        else:
            return None
    # 10 items per result page, so ceil(nb_items / 10) pages are needed.
    nb_pages = int(ceil(nb_items * 0.1))
    item_list = []
    for root in node:
        # total_results = root.Items.TotalResults.pyval
        # total_pages = root.Items.TotalPages.pyval
        try:
            current_page = root.Items.Request.ItemSearchRequest.ItemPage.pyval
        except AttributeError:
            # First page omits the ItemPage element.
            current_page = 1
        # Collect this page's <Item> elements via their XML namespace.
        nspace = root.nsmap.get(None, '')
        items = root.xpath('//aws:Items/aws:Item', namespaces={'aws': nspace})
        item_list.extend(items)
        if current_page >= nb_pages:
            break
    counter = 0
    aff_item_list = list()
    for item in item_list:
        entry, created = storing_class.objects.get_or_create(
            store=store, object_id=item.ASIN)
        entry.store_init(store, item)
        entry.save()
        if entry.item is None:
            aff_item_list.append(entry)
            counter += 1
        if counter == nb_items:
            break
    return aff_item_list
def search(): api = API(locale="jp") # total_results = node.Items.TotalResults.pyval # total_pages = node.Items.TotalPages.pyval for book in api.item_search("Books", Publisher=u"村上"): try: print "%s" % (book.ItemAttributes.Title) # print '%s: "%s"' % (book.ItemAttributes.Author, # book.ItemAttributes.Title) except: logging.debug("no author or title")
def search(): api = API(locale='jp') #total_results = node.Items.TotalResults.pyval #total_pages = node.Items.TotalPages.pyval for book in api.item_search('Books', Publisher=u'村上'): try: print '%s' % (book.ItemAttributes.Title) #print '%s: "%s"' % (book.ItemAttributes.Author, # book.ItemAttributes.Title) except: logging.debug("no author or title")
def get_image_from_amazon(artist, album):
    """Fetch the bytes of the large cover image for *artist*/*album*."""
    client = API(access_key_id="First it was fix-ed",
                 secret_access_key="And then it was enabled",
                 associate_tag="But now it's broke again.",
                 locale="us")
    search_terms = "{} {}".format(artist, album)
    result = client.item_search('Music', ResponseGroup='Images',
                                Keywords=search_terms)
    image_url = str(result.page(1).Items.Item.LargeImage.URL)
    return requests.get(image_url).content
def search(self, q, country):
    """Search Amazon Books for *q* and return a flat list of unicode
    strings: title, price info and detail-page URL per result, interleaved.

    NOTE(review): ``titles``/``prices``/``urls`` are initialised but never
    used -- everything goes into the flat ``items`` list instead.
    """
    titles = []
    prices = []
    urls= []
    items = []
    api = API(AWS_KEY, SECRET_KEY, country)
    try:
        for root in api.item_search('Books', Title=q, AssociateTag='...', ResponseGroup='Large'):
            # extract paging information
            total_results = root.Items.TotalResults.pyval
            total_pages = root.Items.TotalPages.pyval
            try:
                current_page = root.Items.Request.ItemSearchRequest.ItemPage.pyval
            except AttributeError:
                current_page = 1
            #print 'page %d of %d' % (current_page, total_pages)
            # Collect this page's <Item> elements via their XML namespace.
            nspace = root.nsmap.get(None, '')
            books = root.xpath('//aws:Items/aws:Item', namespaces={'aws' : nspace})
            for book in books:
                items.append(unicode(book.ItemAttributes.Title))
                try:
                    if hasattr(book.ItemAttributes, 'ListPrice'):
                        items.append(unicode(book.ItemAttributes.ListPrice.FormattedPrice))
                    elif hasattr(book.OfferSummary, 'LowestUsedPrice'):
                        items.append(unicode(book.OfferSummary.LowestUsedPrice.FormattedPrice))
                except:
                    # NOTE(review): bare except -- hides real errors too.
                    items.append("No price info.")
                items.append(unicode(book.DetailPageURL))
        return items
    except:
        # NOTE(review): bare except -- any failure returns whatever was
        # collected so far.
        return items
def searchAmazon(API): hash_of_items = {} try: for item in API.item_search('VideoGames', Title='Amiibo', MerchantId="Amazon", Availability="Available"): product = item.ItemAttributes product_name = product.Title product_manufacturer = product.Manufacturer.text product_url = item.DetailPageURL.text if 'Nintendo' in product_manufacturer: hash_of_items.update({product_name: product_url}) except: print "NAH" return hash_of_items
def find_asin(title='Around the World in Eighty Days', author='Jules Verne'):
    """Find the unique ASIN identifier for the book.

    INPUT: Book title and author
    OUTPUT: The unique ASIN identifier (string) of the first search hit
    Time taken: < 1 second
    """
    from amazonproduct import API

    uk_api = API(locale='uk')
    search_results = uk_api.item_search('Books', Title=title, Author=author)
    # Keep only the first result.
    for first_match in search_results:
        break
    return str(first_match.ASIN)
class AmazonDvd():
    """Thin wrapper over the Amazon Product API for DVD keyword searches."""

    def __init__(self):
        self.api = API(locale='us')

    def find_product(self, keywords, Director=None):
        '''
        return top 10 products
        '''
        # BUG FIX: the Director argument was previously ignored -- the
        # call always passed the literal ``Director=None``.
        items = self.api.item_search(
            'DVD',
            Keywords=keywords,
            Director=Director,
            limit=10,
            Sort='relevancerank',
            MerchantId='Amazon',
            ResponseGroup='Large')
        dvds = []
        for item in items:
            # Round-trip through JSON to turn lxml objects into plain dicts.
            json_obj = json.loads(ObjectJSONEncoder().encode(item))
            dvd = {}
            dvd['ASIN'] = json_obj['ASIN']
            dvd['Title'] = json_obj['ItemAttributes']['Title']
            dvd['DetailPageURL'] = json_obj['DetailPageURL']
            # Optional fields -- copied only when present in the response.
            if json_obj.get('SmallImage', None):
                dvd['SmallImage'] = json_obj['SmallImage']['URL']
            if json_obj.get('CustomerReviews', None):
                dvd['CustomerReviews'] = json_obj['CustomerReviews']['IFrameURL']
            if json_obj.get('EditorialReviews', None):
                dvd['EditorialReviews'] = json_obj[
                    'EditorialReviews']['EditorialReview']['Content']
            if json_obj.get('OfferSummary', None):
                dvd['LowestNewPrice'] = json_obj['OfferSummary'][
                    'LowestNewPrice']['FormattedPrice']
            if json_obj['ItemAttributes'].get('Actor', None):
                dvd['Actor'] = json_obj['ItemAttributes']['Actor']
            if json_obj['ItemAttributes'].get('Director', None):
                dvd['Director'] = json_obj['ItemAttributes']['Director']
            dvds.append(dvd)
        return dvds
def amazon_res(page_type, words):
    """Search Amazon for *words* and pair each hit with a local DB record.

    page_type 'food' searches Grocery (matched against Food); anything
    else searches HomeGarden (matched against Equipment). Returns a list
    of dicts with asin/title/link/price/image and the matching DB object
    (or False when there is no local record).
    """
    api = API(locale='us')
    if page_type == 'food':
        topic = 'Grocery'
    else:
        topic = 'HomeGarden'
    results = api.item_search(topic, Keywords=words,
                              ResponseGroup="ItemAttributes, OfferSummary, Images",
                              paginate=False)
    items = []
    for it in results.Items.Item:
        asin = it.ASIN
        title = it.ItemAttributes.Title
        link = it.DetailPageURL
        try:
            price = it.OfferSummary.LowestNewPrice.FormattedPrice
        except AttributeError:
            # BUG FIX: was a bare except. Missing lxml children raise
            # AttributeError -- no offers for this item.
            price = "no price available"
        try:
            image = it.SmallImage.URL
        except AttributeError:
            image = ""
        if page_type == 'food':
            try:
                item = Food.objects.get(asin=asin)
                print(item.name)
            except Exception:
                # Was a bare except; DoesNotExist is the expected case.
                item = False
                print('doesnt exist')
        else:
            try:
                item = Equipment.objects.get(asin=asin)
            except Exception:
                item = False
        items.append({'asin': asin, 'title': title, 'link': link,
                      'price': price, 'image': image, 'db': item})
    return items
def parseVector(line):
    # Split a '+'-separated record into (id, (title, author, price, url)).
    # NOTE: fields 3 and 4 are swapped relative to their file order.
    parts = line.split('+')
    return parts[0], (parts[1], parts[2], parts[4], parts[3])


# --- driver script: query Amazon Books and join against HDFS price files ---
# NOTE(review): ``api`` is not defined in this snippet -- presumably
# created earlier in the file; the snippet is also truncated at the end.
pre = time.time()
sc = SparkContext(appName="AskMeMPQuery")
amazon_rdd = sc.textFile('hdfs://192.168.0.33:54310/final/amazon.csv')
walmart_rdd = sc.textFile('hdfs://192.168.0.33:54310/final/walmart.csv')
ebay_rdd = sc.textFile('hdfs://192.168.0.33:54310/final/ebay.csv')
count = 0
start = time.time()
for book in api.item_search('Books', Keywords=sys.argv[1], Condition='New', Availability='Available'):
    # print '%s %s' %(book.ASIN,book.ItemAttributes.Title)
    count += 1
    if count > 10:
        # Cap the number of Amazon results processed.
        break
    amazon_data = amazon_rdd.map(parseVector).filter(
        lambda x: str(book.ASIN) in x[0]).cache()
    if amazon_data.count() > 0:
        upc, (title, author, price, url) = amazon_data.first()
        print '+++' + str(upc)
        print '***"%s" by "%s"' % (title, author)
        print '\tAmazon\t$%.2f\t%s' % (float(price) / 100, url)
        walmart_data = walmart_rdd.map(parseVector).filter(
            lambda x: str(book.ASIN) in x[0]).cache()
# --- driver script: crawl the Books browse-node tree and collect prices ---
# NOTE(review): ``api`` is not defined in this snippet; the snippet also
# ends inside a ``try`` whose except clause is beyond this view.
limit_reached = False
sc = SparkContext(appName="AskMeMP")
# Seed each RDD with a header record; real rows are unioned in below.
amazon_rdd = sc.parallelize(['ID+TITLE+AUTHOR+URL+PRICE'])
walmart_rdd = sc.parallelize(['ID+TITLE+AUTHOR+URL+PRICE'])
ebay_rdd = sc.parallelize(['ID+TITLE+AUTHOR+URL+PRICE'])
result = api.browse_node_lookup(1000)  # 1000: presumably the Books root node -- verify
for child1 in result.BrowseNodes.BrowseNode.Children.BrowseNode:
    if limit_reached:
        break
    result1 = api.browse_node_lookup(child1.BrowseNodeId)
    for child in result1.BrowseNodes.BrowseNode.Children.BrowseNode:
        if limit_reached:
            break
        for book in api.item_search('Books', BrowseNode=child.BrowseNodeId):
            try:
                detail = api.item_lookup(str(book.ASIN), ResponseGroup='OfferSummary').Items[0]
                temp_rdd = sc.parallelize([
                    str(book.ASIN) + '+' + book.ItemAttributes.Title + '+' +
                    book.ItemAttributes.Author + '+' + book.DetailPageURL + '+' +
                    str(detail.Item.OfferSummary.LowestNewPrice.Amount)
                ])
                amazon_rdd = amazon_rdd.union(temp_rdd)
                #print '%s,%s,%s,%s,%s' % (book.ASIN,book.ItemAttributes.Title,book.ItemAttributes.Author,book.DetailPageURL,detail.Item.OfferSummary.LowestNewPrice.Amount)
                amazon_price = int(
                    detail.Item.OfferSummary.LowestNewPrice.Amount)
                amazon_url = str(book.DetailPageURL)
                # Sibling retailers share the same URL scheme here.
                walmart_url = amazon_url.replace("amazon", "walmart")
# a specific title based on the 'links' # which include formats, synopsis, etc. ###################################### disc = NetflixDisc(movie['catalog_title'],netflix) formats = disc.getInfo('formats') return formats if __name__ == '__main__': amazon_only = [] #Get list of bestselling Amazon movies abestselling = [] api = API(AWS_KEY, SECRET_KEY, 'us') for i in range(4): node = api.item_search('UnboxVideo', BrowseNode='16386761', ItemPage=i+1) for movie in node.Items.Item: abestselling.append(movie.ItemAttributes.Title) print abestselling #Query titles on Netflix and find earliest availability date. If it's in the future - it's available on Amazon but not Netflix. netflixClient = NetflixClient(APP_NAME, API_KEY, API_SECRET, CALLBACK, 'False') for film in abestselling: discs = [] time.sleep(1)# Note that we have to sleep between queries to avoid the per-second cap on the API doSearch(netflixClient, discs, film) time.sleep(1) movie = getTitleFromID(netflixClient,discs[0]['id']) time.sleep(1) formats = getTitleInfo(netflixClient,movie)
# --- driver script: score album/track matches from Amazon Music ---
# Usage: script.py <artist> <album> <track>. The snippet is truncated
# after the album scoring below.
if AWS_KEY == "":
    exit(0)
if len(sys.argv) > 1:
    uartist = sys.argv[1]
    ualbum = sys.argv[2]
    utrack = sys.argv[3]
else:
    exit(0)
score = 0
url = {"Amazon": ""}
api = API(AWS_KEY, SECRET_KEY, "us")
node = api.item_search("Music", Artist=uartist, Title=ualbum, Track=utrack, ResponseGroup="Large")
for item in node.Items.Item:
    # Only consider results that actually carry track listings.
    if not hasattr(item, "Tracks"):
        continue
    # Skip items whose artist is neither an exact nor a ~90% fuzzy match.
    if uartist != item.ItemAttributes.Artist.pyval and not cjb.simple_compare(
        uartist, item.ItemAttributes.Artist.pyval, 0.90
    ):
        continue
    albumscore = 0
    # Exact album title scores higher than an ~80% fuzzy match.
    if ualbum == item.ItemAttributes.Title:
        albumscore += 32
    elif cjb.simple_compare(ualbum, item.ItemAttributes.Title.pyval, 0.80):
        albumscore += 20
def handler(clientsocket, clientaddr):
    """Per-connection worker: read a question, resolve pronouns against a
    persisted context noun, answer from a local fact table and/or a Watson
    DeepQA endpoint, and send the answer back over the socket.

    NOTE(review): several latent bugs are flagged inline; this block is
    documented only, not modified.
    """
    #context_noun = 'it'
    print "Accepted connection from: ", clientaddr
    while 1:
        data = clientsocket.recv(1024)
        fact_found = False
        # Replace spelled-out numbers one..ten with digits.
        wtn = WordsToNumbers()
        s = [" one "," two ",' three', ' four', ' five', ' six', 'seven', ' eight', 'nine', ' ten ']
        for x in s:
            if x in data:
                print x, wtn.parse(x)
                data = data.replace(x, " " + str(wtn.parse(x)),1)
        if not data:
            break
        else:
            print data
            # The current context noun is persisted between requests.
            with open ("context.txt", "r") as myfile:
                context_noun=myfile.read().replace('\n', '')
            print context_noun
            f = open("answer.txt", 'w')
            #substitute and/or update context
            q = nltk.word_tokenize(data.lower())
            if ' s 5 ' in data:
                data = data.replace(' s 5 ', 's5', 1)
            # Substitute pronouns with the remembered context noun.
            if 'it' in q:
                data = data.replace(' it ', ' %s ' % context_noun, 1)
                print "updated: " + data
            elif 'its' in q:
                data = data.replace(' its ', " %s's " % context_noun, 1)
                print data
            elif 'bla' in q:
                nouns1 = get_terms(data)
                #nouns = list(nouns)
                # NOTE(review): BUG -- ``len(nouns1)`` is an int, not an
                # iterable, and ``nouns`` is referenced before assignment.
                for i in len(nouns1):
                    nouns += [nouns1.label(i)]
                print nouns
                if len(nouns) > 0:
                    context_noun = ' '.join(nouns[0])
                    print "context: " + context_noun
                    y = open("context.txt",'w')
                    y.write(context_noun)
                    y.close()
            # Try to answer directly from the fact table first.
            key = searchFactTable(context_noun)
            if key != None:
                print q
                for noun in q:
                    # Normalise short attribute names to fact-table keys.
                    if noun == "screen":
                        noun = "screen size"
                    if noun == "memory":
                        noun = "internal memory"
                    if noun == "talk":
                        noun = "talk time"
                    if noun.lower() in factTable[key]:
                        print "fact found: " + key + " " + noun
                        fact_found = True
                        f.write("The "+ noun.encode('utf-8').strip() + " is " + factTable[key][noun].encode('utf-8').strip() +"\n")
            # Fall through to the Watson DeepQA service.
            # SECURITY(review): credentials are hard-coded in this call.
            print 'before url'
            url = 'https://watson-wdc01.ihost.com/instance/508/deepqa/v1/question'
            headers = {'X-SyncTimeout': '30', 'Content-Type': 'application/json', 'Accept': 'application/json'}
            payload = {'question': {'questionText': data}}
            print payload
            print 'bf requests'
            r = requests.post(url, data = json.dumps(payload), headers = headers, auth = ('cmu_administrator', 'H5W2lhXv'))
            print 'bf json'
            j = r.json()
            msg = j["question"]["evidencelist"][0]["text"]
            print msg
            q = open("question.txt",'w')
            q.write(data)
            q.close()
            #f = open("answer.txt", 'w')
            m = msg.encode('ascii','ignore')
            # Only use the Watson answer when no local fact matched.
            if(not fact_found):
                f.write(m)
            f.close()
            clientsocket.send(m)
            # Build an Amazon search query from numbers/proper nouns/nouns.
            b = nltk.word_tokenize(data)
            c = nltk.pos_tag(b)
            print c
            d = filter(lambda (a,b): b == 'CD' or b == 'NNP' or b == 'NN', c)
            query = ""
            if(len(d) > 0):
                print d, d[0], d[0][0]
                print query
                api = API(locale='us')
                for item in d:
                    query += item[0] + " "
                items = api.item_search('Electronics', Keywords = query, limit=1)
                # The block below is disabled (kept inside a string literal).
                """
                f = open("recommendations.txt", "w")
                count = 0
                g = open("prices.txt", "w")
                for item in items:
                    a = item.ASIN
                    result = api.item_lookup(str(a))
                    #for i in result.Items.Item:
                    #print '%s (%s) in group' % (i.ItemAttributes.Title, i.ASIN)
                    try:
                        result = api.similarity_lookup(str(a))
                        for b in result.Items.Item:
                            # print '%s (%s)' % (b.ItemAttributes.Title, b.ASIN)
                            if count >= 12:
                                break
                            image = api.item_lookup(str(b.ASIN), ResponseGroup = "Images")
                            price = api.item_lookup(str(b.ASIN), ResponseGroup = "Offers")
                            for i in image.Items.Item:
                                # print '%s' % i.LargeImage.URL
                                if(i.LargeImage.URL != None):
                                    import urllib
                                    link = str(i.LargeImage.URL)
                                    filename = link.split('/')[-1]
                                    h = open("images/"+filename,'wb')
                                    h.write(urllib.urlopen(link).read())
                                    h.close()
                                    #urllib.urlretrieve(strb, strb)
                            s = "%s $ %s\n" % (b.ItemAttributes.Title, b.DetailPageURL)
                            stringa = s.encode('ascii','ignore')
                            f.write(stringa)
                            count += 1
                            for i in price.Items.Item:
                                print '%s' % i.OfferSummary.LowestNewPrice.FormattedPrice
                                g.write("%s @ %s\n" % (b.ItemAttributes.Title, i.OfferSummary.LowestNewPrice.FormattedPrice))
                    except Exception,e:
                        print str(e)
                f.close()
                g.close()
                """
    clientsocket.close()
# NOTE(review): fragment -- the ``if`` guarding this exit(0) is above the
# visible snippet, and the snippet is truncated after the album scoring.
    exit(0)
if len(sys.argv) > 1:
    uartist = sys.argv[1]
    ualbum = sys.argv[2]
    utrack = sys.argv[3]
else:
    exit(0)
score = 0
url = {'Amazon': ''}
api = API(AWS_KEY, SECRET_KEY, 'us')
node = api.item_search('Music', Artist=uartist, Title=ualbum, Track=utrack, ResponseGroup="Large")
for item in node.Items.Item:
    # Only consider results that actually carry track listings.
    if not hasattr(item, 'Tracks'):
        continue
    # Skip items whose artist is neither an exact nor a ~90% fuzzy match.
    if uartist != item.ItemAttributes.Artist.pyval and not cjb.simple_compare(
            uartist, item.ItemAttributes.Artist.pyval, .90):
        continue
    albumscore = 0
    # Exact album title scores higher than an ~80% fuzzy match.
    if ualbum == item.ItemAttributes.Title:
        albumscore += 32
    elif cjb.simple_compare(ualbum, item.ItemAttributes.Title.pyval, .80):
        albumscore += 20
from amazonproduct import API
import nltk

api = API(locale='us')

# get all books from result set and
# print author and title
items = api.item_search('Electronics', Keywords = "samsung galaxy s5", limit=1)
"""a = "what is the iPhone like?"
b = nltk.word_tokenize(a)
c = nltk.pos_tag(b)
print c
d = filter(lambda (a,b): b == 'NNP' or b == 'NN', c)
print d[0][0]
"""
f = open("recommendations.txt", "a")
count = 0
g = open("prices.txt", "a")
for item in items:
    a = item.ASIN
    result = api.item_lookup(str(a))
    #for i in result.Items.Item:
    #print '%s (%s) in group' % (i.ItemAttributes.Title, i.ASIN)
    # NOTE(review): snippet truncated -- the try body continues beyond
    # this view.
    try:
def get_book(genre, popularity, pub_era, before): api = API(AWS_KEY, SECRET_KEY, 'us', ASSOCIATE_TAG) genre = genre.replace('&', ' ') genre = genre.replace(',', ' ') if (before): param = 'before' else: param = 'after' #print param randSet = 1 found = 0 for root in api.item_search('Books', ResponseGroup='Large', Power='pubdate:' + param + ' ' + str(pub_era) + ' and subject:' + genre, Sort="salesrank"): total_results = root.Items.TotalResults.pyval total_pages = root.Items.TotalPages.pyval if (total_results < RESULT_LIMIT): upper_bound = total_results else: upper_bound = RESULT_LIMIT if (total_results < TOP_TEN): top_results = total_results else: top_results = TOP_TEN #print upper_bound #print top_results if (randSet): if (popularity): num = random.randrange(0, top_results) else: num = random.randrange(top_results, upper_bound) print num pageNum = num / 10 + 1 #starts on page one so if we are under ten this number will be zero exact = num % 10 randSet = 0 print pageNum print exact try: current_page = root.Items.Request.ItemSearchRequest.ItemPage.pyval except AttributeError: current_page = 1 #print 'page %d of %d' % (current_page, total_pages) nspace = root.nsmap.get(None, '') books = root.xpath('//aws:Items/aws:Item', namespaces={'aws': nspace}) if (current_page == pageNum): i = 0 for book in books: if (i == exact): #output = book.ASIN, #if hasattr(book.ItemAttributes, 'Author'): # output = output + book.ItemAttributes.Author + ':' # output = output + book.ItemAttributes.Title #if hasattr(book.ItemAttributes, 'ListPrice'): # output = output + unicode(book.ItemAttributes.ListPrice.FormattedPrice) #elif hasattr(book.OfferSummary, 'LowestUsedPrice'): # output = output + u'(used from %s)' % book.OfferSummary.LowestUsedPrice.FormattedPrice return book i = i + 1
# --- fragment: reply to tweets with an Amazon book suggestion ---
# NOTE(review): ``results``, ``a`` (amazon API client), ``t`` (twitter
# client) and tags_to_string() are defined above this snippet.
count = 0
if not results:
    print 'no new tweets.'
else:
    # Oldest first, so latest_id ends up as the newest processed tweet.
    for tweet in reversed(results):
        tweeter = tweet['user']['screen_name']
        hashtags = tweet['entities']['hashtags']
        text = tweet['text']
        tag_list = tags_to_string(hashtags)
        if tag_list != '':
            # search amazon product API
            try:
                amzn_books = a.item_search('Books', Keywords=tag_list)
                for book in amzn_books:
                    msg = 'Hey @'+tweeter+'! Try "%s" by %s!' % (book.ItemAttributes.Title, book.ItemAttributes.Author)
                    # Only tweet suggestions that fit the 140-char limit.
                    if len(msg) <= 140:
                        t.statuses.update(status=msg)
                        count += 1
                    break
            except:
                # NOTE(review): bare except -- any failure is treated as
                # "no matches".
                t.statuses.update(
                    status="Hey @"+tweeter+"! We coudn't find any matches for those hashtags. Sorry!")
                count += 1
        latest_id = tweet['id']
def get_book(genre, popularity, pub_era, before):
    # Pick a pseudo-random book for *genre* published before/after
    # *pub_era*. popularity truthy -> sample from the top sellers;
    # otherwise from the tail of the first RESULT_LIMIT results.
    # Returns the lxml <Item> element, or None when the chosen index is
    # never reached.
    api = API(AWS_KEY, SECRET_KEY, 'us', ASSOCIATE_TAG)
    # Power-search syntax chokes on '&' and ',' -- strip them out.
    genre = genre.replace('&', ' ')
    genre = genre.replace(',', ' ')
    if (before):
        param = 'before'
    else:
        param = 'after'
    #print param
    randSet = 1
    found = 0
    for root in api.item_search('Books', ResponseGroup='Large',
                                Power='pubdate:'+param+' '+str(pub_era)+' and subject:'+ genre,
                                Sort = "salesrank"):
        total_results = root.Items.TotalResults.pyval
        total_pages = root.Items.TotalPages.pyval
        # Clamp the sampling window to what is actually available.
        if (total_results < RESULT_LIMIT):
            upper_bound = total_results
        else:
            upper_bound = RESULT_LIMIT
        if(total_results < TOP_TEN):
            top_results = total_results
        else:
            top_results = TOP_TEN
        #print upper_bound
        #print top_results
        if (randSet):
            # Choose the random target index exactly once, on page one.
            if (popularity):
                num = random.randrange(0, top_results)
            else:
                num = random.randrange(top_results, upper_bound)
            print num
            pageNum = num/10 + 1 #starts on page one so if we are under ten this number will be zero
            exact = num%10
            randSet = 0
            print pageNum
            print exact
        try:
            current_page = root.Items.Request.ItemSearchRequest.ItemPage.pyval
        except AttributeError:
            # First page omits the ItemPage element.
            current_page = 1
        #print 'page %d of %d' % (current_page, total_pages)
        nspace = root.nsmap.get(None, '')
        books = root.xpath('//aws:Items/aws:Item', namespaces={'aws' : nspace})
        if (current_page == pageNum):
            i = 0
            for book in books:
                if (i == exact):
                    return book
                i = i + 1
def main():
    """Collect the top-ranked Tampon products from Amazon, scrape up to a
    year of their customer reviews with Selenium, and write CSV summaries.

    Side effects: opens two Chrome browser sessions and writes review CSVs
    via write_all_reviews_CSV / write_reviews_per_month.
    """
    api = API(locale='us', access_key_id=ACCESS_KEY, secret_access_key=SECRET_KEY, associate_tag=TAG)
    #Create List of Tampon Products from Amazon
    tampon_items = []
    response = api.item_search('HealthPersonalCare', Keywords='Tampons', ResponseGroup="Large, Reviews")
    for i in response:
        # Only items carrying a sales rank are considered.
        if hasattr(i, 'SalesRank'):
            product = ProductInfo()
            product.set_ASIN(i.ASIN)
            product.set_best_seller_rank(int(i.SalesRank))
            product.set_name(i.ItemAttributes.Title.text)
            product.set_review_iframe_url(i.CustomerReviews.IFrameURL)
            tampon_items.append(product)
    #Take top 22 products for fetching reviews
    # NOTE(review): the variable says 20 but 22 are sliced.
    top_20_tampons = tampon_items[:22]
    #Open a Browser to get all the reviews (Dynamic Page Loading Amazon)
    browser = webdriver.Chrome()
    #Get link for all reviews from the review Iframe
    for product in top_20_tampons:
        browser.get(product.review_iframe_url)
        x = browser.find_elements_by_class_name('small')
        if x:
            x = x[0].find_element_by_tag_name('a').get_attribute('href')
            product.set_all_review_url(str(x))
    browser.close()
    #filter out the product whose reviews are not present
    top_20_tampons = [
        product for product in top_20_tampons if product.all_review_url
    ]
    '''
    Filter to reviews by "all reviews" otherwise scrap only 'Verified Purchaser Reviews'
    #top_20_tampons = set_filter_all_review(top_20_tampons)
    '''
    #Scan for all reviews
    socket.setdefaulttimeout(50)
    brow = webdriver.Chrome()
    brow.set_page_load_timeout(30)
    for product in top_20_tampons:
        time.sleep(5)  # throttle page loads
        brow.get(str(product.all_review_url))
        valid = True
        #Do it till all the previous 1 year reviews are scraped
        while valid:
            # Poll until the review list element appears on the page.
            while True:
                try:
                    x = brow.find_element_by_id('cm_cr-review_list')
                    break
                except NoSuchElementException:
                    print 'Excpetion'
            #get all reviews for the product from that page
            # [3:] strips the "on " prefix from the date text.
            dt = [
                str(i.text)[3:]
                for i in x.find_elements_by_class_name('review-date')
            ]
            dt = map(
                lambda x: datetime.strptime(x.replace(',', ''), '%B %d %Y'),
                dt)
            # setting review dates into product and checking
            product.review_dates.extend(dt)
            #Check if the last review on the page is 1 year old
            if (datetime.now() - dt[-1]).days > 365:
                valid = False
            # Goto next page to get more reviews
            else:
                # A full page (10 reviews) implies there may be more pages.
                if len(dt) == 10:
                    last_button = brow.find_element_by_class_name("a-last")
                    next_page_url = last_button.find_element_by_tag_name(
                        'a').get_attribute('href')
                    print next_page_url
                    brow.get(str(next_page_url))
                else:
                    valid = False
    brow.close()
    #Write a complete file for reviews
    write_all_reviews_CSV(top_20_tampons)
    #Write reviews per month per product for plotting and analysis
    write_reviews_per_month(top_20_tampons)
from amazonproduct import API api = API(locale='de') # get all books from result set and # print author and title for book in api.item_search('Books', Publisher='Galileo Press'): print '%s: "%s"' % (book.ItemAttributes.Author, book.ItemAttributes.Title)
# --- fragment: flag each song row with whether it exists on Amazon MP3 ---
# NOTE(review): ``datareader``, ``datawriter``, ``api`` and ``ape``
# (amazonproduct errors module) are defined above this snippet; the
# layout of the except clause below is a best-effort reconstruction.
datawriter.writerow("tid,title,artist,g1,g2,g5,g6,g7,g8,g9,g11,g12,g14,g15,g16,g17,g25,g28,g29,g31,g32,on_amazon_jp".split(","))
next(datareader, None)  # skip the input header row
for row in datareader:
    kwds = str(row[1]) + ' ' + str(row[2])
    title = str(row[1])
    song_is_on_amazon = False
    check = True
    # Retry loop: exits on success or when no exact match exists.
    while (check):
        try:
            items = api.item_search('MP3Downloads', Keywords=kwds, Title=title)
            if len(items)>=1:
                song_is_on_amazon = True
            break
        except ape.NoExactMatchesFound, e:
            check = False
            # except ape.TooManyRequests, e:
            pass
    print(str(song_is_on_amazon) + ' | ' + row[1] + ' | ' + row[2])
    row.append(song_is_on_amazon)
    datawriter.writerow(row)
from amazonproduct import API from lxml import etree api = API(locale='us') # get all books from result set and # # print author and title for result in api.item_search('chair', SearchIndex='HomeGarden', Keywords='wood', ResponseGroup='ItemAttributes'): width, height, length, weight = (-1, -1, -1, -1) if hasattr(result.ItemAttributes, 'ItemDimensions'): if hasattr(result.ItemAttributes.ItemDimensions, 'Width'): width = result.ItemAttributes.ItemDimensions.Width if hasattr(result.ItemAttributes.ItemDimensions, 'Height'): height = result.ItemAttributes.ItemDimensions.Height if hasattr(result.ItemAttributes.ItemDimensions, 'Length'): length = result.ItemAttributes.ItemDimensions.Length if hasattr(result.ItemAttributes.ItemDimensions, 'Weight'): weight = result.ItemAttributes.ItemDimensions.Weight print "(W, H, L) - weight => (%d, %d, %d) - %d", width, height, length, weight
class AmazonPipeline(object):
    """Scrapy pipeline that annotates scraped items with Amazon ASINs.

    Resolution order: UPC lookup, then EAN lookup, then a keyword search
    on mpn+brand whose hits are confirmed by editorial-review similarity
    (Dandelion DataTXT).
    """

    def __init__(self):
        self.api = API(locale='us')
        self.datatxt = DataTXT(app_id=settings['DANDELION_APP_ID'],
                               app_key=settings['DANDELION_KEY'])

    def process_item(self, item, spider):
        # Items coming from the ebay/amazon spiders already carry ASINs.
        if spider.name in ['ebay_spider', 'amazon_spider']:
            return item
        item['asin'] = []
        if 'upc' in item:
            if item['upc']:
                asin = self.get_upc(item['upc'])
                item['asin'] = asin
        elif 'ean' in item:
            if item['ean']:
                asin = self.get_ean(item['ean'])
                item['asin'] = asin
        elif False and 'mpn' in item and 'brand' in item:
            # NOTE(review): dead branch -- deliberately disabled with
            # ``False and``; identical to the branch below.
            if item['mpn'] and item['brand']:
                asin = self.search("%s+%s" % (item['mpn'], item['brand']),
                                   item['description'])
                item['asin'] = asin
        elif 'mpn' in item and 'brand' in item:
            if item['mpn'] and item['brand']:
                asin = self.search("%s+%s" % (item['mpn'], item['brand']),
                                   item['description'])
                item['asin'] = asin
        return item

    def get_upc(self, upc):
        # All ASINs matching a UPC barcode.
        response = self.api.item_lookup(upc, SearchIndex="Blended", IdType="UPC")
        asin = list()
        for amazon_item in response.Items.Item:
            asin.append(unicode(amazon_item.ASIN.text, 'utf-8'))
        return asin

    def get_ean(self, ean):
        # All ASINs matching an EAN barcode.
        response = self.api.item_lookup(ean, SearchIndex="Blended", IdType="EAN")
        asin = list()
        for amazon_item in response.Items.Item:
            asin.append(unicode(amazon_item.ASIN.text, 'utf-8'))
        return asin

    def search(self, keyword, description):
        # Keyword search; keep only hits whose editorial review is >70%
        # similar to the scraped description.
        asin = list()
        try:
            response = self.api.item_search("Blended", Keywords=keyword,
                                            ResponseGroup="EditorialReview")
        except NoExactMatchesFound:
            return asin
        for amazon_item in response:
            # start matching the editorial review
            if hasattr(amazon_item, "EditorialReviews") and hasattr(
                    amazon_item.EditorialReviews, "EditorialReview"):
                match = self.find_match(
                    description,
                    amazon_item.EditorialReviews.EditorialReview.Content.text)
                if float(match) > 70.00:
                    asin.append(unicode(amazon_item.ASIN.text, 'utf-8'))
        return asin

    def find_match(self, source, dest):
        # Best Dandelion similarity score between the first five
        # long-enough lines of *source* and *dest*; 0.00 on API failure.
        paragraphs = list()
        match = list()
        for line in source.splitlines():
            if len(line) > 20:
                paragraphs.append(line)
        paragraphs = paragraphs[0:5]
        try:
            for p in paragraphs:
                response = self.datatxt.sim(p, dest)
                match.append(response.similarity)
        except DandelionException:
            return 0.00
        match.sort(reverse=True)
        return match[0]
# --- fragment: look up price and reviews for each game/platform combo ---
# NOTE(review): ``cmn_games``, ``api`` and the imports are defined above
# this snippet; it also ends inside a dangling ``try:``.
with open('cmn_games2.csv', 'r') as op:
    cmn_gms = csv.reader(op)
    for row in cmn_gms:
        cmn_games.extend(row)
print len(cmn_games)
game_list = []
for game in cmn_games:
    for plt in ['PC', 'Xbox One', 'Playstation 4']:
        game_dict = dict()
        game_dict['Title'] = game
        game_dict['Platform'] = plt
        try:
            print game + ' ' + plt + '\n'
            ress = api.item_search('VideoGames', Keywords=game + ' ' + plt)
            for res in ress:
                root = res
                # xml_new = etree.tostring(root, pretty_print=True)
                # print xml_new
                game_dict['Amzn_Title'] = root.ItemAttributes.Title
                asin = root.ASIN.text
                sleep(2)  # throttle to stay under the API rate limit
                result0 = api.item_lookup(asin,ResponseGroup='OfferSummary')
                game_dict['Price'] = result0.Items.Item.OfferSummary.LowestNewPrice.FormattedPrice.text
                sleep(2)
                result = api.item_lookup(asin,ResponseGroup='Reviews', TruncateReviewsAt=10)
                review_link = result.Items.Item.CustomerReviews.IFrameURL.text
                response = urllib2.urlopen(review_link).read()
                soup = BeautifulSoup(response)
                try:
from amazonproduct import API api = API(locale='us') items = api.item_search('leisure women summer vacation girls') # get all books from result set and # print author and title for item in items: print 'Title: "%s"' % (item.ItemAttributes.Title) # import keys # from amazon.api import AmazonAPI # AMAZON_ACCESS_KEY = keys.AMAZON_ACCESS_KEY # AMAZON_SECRET_KEY = keys.AMAZON_SECRET_KEY # AMAZON_ASSOC_TAG = keys.AMAZON_ASSOC_TAG # amazon = AmazonAPI(AMAZON_ACCESS_KEY, AMAZON_SECRET_KEY, AMAZON_ASSOC_TAG) # products = amazon.search(Keywords='kindle', SearchIndex='All') # for i, product in enumerate(products): # print "{0}. '{1}'".format(i, product.title)
#!/usr/bin/env python from amazonproduct import API api = API(locale='us') api.item_search('Toys', Keywords='Rocket') #total_results = node.Items.TotalResults.pyval #total_pages = node.Items.TotalPages.pyval # get all books from result set and # print author and title #for book in api.item_search('Books', Publisher='Galileo Press'): # print '%s: "%s"' % (book.ItemAttributes.Author, # book.ItemAttributes.Title)
class Amazon_Api:
    """Connects to the Amazon Product API and pulls cover images, detail-page
    links and prices for a list of recommended books."""

    def __init__(self, some_dict, list_of_numbers, number_of_recs):
        # some_dict maps book_number -> (isbn, title, author); a float value
        # in the isbn slot marks a missing ISBN (NaN) -- TODO confirm caller.
        self.api = API(locale='us')
        self.image_url = []
        # Keep only the recommended book numbers we actually have data for.
        self.book_numbers = [
            book for book in list_of_numbers if book in some_dict
        ]
        # Left-pad string ISBNs with zeros to 10 characters; float markers
        # pass through untouched for later null handling.
        self.isbns = [
            (10 - len(i)) * '0' + i if not isinstance(i, float) else i
            for i in [
                some_dict[book_number][0]
                for book_number in list_of_numbers
                if book_number in some_dict
            ]
        ]
        self.some_dict = some_dict
        self.list_of_numbers = list_of_numbers
        self.number_of_recs = number_of_recs

    def format_response(self, resp):
        """Append title/link/image/price of the first usable item in *resp*
        to self.image_url (multiple images may come back; keep the first)."""
        for item in resp.Items.Item:
            try:
                self.image_url.append({
                    'title': item.ItemAttributes.Title,
                    'page_url': item.DetailPageURL,
                    'image_url': item.LargeImage.URL,
                    'price': item.OfferSummary.LowestNewPrice.FormattedPrice
                })
                break
            except AttributeError:
                # Item lacks one of the fields; try the next one.
                continue

    def handle_null_isbn(self, book_number):
        """Fallback lookup by title/author for books with a missing ISBN."""
        try:
            response = self.api.item_search(
                'Books',
                Title=self.some_dict[book_number][1],
                Author=self.some_dict[book_number][2],
                Limit=1)
            for i in response:
                current_asin = str(i.ASIN.values.im_self)
                break
            resp = self.api.item_lookup(
                ItemId=current_asin,
                ResponseGroup='Images,OfferSummary,Small',
                IdType='ASIN')
            # BUG FIX: was a bare `format_response(resp)` -- a NameError that
            # the enclosing except silently swallowed, so the fallback never
            # populated image_url. Must be called on self.
            self.format_response(resp)
        except Exception:
            # Best-effort: any API failure leaves image_url unchanged.
            pass
        return 'ok'

    def generate_images(self):
        """Fill self.image_url with up to number_of_recs entries, trying the
        ISBN lookup first and falling back to title/author search."""
        for book_number, isbn in zip(self.book_numbers, self.isbns):
            while len(self.image_url) < self.number_of_recs:
                try:
                    if isinstance(self.some_dict[book_number][0], float):
                        # Missing ISBN: go straight to the fallback.
                        self.handle_null_isbn(book_number)
                        break
                    else:
                        try:
                            response = self.api.item_lookup(
                                ItemId=isbn,
                                ResponseGroup='Images,OfferSummary,Small',
                                IdType='ISBN',
                                SearchIndex='Books')
                            self.format_response(response)
                            break
                        except Exception:
                            # ISBN lookup failed; retry via title/author.
                            self.handle_null_isbn(book_number)
                            break
                except Exception:
                    pass
        return self.image_url
'''amazon.py - Simple amazon lookup tool''' import time import argparse from amazonproduct import API from amazonproduct import errors PARSER = argparse.ArgumentParser(description='Amazon lookup tool') PARSER.add_argument('-t', '--title', help='title') ARGS = PARSER.parse_args() API = API(locale='us') try: for item in API.item_search('VideoGames', Title=ARGS.title, Availability='Available', OfferSummary='New', MerchantId='Amazon'): print '%s: %s' % (item.ItemAttributes.Title, item.ASIN) except errors.NoExactMatchesFound, e: print time.strftime("%c") + ": " + "Nothing yet .."
def search():
    # Flask view (presumably -- it reads request.args): searches Amazon for
    # the ?q= term and builds a list of Product objects with prices, images
    # and region info.
    # NOTE(review): niceProducts is built but never returned on the success
    # path -- only the AWSError branch returns a value; confirm the caller's
    # expectations.
    print "hello"
    search_text = request.args['q']
    region = request.args['region']  # read but unused below -- TODO confirm
    listed = True
    try:
        api = API(locale='us')
        products = api.item_search('All', Keywords=search_text, paginate=False, ResponseGroup="ItemIds, ItemAttributes, Images, OfferSummary, Offers")
        niceProducts = []
        ASINList = []
        for product in products.Items.Item:
            try:
                niceProduct = Product()
                niceProduct.title = product.ItemAttributes.Title
                niceProduct.ASIN = product.ASIN.text
                ASINList.append(niceProduct.ASIN)
                niceProduct.imageUrl = product.MediumImage.URL
                # Lowest new-offer price; Amount is expressed in cents.
                try:
                    niceProduct.newPrice = float(product.OfferSummary.LowestNewPrice.Amount)/100
                    niceProduct.newFormattedPrice = product.OfferSummary.LowestNewPrice.FormattedPrice
                    niceProduct.newPriceCurrency = product.OfferSummary.LowestNewPrice.CurrencyCode
                except:
                    pass
                # Lowest used-offer price, when one exists.
                try:
                    niceProduct.usedPrice = float(product.OfferSummary.LowestUsedPrice.Amount)/100
                    niceProduct.usedFormattedPrice = product.OfferSummary.LowestUsedPrice.FormattedPrice
                    niceProduct.usedPriceCurrency = product.OfferSummary.LowestUsedPrice.CurrencyCode
                except:
                    pass
                niceProduct.type = product.ItemAttributes.ProductGroup
                # Region/locale derived from the detail-page URL rather than
                # product.ItemAttributes.RegionCode.
                niceProduct.region = getRegionFromUrl(product.DetailPageURL.text).upper()
                niceProduct.locale = getRegionFromUrl(product.DetailPageURL.text)
                niceProduct.model = product.ItemAttributes.Model
                niceProducts.append(niceProduct)
                # Debug dump of the first product; dead since listed starts
                # True above.
                if not listed:
                    print(objectify.dump(product))
                    listed = True
            except:
                pass  # not a product
        # A commented-out experiment followed here in the original: a bulk
        # api.item_lookup(*ASINList, MerchantId='Amazon',
        # ResponseGroup='Offers') walking res.Items.Item to backfill
        # newPrice/newFormattedPrice/newPriceCurrency (or ListPrice) per
        # product by index i, with assorted debug prints.
    except errors.AWSError, e:
        print 'Amazon complained about yout request!'
        print e.code
        print e.msg
        return e.msg
from amazonproduct import API api = API(access_key_id='AKIAJXG6BBQM6YDLYEKA', secret_access_key='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', associate_tag='zhicheng-20', locale='us') for book in api.item_search('Books', Publisher='Galileo Press'): print '%s: "%s"' % (book.ItemAttributes.Author, book.ItemAttributes.Title)
def Recommend():
    """Flask endpoint: read five movie titles and ratings from the posted
    form, map each title to Amazon DVD ASINs, register the ratings as a new
    user, run the recommender, and write the top-3 recommendations
    (title, detail URL, cover image) to static/js/data.json.

    Returns the rendered index.html template.
    """
    _movie1 = str(request.form['movie1'])
    _movie2 = str(request.form['movie2'])
    _movie3 = str(request.form['movie3'])
    _movie4 = str(request.form['movie4'])
    _movie5 = str(request.form['movie5'])
    _rate1 = int(request.form['rate1'])
    _rate2 = int(request.form['rate2'])
    _rate3 = int(request.form['rate3'])
    # BUG FIX: rate4 and rate5 were both assigned to _rate3 in the original,
    # clobbering the third rating and discarding the last two entirely.
    _rate4 = int(request.form['rate4'])
    _rate5 = int(request.form['rate5'])

    # Map each entered title to its Amazon DVD ASINs.
    # SECURITY WARNING: hard-coded API credentials; these should be revoked
    # and moved to configuration/environment variables.
    api = API("AKIAIKFQCWRMAQBAIGDQ","V3URxyjcNbnRgak1CnWSoNqze2OFo2xkzxhYgYbg","us","chenlji-20")
    ASIN = {}
    keywords = [_movie1, _movie2, _movie3, _movie4, _movie5]
    for keyword in keywords:
        ASIN[keyword] = []
        results = api.item_search('DVD', Title=keyword)
        for item in results:
            ASIN[keyword].append(item.ASIN)

    def create_new_user_data(username, keywords, ratings):
        # Register the new user and keep only the movies whose Amazon ASINs
        # intersect our local dataset (movies_list); skip titles with no
        # overlap, keyed by the first common ASIN.
        userids[len(userids)] = 'newuser1'
        rated = {}
        for i in range(len(keywords)):
            common = set(ASIN[keywords[i]]) & set(movies_list)
            if not common:
                continue
            rated[list(common)[0]] = ratings[i]
        users_data[username] = rated

    # BUG FIX: the ratings list was [_rate1, _rate2, _rate3, _rate2, _rate1],
    # repeating ratings 1-2 instead of using ratings 4-5.
    create_new_user_data('newuser1', keywords,
                         [_rate1, _rate2, _rate3, _rate4, _rate5])
    testrun = recommend('newuser1', userids, users_data)

    # For each recommended ASIN collect [title, detail URL, large image URL].
    movies = {}
    for movie in testrun:
        movies[movie] = []
        for item in api.item_lookup(str(movie)).Items.Item:
            movies[movie].append(str(item.ItemAttributes.Title))
            movies[movie].append(str(item.ItemLinks.ItemLink.URL))
        for items in api.item_lookup(str(movie), ResponseGroup='Images').Items.Item:
            movies[movie].append(str(items.ImageSets.ImageSet.LargeImage.URL))
    # (FIX: removed a stray `print(movies2[...])` referencing a name that
    # only existed in commented-out sample data -- it raised NameError.)

    data = [{"title1": movies[testrun[0]][0],
             "url1": movies[testrun[0]][1],
             "imgUrl1": movies[testrun[0]][2],
             "title2": movies[testrun[1]][0],
             "url2": movies[testrun[1]][1],
             "imgUrl2": movies[testrun[1]][2],
             "title3": movies[testrun[2]][0],
             "url3": movies[testrun[2]][1],
             "imgUrl3": movies[testrun[2]][2]}]
    # Persist the recommendations for the front-end to pick up.
    with open('static/js/data.json', 'w') as f:
        json.dump(data, f, ensure_ascii=False, encoding='utf-8')
    return render_template('index.html')
def price_offers(xxx):
    # Find the cheapest new Amazon.it offer for a book matching the keyword
    # string *xxx*. Returns the best price as a float (EUR), or the 999.00
    # sentinel when nothing usable was found. Side effect: stores the
    # winning title and URL in the Flask session ('titoloAMZ'/'urlAMZ').
    # Pp, Pl, Pk are credentials defined elsewhere in the file.
    best=999.00
    one=True          # True until a first candidate price has been accepted
    AutFin=""
    titFin=""
    api = API(Pp, Pl, 'it')
    try:items = api.item_search('Books', Keywords=xxx,AssociateTag=Pk)
    except Exception: return 999.00
    KK=0
    noDis=0           # counts offers skipped because "non disponibile"
    try:
        # First pass: only the first 5 results, only available offers.
        for book in items:
            KK+=1
            if KK==6: break
            ID=book.ASIN
            try:
                AutP=str(book.ItemAttributes.Author)
                TitP=str(book.ItemAttributes.Title)
                url=str(book.DetailPageURL)
            except Exception as o: continue
            str_asin = str(ID)
            try:node = api.item_lookup(ItemId=str_asin, ResponseGroup='Offers', Condition='New', MerchantId='Amazon',AssociateTag=Pk)
            except AWSError: continue
            try:
                for a in node.Items.Item.Offers.Offer:
                    # Skip unavailable offers, but remember that we saw some
                    # so the second pass can reconsider them.
                    if("non disponibile" in str(a.OfferListing.Availability).lower()):
                        noDis=noDis+1
                        continue
                    # "EUR 12,34" -> "12.34" -> 12.34
                    prix=str(a.OfferListing.Price.FormattedPrice).replace("EUR ","").replace(",",".")
                    prox=float(prix)
                    if(prox<best and one):
                        # First accepted candidate: record author/title too.
                        best=prox
                        AutFin=AutP
                        titFin=TitP
                        one=False
                        session['titoloAMZ']=titFin
                        session['urlAMZ']=url
                    elif(prox<best and one==False and AutP==AutFin and TitP==titFin):
                        # Cheaper offer for the same author/title.
                        best=prox
                        session['titoloAMZ']=titFin
                        session['urlAMZ']=url
            except Exception as e: continue
        if(best==999.00 and noDis>=1):
            # Second pass: nothing available was priced but unavailable
            # offers exist; accept the cheapest currently-unavailable one
            # and flag it in the stored title.
            # NOTE(review): `items` is iterated a second time here; if
            # item_search returns a one-shot iterator this loop may see no
            # results -- confirm against the amazonproduct API.
            KK=0
            one=True
            AutFin=""
            titFin=""
            for book in items:
                KK+=1
                if KK==6: break
                ID=book.ASIN
                try:
                    AutP=str(book.ItemAttributes.Author)
                    TitP=str(book.ItemAttributes.Title)
                    url=str(book.DetailPageURL)
                except Exception as o: continue
                str_asin = str(ID)
                try:node = api.item_lookup(ItemId=str_asin, ResponseGroup='Offers', Availability='Available', Condition='New', MerchantId='Amazon',AssociateTag=Pk)
                except AWSError: continue
                try:
                    for a in node.Items.Item.Offers.Offer:
                        # This pass keeps ONLY "non disponibile" offers.
                        if("non disponibile" not in str(a.OfferListing.Availability).lower()):
                            continue
                        prix=str(a.OfferListing.Price.FormattedPrice).replace("EUR ","").replace(",",".")
                        prox=float(prix)
                        if(prox<best and one):
                            best=prox
                            AutFin=AutP
                            titFin=TitP
                            one=False
                            session['titoloAMZ']=titFin+"(Attualmente non disponibile)"
                            session['urlAMZ']=url
                        elif(prox<best and one==False and AutP==AutFin and TitP==titFin):
                            best=prox
                            session['titoloAMZ']=titFin+"(Attualmente non disponibile)"
                            session['urlAMZ']=url
                except Exception as e: continue
        return best
    except Exception:
        return best