def run(opts):
    """Queue three Finding searches and one RSS fetch in parallel, then
    dump every response once all of them complete."""
    try:
        parallel = Parallel()
        pending = []

        # None of these execute() calls block; they are queued on `parallel`.
        search_python = finding(parallel=parallel, debug=opts.debug,
                                appid=opts.appid, config_file=opts.yaml)
        search_python.execute('findItemsAdvanced', {'keywords': 'python'})
        pending.append(search_python)

        rss_feed = html(parallel=parallel)
        rss_feed.execute('http://www.ebay.com/sch/i.html?_nkw=Shirt&_rss=1')
        pending.append(rss_feed)

        search_perl = finding(parallel=parallel, debug=opts.debug,
                              appid=opts.appid, config_file=opts.yaml)
        search_perl.execute('findItemsAdvanced', {'keywords': 'perl'})
        pending.append(search_perl)

        search_php = finding(parallel=parallel, debug=opts.debug,
                             appid=opts.appid, config_file=opts.yaml)
        search_php.execute('findItemsAdvanced', {'keywords': 'php'})
        pending.append(search_php)

        # Block until every queued request has finished.
        parallel.wait()

        if parallel.error():
            print(parallel.error())

        for api in pending:
            dump(api)
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def processPage(pageNbr):
    """Pair Best Buy products on page *pageNbr* with eBay listings, appending
    rows to the module-level price/name/match tables.

    Best-effort per product: a failure on one product is reported and skipped
    rather than aborting the whole page.
    """
    try:
        products = bb.products(page=pageNbr)
    except Exception as e:
        # BUG FIX: was a bare `except: pass`, which also swallowed
        # SystemExit/KeyboardInterrupt and hid every error silently.
        print('processPage(%s) failed: %s' % (pageNbr, e))
        return

    for product in products:
        try:
            best_buy_name = product['name']
            product_upc = product['upc']
            best_buy_price = product['regularPrice']

            # Exact UPC match on eBay.
            f = finding()
            f.execute('findItemsByProduct',
                      '<productId type="UPC">' + product_upc + '</productId>')
            dom = f.response_dom()
            items = dom.getElementsByTagName('item')
            if len(items) == 0:
                continue

            ebay_names = list(map(
                lambda x: x.getElementsByTagName('title')[0].firstChild.nodeValue,
                items))
            first_item = items[0]
            ebay_id = first_item.getElementsByTagName('productId')[0].firstChild.nodeValue
            ebay_price = first_item.getElementsByTagName('sellingStatus')[0] \
                .getElementsByTagName('currentPrice')[0].firstChild.nodeValue

            # add to price table
            upcs_for_price_frame.append(product_upc)
            ebay_prices.append(ebay_price)
            best_buy_prices.append(best_buy_price)

            # add to best buy table
            upcs_for_best_buy_frame.append(product_upc)
            names_for_best_buy_frame.append(best_buy_name)

            # add to ebay table (and positive rows of the match table)
            for ebay_name in ebay_names:
                upcs_for_ebay_frame.append(product_upc)
                names_for_ebay_frame.append(ebay_name)
                ids_for_ebay_frame.append(ebay_id)
                ebay_names_for_match_frame.append(ebay_name)
                best_buy_names_for_match_frame.append(best_buy_name)
                match_inds.append(1)

            # Negative match rows: keyword-search hits with a different id.
            f = finding()
            f.execute('findItemsAdvanced', {'keywords': best_buy_name})
            dom = f.response_dom()
            items = dom.getElementsByTagName('item')[:10]
            for item in items:
                curr_ebay_id = item.getElementsByTagName('productId')[0].firstChild.nodeValue
                if curr_ebay_id != ebay_id:
                    curr_ebay_name = item.getElementsByTagName('title')[0].firstChild.nodeValue
                    best_buy_names_for_match_frame.append(best_buy_name)
                    ebay_names_for_match_frame.append(curr_ebay_name)
                    match_inds.append(0)
        except Exception as e:
            # Keep the original best-effort contract, but report the failure.
            print('skipping product: %s' % e)
def find_specific_listing(opts):
    """Search for a fixed keyword phrase and return the parsed response dict."""
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True)
        # TODO: add a helper to strip special characters from the keywords.
        request = {
            'keywords': u'rice ball mold',
            'paginationInput': {'entriesPerPage': 200},
        }
        response = api.execute('findItemsAdvanced', request)
        dump(api)
        return response.dict()
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def find_user_listing(opts):
    """Fetch up to 200 active listings from one fixed seller and return the
    response as a dict."""
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True)
        request = {
            'itemFilter': [
                {'name': 'Seller', 'value': 'socal.alldayeveryday'},
            ],
            'affiliate': {'trackingId': 1},
            'paginationInput': {'entriesPerPage': 200},
        }
        response = api.execute('findItemsAdvanced', request)
        dump(api)
        return response.dict()
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def run(opts):
    """Advanced search for used 'GRAMMY Foundation®' items located in GB,
    dumping the full request/response."""
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True)
        request = {
            'keywords': u'GRAMMY Foundation®',
            'itemFilter': [
                {'name': 'Condition', 'value': 'Used'},
                {'name': 'LocatedIn', 'value': 'GB'},
            ],
            'affiliate': {'trackingId': 1},
            'sortOrder': 'CountryDescending',
        }
        api.execute('findItemsAdvanced', request)
        dump(api)
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def getSellerInfo(self, searchResult):
    """Build a WatchedSeller record for one flagged search result."""
    seller = WatchedSeller()
    seller.userName = searchResult.seller
    seller.url = "https://www.ebay.com/usr/" + searchResult.seller
    seller.watchedResultTitle = searchResult.title
    seller.watchedResultURL = searchResult.website
    seller.sellerRating = searchResult.sellerRating
    seller.timesFlagged = 1

    api = finding(siteid='EBAY-US',
                  appid='Shashwat-ToLoP-PRD-35d80d3bd-64e84449',
                  config_file=None)
    # Count this seller's matching US listings; only the pagination total
    # from the reply is used.
    api.execute('findItemsAdvanced', {
        'keywords': ['toys r us'],
        'itemFilter': [
            {'name': 'ListedIn', 'value': 'EBAY-US'},
            {'name': 'LocatedIn', 'value': 'US'},
            {'name': 'Seller', 'value': searchResult.seller},
        ],
        'sortOrder': 'EndTimeSoonest',
        'outputSelector': 'SellerInfo',
    })
    seller.numberOfPosts = int(
        api.response.reply.paginationOutput.totalEntries)
    return seller
def search_card(name):
    """Fetch a seller's soonest-ending listings and build an Alexa speech card.

    Returns the result of generate_speech_card(), or None on ConnectionError.
    """
    try:
        api = finding(appid="BrianHer-Alexa-PRD-409141381-46692988",
                      config_file=None, warnings=True)
        api_request = {
            'itemFilter': [
                {'name': 'Seller', 'value': name},
            ],
            # BUG FIX: sortOrder must be a plain string; the original passed
            # the set literal {'EndTimeSoonest'}, which serializes incorrectly.
            'sortOrder': 'EndTimeSoonest',
        }
        api.execute('findItemsAdvanced', api_request)
        return generate_speech_card(api.response.dict(), name)
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def run_motors(opts):
    """Run a 'tesla' search against the eBay Motors site, print each hit,
    then report response metadata."""
    api = finding(siteid='EBAY-MOTOR', debug=opts.debug, appid=opts.appid,
                  config_file=opts.yaml, warnings=True)
    response = api.execute('findItemsAdvanced', {'keywords': 'tesla'})

    result = response.reply.searchResult
    # `item` is absent from the reply when nothing matched.
    if hasattr(result, 'item'):
        for hit in result.item:
            print("ID(%s) TITLE(%s)" % (hit.itemId, hit.title))
    else:
        print("No Items Found.")

    if api.error():
        raise Exception(api.error())
    if api.response_content():
        print("Call Success: %s in length" % len(api.response_content()))
    print("Response code: %s" % api.response_code())
    print("Response DOM: %s" % api.response_dom())
    dictstr = "%s" % api.response_dict()
    print("Response dictionary: %s..." % dictstr[:250])
def run(opts):
    """Page through the Women's Accessories category (id 4251), saving each
    raw XML response to WomensAccessoriesResponse<page>.xml."""
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True)
        for page in range(1, 100):
            request = {
                'categoryId': '4251',
                'paginationInput': {'pageNumber': page, 'entriesPerPage': 100},
            }
            api.execute('findItemsByCategory', request)
            # BUG FIX: use a context manager; the original opened the file
            # before execute() and never closed it on error, leaking the
            # handle (and leaving empty files behind) when a call raised.
            with open('WomensAccessoriesResponse' + str(page) + '.xml', 'w+') as out:
                out.write(api.response.content)
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def run(opts):
    """Dump the results of a GB-located, used-condition search for
    'GRAMMY Foundation®'."""
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True)
        filters = [
            {'name': 'Condition', 'value': 'Used'},
            {'name': 'LocatedIn', 'value': 'GB'},
        ]
        api.execute('findItemsAdvanced', {
            'keywords': u'GRAMMY Foundation®',
            'itemFilter': filters,
            'affiliate': {'trackingId': 1},
            'sortOrder': 'CountryDescending',
        })
        dump(api)
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def find_tark_pants(opts):
    """Keyword search for 'tark shimmer' listings, printing the JSON reply."""
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True,
                      response_encoding="JSON")
        # Earlier drafts filtered by category/condition/size aspects; the
        # bare keyword search proved sufficient.
        response = api.execute('findItemsAdvanced',
                               {'keywords': 'tark shimmer'})
        print(response.json())
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def __init__(self, **kwargs):
    """Initialization method.

    Parameters
    ----------
    sandbox : boolean
        see Ebay class

    Returns
    -------
    New instance of :class:`Finding` : Finding

    Examples
    --------
    >>> finding = Finding(sandbox=True)
    >>> finding  #doctest: +ELLIPSIS
    <app.api.Finding object at 0x...>
    >>> finding.kwargs['domain'] == 'svcs.sandbox.ebay.com'
    True
    >>> finding = Finding(sandbox=False)
    >>> finding.kwargs['domain'] == 'svcs.ebay.com'
    True
    """
    super(Finding, self).__init__(**kwargs)
    # Sandbox and production use different service hosts.
    domain = 'svcs.sandbox.ebay.com' if self.sandbox else 'svcs.ebay.com'
    new = {
        # Map the configured country to its eBay site id abbreviation.
        'siteid': self.global_ids[self.kwargs['country']]['countryabbr'],
        'domain': domain,
        'version': '1.0.0',
        'compatibility': '1.0.0',
    }
    self.kwargs.update(new)
    # Underlying ebaysdk Finding connection built from the merged kwargs.
    self.api = finding(**self.kwargs)
def GetItem(name):
    """Find the cheapest instant-purchase eBay listing for *name*.

    Returns a list of 'LABEL: value' strings (image URL, item URL, category,
    title, price).  Raises if no fixed-price/store-inventory listing matches.
    """
    api = finding(appid=AppID, config_file=None)
    # NOTE(review): 'outputSelectyor' is misspelled in the original request,
    # so SellerInfo is NOT actually requested; kept as-is to preserve behavior.
    api_request = {'keywords': name, 'outputSelectyor': 'SellerInfo'}
    response = api.execute('findItemsByKeywords', api_request)
    soupx = soup(response.content, 'lxml')
    totalentries = int(soupx.find('totalentries').text)
    items = soupx.find_all('item')

    # Keep only instant-purchase ("buy it now") listings.
    AppropriateMatches = [
        item for item in items
        if item.listingtype.string.lower() in ('fixedprice', 'storeinventory')
    ]

    # Lowest asking price among those listings.
    lowestprice = min(float(item.currentprice.string)
                      for item in AppropriateMatches)

    # All listings at that price point; report the first one.
    LowestPricedItems = [item for item in AppropriateMatches
                         if float(item.currentprice.string) == lowestprice]

    cheapest = LowestPricedItems[0]
    cat = cheapest.categoryname.string.lower()
    title = cheapest.title.string.lower().strip()
    price = cheapest.currentprice.string
    url = cheapest.viewitemurl.string.lower()

    return [
        'EBAY IMAGE URL: ' + GetItemImageUrl(url),
        'EBAY URL: ' + url,
        'EBAY CATEGORY: ' + cat,
        'EBAY TITLE: ' + title,
        # BUG FIX: `price` is already a str; the original's `price.string`
        # raised AttributeError here.
        'EBAY PRICE: ' + price,
    ]
def Seller_Scrape(name, pg):
    """Fetch one 100-item page of seller *name*'s listings into the
    module-level output lists; return the next page number."""
    api = finding(appid='AlexYeh-Product-PRD-5b3210513-f775e8d7',
                  config_file=None)
    response = api.execute('findItemsAdvanced', {
        'itemFilter': [{'name': 'Seller', 'value': name}],
        'outputSelector': 'SellerInfo',
        'paginationInput': {'entriesPerPage': '100', 'pageNumber': str(pg)},
        # BUG FIX: dropped "'paginationOutput': {'totalPages'}" — it was a
        # Python *set* literal, and paginationOutput is a response-only
        # field, not a valid request parameter.
    })
    soup = BeautifulSoup(response.content, 'lxml')
    print(name, pg)
    ids = soup.findAll('itemid')
    titles = soup.findAll('title')
    prices = soup.findAll('currentprice')
    for i in range(len(titles)):
        output_seller.append(name)
        item_ID.append(ids[i].text)
        item_title.append(titles[i].text)
        item_price.append(prices[i].text)
    print(len(item_ID))
    return pg + 1
def findSimilarProducts(opts):
    """Issue an empty findItemsAdvanced call and report response metadata."""
    api = finding(siteid='EBAY', debug=opts.debug, appid=opts.appid,
                  config_file=opts.yaml, warnings=True)
    # Planned flow: 1) grab JSON criteria from a server, 2) stringify it,
    # 3) pass it to api.execute(), 4) return descriptions etc.  For now the
    # request body is empty.
    api.execute('findItemsAdvanced', {})
    if api.error():
        raise Exception(api.error())
    if api.response_content():
        print("Call Success: %s in length" % len(api.response_content()))
    print("Response code: %s" % api.response_code())
    print("Response DOM: %s" % api.response_dom())
    dictstr = "%s" % api.response_dict()
    print("Response dictionary: %s..." % dictstr[:250])


print("Finding samples for SDK version %s" % ebaysdk.get_version())
(opts, args) = init_options()
findSimilarProducts(opts)
def ebayItemIdList(make, listLength=None):
    """Collect eBay Motors item ids for *make*, optionally truncated to
    *listLength* entries; returns None (after reporting) on error."""
    api = finding(siteid='EBAY-MOTOR', domain='svcs.ebay.com', warnings=True)
    dictFinding = {
        'categoryId': '6001',
        'aspectFilter': {'aspectName': 'Make', 'aspectValueName': make},
        'paginationInput': {'pageNumber': '1', 'entriesPerPage': '100'},
        'paginationOutput': 'totalPages,totalEntries',
        'sortOrder': 'PricePlusShippingHighest',
    }
    # BUG FIX: create the spinner before the try block; the original built
    # it inside, so a failure in getTotalPages() raised NameError inside
    # the except handler.
    spinner = Halo(text='grabbing list of Item IDs', spinner='monkey')
    try:
        listItemId = []
        totalPages = getTotalPages(api, make)
        spinner.start()
        for page in range(1, totalPages + 1):
            dictFinding['paginationInput']['pageNumber'] = str(page)
            response = json.loads(
                '%s' % api.execute('findItemsAdvanced', dictFinding).json())
            for listing in response['searchResult']['item']:
                listItemId.append(listing['itemId'])
        print("\nTOTAL NUMBER OF LISTINGS FOR %s: " % make, len(listItemId))
        if listLength is not None:
            spinner.succeed("Returning %s number of Item Ids" % listLength)
            return listItemId[0:listLength]
        spinner.succeed("Returning all Item Ids")
        return listItemId
    except Exception as e:
        spinner.fail("ERROR: %s" % e)
def run(opts):
    # Search used 'tark shimmer' listings worldwide and print the parsed reply.
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True)
        api_request = {
            #'keywords': u'niño',
            'keywords': u'tark shimmer',
            'itemFilter': [
                {'name': 'Condition', 'value': 'Used'},
                {'name': 'LocatedIn', 'value': 'WorldWide'},
            ],
        }
        response = api.execute('findItemsAdvanced', api_request)
        print (response.dict())
        # dump(api)
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
    # NOTE(review): the lines below run even after a ConnectionError, when
    # `api` may hold no response — presumably debug leftovers; confirm
    # their intended placement.
    dictstr = "%s" % api.response_dict()
    #print(dictstr[_response_content])
    print(api.response_content)
def get_pants_histogram(opts):
    """Search used listings in category 63863 with a Rise=9 aspect filter
    and print the parsed response."""
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True)
        request = {
            'categoryId': '63863',
            'itemFilter': [
                {'name': 'Condition', 'value': '3000'},
                {'name': 'LocatedIn', 'value': 'WorldWide'},
            ],
            'aspectFilter': [
                {'aspectName': 'Rise', 'aspectValueName': '9'},
            ],
        }
        result = api.execute('findItemsByCategory', request)
        print(result.dict())
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def run(opts):
    """Search used 'Warhammer 40k' listings located in Canada, print each
    hit's id and title, then dump the exchange."""
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True)
        request = {
            'keywords': u'Warhammer 40k',
            'itemFilter': [
                {'name': 'Condition', 'value': 'Used'},
                {'name': 'LocatedIn', 'value': 'CA'},
            ],
            'affiliate': {'trackingId': 1},
            'sortOrder': 'CountryDescending',
        }
        response = api.execute('findItemsAdvanced', request)
        result = response.reply.searchResult
        # `item` is absent from the reply when nothing matched.
        if hasattr(result, 'item'):
            for hit in result.item:
                print("ID(%s) TITLE(%s)" % (hit.itemId, hit.title))
        else:
            print("No Items Found.")
        dump(api)
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def get_items(Keywords):
    """Return every <item> tag for a UK-site keyword search, with seller
    info requested."""
    # 'siteid=' selects the country site (EBAY-GB = United Kingdom).
    api = finding(appid=ebayapi, siteid='EBAY-GB', config_file=None)
    reply = api.execute('findItemsByKeywords',
                        {'keywords': Keywords, 'outputSelector': 'SellerInfo'})
    parsed = BeautifulSoup(reply.content, 'lxml')
    return parsed.find_all('item')
def main():
    # Find the raw data using ebay api and export to excel file
    api = finding(appid='EddyNass-Scraper-PRD-651ca6568-7ae32d61', config_file=None)
    # Data Load Prep
    index = 0
    # Declare data frame
    df = pd.DataFrame(columns=('date', 'total', 'price', 'condition', 'MPN',
                               'movement', 'case material', 'band material',
                               'model', 'listingType', 'start', 'url'),
                      dtype=float)
    # Run this to collect data from the ebay website with BeatifulSoup
    # This code will return 100 results per page
    Keywords = "Rolex Wristwatch"
    minPrice = 3000
    maxPrice = 12000
    pageNum = 1
    # Collect all items from ebay on page1 through 4
    while pageNum <= 4:
        soup = BeautifulSoup(response(Keywords, pageNum, minPrice, maxPrice, api).content, 'lxml')
        if pageNum == 1:
            items = soup.find_all('item')
        else:
            # Accumulate subsequent pages onto the first page's item list.
            items += soup.find_all('item')
        pageNum += 1
    # have to clean the data afterwards
    df = get_attributes(items, index, df)
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
    # Write to Excel. Used for Error checking, data cleaning
    '''
    writer = ExcelWriter('ebay_data.xlsx')
    df.to_excel(writer,'Sheet1',index=False)
    writer.save()
    '''
    # Write datafram to csv file
    df.to_csv('ebay_data1.csv')
    # remove rows with NULL entries for any attributes from the training set
    remove_NULL_vals('ebay_data1.csv')
    # Scrape reference market prices from bobswatches.com.
    rolex_prices_data = get_prices_db(get_page('https://www.bobswatches.com/used-rolex-prices'))
    # Column 0 holds the model numbers, the last column the market price.
    model_num = get_column(rolex_prices_data[1:], 0)
    market_price = get_column(rolex_prices_data[1:], -1)
    market_price_data = []
    for i in model_num:
        market_price_data.append([i])
    count = 0
    # Pair each model number with its market price: [model, price] rows.
    for row in market_price_data:
        row.insert(1, market_price[count])
        count = count + 1
    write_to_file(market_price_data, 'rolex_prices_data.csv')
def connect_api():
    # Create the module-level trading, finding and shopping API connections.
    # Credentials come from ebay.yaml; siteid 77 / 'EBAY-DE' is eBay Germany.
    # NOTE(review): the auth token is hard-coded here — consider moving it
    # into the YAML config.
    global trading_api, finding_api, shopping_api
    trading_api = trading(
        config_file="ebay.yaml",
        siteid=77,
        token="AgAAAA**AQAAAA**aAAAAA**gNoiVA**nY+sHZ2PrBmdj6wVnY+sEZ2PrA2dj6wJkYWoDpmGqA2dj6x9nY+seQ**PXkCAA**AAMAAA**hzDKea9fTGWVfQz6a+ULxm8p20w2dRegRC1r3fy5UQjAbb/gQc7m76reGs4JHLA8/Dq6hdxVx6kNmP/78rG9ii4NXu3UIjyFPDA2G4Yg+BMZ7BfDdCVgBPtigPhrrf6GJxTQF/yP0nWbFW0hDN6ZwFURMEcFXazayEUbqNDD9V0zj35Z1X7WE3xV8tnph37SDZDhplGC1ha3MjcDRZqZURhz74TxmH1nugJ9pSsjY7GsDvmVgFk0BP/AFgA8jOXiA6avHSAzWstY9AK/Pxmcj5fO3Df+w5WYrjnNMPZcwPMyFeb+oAxABG8KkYmKbWmh/D57JVHn0G6J2tEysCQMUXUZa8VxbP9noKjmIADhmYh6pjx7PzAtjB0feAdFOb9BGmVeczGgYJrMaBhujrwPB9beaC6d1EuUnw4dON/RJqHWRq9ecQkAMsVB5+eki+/HS0LA4ZVA01FaMW6QlgNRefwj5fAJbv20mggfYx5dA+e8STAmk05ThzUktiaRzHxZEtIiVsf3xqZkSFfCZVpFskdWsBb+ifLYpjEAf+KG+56zijYC50HOP+oTcVqfUcQ9EaBZyiS6+VrYR2KVkGiVGNWyOdY56Y7Q7yJfhMxMi2YAxEFyhPZt6qVg02Ka6ubV0Mcm6Xn1jq02XUzkq3+Q0QMYV2D37MGYxaq0qVCfvp3l6GuY+hZ1S7z9BPu2uXDC8h0t+JurlSveX+/2cj+2loCX+nneCgikupnwrmZCFbqV4jmX36qrIDbE7KU2d7TE",
    )
    finding_api = finding(config_file="ebay.yaml", siteid="EBAY-DE")
    shopping_api = shopping(config_file="ebay.yaml", siteid=77)
def findItems(self, keyword):
    """Return every <item> element matching *keyword*."""
    api = finding(appid=ID_APP, config_file=None)
    reply = api.execute('findItemsByKeywords', {'keywords': keyword})
    parsed = BeautifulSoup(reply.content, 'lxml')
    # Parsed but unused by callers; raises if the tag is missing entirely.
    totalentries = int(parsed.find('totalentries').text)
    return parsed.find_all('item')
def run2(opts):
    """Look up listings by a fixed ePID (ReferenceID) and dump the response."""
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml)
        api.execute('findItemsByProduct',
                    '<productId type="ReferenceID">53039031</productId>')
        dump(api)
    except ConnectionError as e:
        print(e)
def ebay_find_wanted_items():
    """Search eBay UK for each wanted item and write matching auctions to HTML.

    The app id is read from the YAML config, so none is passed here.  Each
    wanted-items line is '<max_price> <search terms>'.
    """
    api = finding(siteid = 'EBAY-GB')
    watch_list = ebay_get_wanted_items()
    html_lines = []

    for entry in watch_list:
        # Split off the price cap; the remainder is the search string.
        max_price, search_terms = entry.split(' ', 1)
        response = api.execute('findItemsAdvanced', {
            'keywords': search_terms,
            'itemFilter': [
                {'name': 'ListingType', 'value': 'Auction'},
                {'name': 'LocatedIn', 'value': 'GB'},
                {'name': 'MaxPrice', 'value': max_price},
            ],
            'sortOrder': 'EndTimeSoonest',
        })
        hits = int(response.reply.searchResult._count)
        print(search_terms)
        print(response.reply.searchResult._count)
        if hits != 0:
            html_lines.append(HTML_HEADER % search_terms)
            for idx in range(hits):
                if hits == 1:
                    hit = response.reply.searchResult.item[0]
                else:
                    hit = response.reply.searchResult.item[idx]
                html_lines.append(HTML_LINK % (hit.viewItemURL,
                                               hit.title,
                                               hit.sellingStatus.bidCount,
                                               hit.sellingStatus.currentPrice))
    ebay_write_html(html_lines)
def ebayAPI(hashtag, key, min=10, max=50):
    """Search eBay for *hashtag* within the [min, max] price band and return
    up to three listing URLs paired with *key*.

    Returns (urls, keys) — parallel lists, both empty when fewer than two
    matches exist.  NOTE: the `min`/`max` parameter names shadow builtins
    but are kept for caller compatibility.
    """
    urls = []
    keys = []
    api = finding(appid='SarthakS-Salamand-PRD-8f78dd8ce-ef33d6b3',
                  config_file=None)
    api_request = {
        'keywords': hashtag,
        'itemFilter': [
            {'name': 'BestOfferOnly', 'value': 1},
            {'name': 'HideDuplicateItems', 'value': 1},
            {'name': 'MaxPrice', 'value': max},
            {'name': 'MinPrice', 'value': min},
        ],
        'SortOrderType': 'PricePlusShippingLowest',
    }
    response = api.execute('findItemsByKeywords', api_request)
    soup = BeautifulSoup(response.content, 'lxml')
    # IDIOM FIX: compare with None via `is not None`, not `!=`.
    if soup.find('totalentries') is not None:
        print("total enteries for:", hashtag,
              int(soup.find('totalentries').text))
    items = soup.find_all('viewitemurl')
    if len(items) >= 2:
        for item in items[:3]:
            print(key, item.contents[0])
            urls.append(item.contents[0])
            keys.append(key)
    print("size of tmp3", len(urls))
    print("size of tmp1", len(keys))
    return urls, keys
def getUrl(word):
    # Look up listings for *word* and return a listing URL (lower-cased);
    # the module-level `url` global is updated as a side effect.
    # NOTE(review): the original source is whitespace-mangled; the `return`
    # is placed inside the loop here (first match). If no items are found
    # the function returns None — confirm against the caller.
    global url
    api2 = finding(appid=ID_APP, config_file=None)
    api_request = {'keywords': word}
    response = api2.execute('findItemsByKeywords', api_request)
    soup = BeautifulSoup(response.content, 'lxml')
    items = soup.find_all('item')
    for item in items:
        url = item.viewitemurl.string.lower()
        return url
def get_from_ebay():
    """Query UK car categories using the module-level `filters_s` and
    `sort_order`, returning the response as a dict."""
    try:
        api = finding(siteid='EBAY-GB',
                      appid="TomaszDo-ac84-4764-9f1f-c3d9de2f3e59")  # prod server
        # Sandbox alternative:
        # api = finding(domain='svcs.sandbox.ebay.com', appid="TomaszDo-10f2-4c33-a762-9d5056c2784a")
        api.execute('findItemsAdvanced', {
            'keywords': 'Car',
            'categoryId': ['9801', '29751', '9800'],
            'itemFilter': filters_s,
            'sortOrder': sort_order,
        })
        return api.response_dict()
    except Exception:
        # BUG FIX: `except Exception, e` is Python 2-only syntax (the rest of
        # this file is Python 3), and `raise e` discarded the traceback; a
        # bare `raise` re-raises with it intact.
        raise
def ebay_jewelry():
    """Cross-check jewelry merchandise prices against live eBay listings.

    A merchandise row is kept only when no comparable eBay listing undercuts
    our sale price; kept rows are written to data/jewelry_crosschecked.csv.
    """
    final_list = []
    merchandise = pd.read_csv('data/jewelry_merchandise.csv',
                              encoding='ISO-8859-1')
    api = finding(config_file='data/ebay_auth.yaml')
    headers = ['item_id', 'name', 'sale_price', 'short_description',
               'long_description', 'images', 'stock', 'upc']
    for i, item in merchandise.iterrows():
        try:
            response = api.execute('findItemsAdvanced', {
                'keywords': item['name'],
                'paginationInput': {'entriesPerPage': '25', 'pageNumber': '1'},
                'sortOrder': 'BestMatch',
            })
            result = response.reply.get('searchResult')
            # ROBUSTNESS: a reply may lack searchResult entirely; the
            # original crashed on `None.get('_count')`.
            if result is None or result.get('_count') == '0':
                continue
            # IDIOM FIX: plain .append() calls (was `list.append(obj, x)`).
            item_values = [
                listing.get('sellingStatus').get('currentPrice').get('value')
                for listing in result.get('item')
            ]
            # Keep the row only if every eBay price is >= our sale price.
            if all(float(v) >= float(item['sale_price']) for v in item_values):
                print(i, item['name'])
                final_list.append([
                    item['item_id'], item['name'], item['sale_price'],
                    item['short_description'], item['long_description'],
                    item['images'], item['stock'], item['upc'],
                ])
        except ConnectionError:
            # Best-effort: skip rows whose lookup fails.
            pass
    output_df = pd.DataFrame(final_list, columns=headers)
    output_df.to_csv('data/jewelry_crosschecked.csv', index=False)
def ebay_page_post():
    # Handle the AJAX POST: search eBay consumer electronics for the posted
    # query and write the formatted results to static/<query>.html.
    # Returns "1" on success, an error sentence on a failed search, or a
    # jsonify'd exception on connection problems.
    if request.method == 'POST':
        #Get json format of the text sent by Ajax
        search = request.json['search']
        try:
            #ebaysdk code starts here
            api = finding(appid='JohnOkek-hybridse-PRD-5c2330105-9bbb62f2', config_file=None)
            api_request = {
                'keywords': search,
                'outputSelector': 'SellerInfo',
                'categoryId': '293'  # consumer-electronics category only
            }
            response = api.execute('findItemsAdvanced', api_request)
            soup = BeautifulSoup(response.content, 'lxml')
            items = soup.find_all('item')
            # This will be returned
            items_found = []
            if response.reply.ack == 'Success':
                for item in items:
                    pic = item.pictureURLLarge
                    title = item.title.string.strip()
                    price = float(item.currentprice.string)
                    url = item.viewitemurl.string.lower()
                    seller = item.sellerusername.text
                    listingtype = item.listingtype.string
                    condition = item.conditiondisplayname.string
                    # NOTE(review): hard-coded USD->NGN exchange rate.
                    naira = price * 365
                    print(
                        '____________________________________________________________'
                    )
                    # Adding the item found in the collection
                    items_found.append(
                        HTML_OUTPUT %
                        (title, price, naira, condition, listingtype, url))
                f = open("static/" + search + ".html", 'w+')
                for item in items_found:
                    f.write("%s" % item)
                f.close()
                return "1"
            else:
                return "Search failed, please make sure you are searching for electronic products only."
        except ConnectionError as e:
            return jsonify(e)
def call_ebay_api(keywords=None):
    """Uses the ebaysdk library to call eBay's API; returns all <item> tags."""
    api = finding(appid=APP_ID, config_file=None)
    reply = api.execute('findItemsByKeywords', {'keywords': keywords})
    parsed = BeautifulSoup(reply.content, 'lxml')
    return parsed.find_all('item')
def run2(opts):
    """Fetch a single page of listings for a fixed ePID and dump the response."""
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml)
        # Adjacent literals concatenate into the exact original request body.
        api.execute(
            'findItemsByProduct',
            '<productId type="ReferenceID">53039031</productId>'
            '<paginationInput><entriesPerPage>1</entriesPerPage></paginationInput>')
        dump(api)
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def getEbayProductData():
    """ Gather all ebay product data from given store.

    Queries findItemsIneBayStores for the configured store, enriches each
    result via getExtraData(), and returns a list of product dicts
    ({'name', 'ebayLink', 'data'}), capped by the module-level `limit`.
    Returns None if the store query fails.
    """
    funId = "[ ebayProdData ]"
    if DEV_MODE:
        print(funId + runningIndicator + "Searching: " + ebayStoreName)
    total = 0
    try:
        findingEbayApi = finding(siteid = ebaySiteId, appid = ebayAppId, config_file=None)
        findingEbayApi.execute('findItemsIneBayStores',
                               {'storeName': ebayStoreName, 'IncludeSelector': ['Title']})
        item = findingEbayApi.response.dict()
        data = item["searchResult"]["item"]
        total = str(len(data))
        if DEV_MODE:
            print(funId + message + Fore.GREEN + "Successfully found " + total + " products." + Style.RESET_ALL)
    except Exception as e:
        # Abort quietly (log only in dev) when the store query fails.
        if DEV_MODE:
            print(str(e))
        return
    if DEV_MODE:
        print(funId + runningIndicator + "Proccessing products...")
    products = []
    count = 0
    for product in data:
        if DEV_MODE:
            print(runningIndicator + str(count) + "/" + total)
        productBlock = {}
        productBlock["name"] = product['title']
        productBlock["ebayLink"] = product['viewItemURL']
        # Per-product scrape of price/description/images from the listing page.
        productBlock["data"] = getExtraData(productBlock["ebayLink"], product['itemId'])
        products.append(productBlock)
        if len(productBlock["data"]["description"]) > 1:
            desc = "desc found"
        else:
            desc = "desc not found"
        if DEV_MODE:
            print(" |____ Name: " + str(productBlock["name"]) +
                  "\n |____ Price: " + str(productBlock["data"]["price"]) +
                  "\n |____ Description: " + desc +
                  "\n |____ Images: " + str(productBlock["data"]["images"]))
        # Stop once the configured product limit is reached.
        if count >= limit:
            break
        count += 1
    if DEV_MODE:
        print(Style.RESET_ALL + funId + message + "Finished Cleaning Data")
    return products
def get_one_item(opts):
    """Return the itemId of the first hit for a fixed keyword search."""
    api = finding(debug=opts.debug, appid=opts.appid,
                  config_file=opts.yaml, warnings=True)
    reply = api.execute('findItemsAdvanced',
                        {'keywords': u'GRAMMY Foundation®'})
    return reply.reply.searchResult.item[0].itemId
def get_keywords(YOURAPPID):
    """Prompt for keywords, search eBay, and print summary statistics.

    Returns (category id of the last item processed, DataFrame of per-item
    details).  NOTE(review): raises NameError on `cat_id` if the search
    returns no items — confirm intended behavior.
    """
    Keywords = input('Enter your Keyword(s):\n')
    api = finding(domain='svcs.ebay.com', debug=False, appid=YOURAPPID, config_file=None)
    api_request = {'keywords': Keywords, 'outputSelector': 'SellerInfo'}
    response = api.execute('findItemsByKeywords', api_request)
    soup = BeautifulSoup(response.content, 'lxml')
    totalentries = int(soup.find('totalentries').text)
    items = soup.find_all('item')
    print()
    print()
    title = []
    data = []
    for item in items:
        item_info = []
        name = item.title.string.lower().strip()
        # Accumulate every title word for the keyword-frequency report.
        title = title + name.split(" ")
        cat = item.categoryname.string.lower()
        cat_id = item.categoryid.string.lower()
        price = int(round(float(item.currentprice.string)))
        shippingcost = item.shippingservicecost  ###Change it to int(float(shippingcost))
        starttime = item.starttime.string
        endtime = item.endtime.string
        item_info.extend((name, cat, price, shippingcost, starttime, endtime))
        data.append(item_info)
    df = pd.DataFrame(data, columns=['Title', 'Category', 'Price', 'ShippingCost', 'Starttime', 'Endtime'])
    # Truncate timestamps to midnight so day-length arithmetic is exact.
    df['Starttime'] = df['Starttime'].apply(pd.to_datetime).dt.normalize()
    df['Endtime'] = df['Endtime'].apply(pd.to_datetime).dt.normalize()
    print('-<start>--------------------------------------------------------------------')
    print("Keyword: " + Keywords)
    # Ten most frequent title words double as keyword suggestions.
    c = Counter(title)
    recommendation = c.most_common(10)
    print("Keyword Recommended: " + str(recommendation))
    print("Number of Products: " + str(totalentries))
    print('----------------------------------------------------------------------------')
    print("Shipping Cost Distribution: \nCost($) Top 100 Amounts")
    print(df['ShippingCost'].value_counts())  ###Align the Cost values by sorted
    print('----------------------------------------------------------------------------')
    df['Sales Day Length'] = (df['Endtime'] - df['Starttime']).dt.days
    print(df)
    print('----------------------------------------------------------------------------')
    return cat_id, df
def test_ebay_api_request_status_code(self):
    """The completed-items search should answer with HTTP 200."""
    api = finding(appid='JohnHein-homepage-PRD-392e94856-07aba7fe',
                  config_file=None, siteid='EBAY-US')
    keywords = Search(search="1986 Fleer Jordan PSA 10")
    request = {
        'keywords': keywords,
        'itemFilter': [
            {'name': 'SoldItemsOnly', 'value': True},
        ],
    }
    result = api.execute('findCompletedItems', request)
    self.assertEqual(result.status_code, 200)
def findQuery(myargs):
    """Search for an item id (from the -id flag or an interactive prompt);
    fall back to completed items when the live search comes back empty."""
    if '-id' in myargs:
        itemId = myargs['-id']
    else:
        itemId = input("Input ItemID, no quotes: ")
    api = finding(siteid='EBAY-US',
                  config_file="../sensitive_files/falcons070.yaml")
    execute(api, itemId, 'findItemsAdvanced')
    dictstr = api.response.dict()
    if dictstr['searchResult']['_count'] == '0':
        execute(api, itemId, 'findCompletedItems')
    else:
        for item in dictstr['searchResult']['item']:
            # BUG FIX: `print json.dumps(...)` is Python 2 statement syntax —
            # a SyntaxError under Python 3, which the rest of this file uses.
            print(json.dumps(item, indent=4))
def webScrapeEbay():
    """Search eBay for the first (alphabetically) entered product and print
    the details of every matching listing, addressed to the entered name."""
    raw_name = e1.get()
    raw_product = e2.get()
    # Keep the raw entry strings for the report; the sorted, comma-split
    # product list supplies the search keywords.
    name_parts = raw_name.split(',')
    name_parts.sort()
    product_parts = raw_product.split(',')
    product_parts.sort()

    api = finding(appid='TejvirBa-l-PRD-239332c4a-8904990b', config_file=None)
    response = api.execute('findItemsByKeywords',
                           {'keywords': product_parts[0]})
    page = BeautifulSoup(response.content, 'lxml')
    # Parsed but unused; raises if the tag is missing entirely.
    totalentries = int(page.find('totalentries').text)
    listings = page.find_all('item')
    print()
    print()
    for listing in listings:
        cat = listing.categoryname.string.lower()
        title = listing.title.string.lower()
        price = int(round(float(listing.currentprice.string)))
        url = listing.viewitemurl.string.lower()
        listingtype = listing.listingtype.string.lower()
        condition = listing.conditiondisplayname.string.lower()
        print(raw_name, ', here are the details on a', raw_product, 'from Ebay.com')
        print('\n')
        print('Product Category:\n' + cat + '\n')
        print('Product Title:\n' + title + '\n')
        print('Product Price:\n' + 'USD' + ' ' + str(price) + '\n')
        print('Product URL:\n' + url + '\n')
        print('Product Listing Type:\n' + listingtype + '\n')
        print('Product Condition:\n' + condition + '\n')
        print(
            '------------------------------------------------------------------'
        )
def on_data(self, tweet):
    """Stream callback (Python 2 syntax): compare a tweeted deal price
    against current eBay listings and append rows to matches.csv; listings
    priced above the deal are also appended to trades.csv."""
    tweetJson = json.loads(tweet)
    tweetText = tweetJson['text']
    # Only handle tweets that quote a price and are not contests/lightning deals.
    if '$' in tweetText and "Win a" not in tweetText and "Lightning Deal!" not in tweetText:
        # parse product description and price from tweet
        dealProduct = tweetText[:tweetText.rfind('$')-1]
        # Strip commas so the value doesn't break the comma-separated CSV rows.
        dealProduct = dealProduct.replace(",", " ")
        dealPrice = tweetText[tweetText.rfind('$')+1:tweetText.index(' ', tweetText.rfind('$'))]
        # search product in api
        f = finding()
        f.execute('findItemsAdvanced', {'keywords': dealProduct})
        dom = f.response_dom()
        items = dom.getElementsByTagName('item')
        for item in items:
            bidCount = ""
            positiveFeedbackPercent = ""
            # pull in product description and price of search results from api
            marketProduct = item.getElementsByTagName('title')[0].firstChild.nodeValue
            marketProduct = marketProduct.replace(",", " ")
            marketPrice = item.getElementsByTagName('sellingStatus')[0].getElementsByTagName('currentPrice')[0].firstChild.nodeValue
            timeLeft = item.getElementsByTagName('sellingStatus')[0].getElementsByTagName('timeLeft')[0].firstChild.nodeValue
            # bidCount / sellerInfo are optional nodes; guard before indexing.
            if len(item.getElementsByTagName('sellingStatus')[0].getElementsByTagName('bidCount')) > 0:
                bidCount = item.getElementsByTagName('sellingStatus')[0].getElementsByTagName('bidCount')[0].firstChild.nodeValue
            if len(item.getElementsByTagName('sellerInfo')) > 0:
                positiveFeedbackPercent = item.getElementsByTagName('sellerInfo')[0].getElementsByTagName('positiveFeedbackPercent')[0].firstChild.nodeValue
            hoursLeft = 0.0
            # Convert the "PxDTxHxM..." duration string into hours remaining.
            if "DT" in timeLeft and "H" in timeLeft and "M" in timeLeft:
                timeLeft = timeLeft[1:-1]
                days = timeLeft.split("DT")[0]
                hours = timeLeft.split("DT")[1].split("H")[0]
                minutes = timeLeft.split("DT")[1].split("H")[1].split("M")[0]
                #print days + " " + hours + " " + minutes
                hoursLeft = float(days) * 24 + float(hours) + float(minutes) / 60
            # dump all matches
            with open('matches.csv','a') as matchCsv:
                print tweetText + "," + dealProduct + "," + str(dealPrice) + "," + str(marketProduct) + "," + str(marketPrice) + "," + str(hoursLeft) + "," + str(bidCount) + "," + str(positiveFeedbackPercent) + "\n"
                matchCsv.write(tweetText + "," + dealProduct + "," + str(dealPrice) + "," + str(marketProduct) + "," + str(marketPrice) + "," + str(hoursLeft) + "," + str(bidCount) + "," + str(positiveFeedbackPercent) + "\n")
            # dump potential trades
            with open('trades.csv','a') as tradeCsv:
                # A listing priced above the deal price is a potential resale.
                if float(marketPrice) > float(dealPrice):
                    print "TRADE: " + tweetText + "," + dealProduct + "," + dealPrice + "," + marketProduct + "," + marketPrice + "," + str(hoursLeft) + "," + str(bidCount) + "," + str(positiveFeedbackPercent) + "\n"
                    tradeCsv.write(tweetText + "," + dealProduct + "," + str(dealPrice) + "," + marketProduct + "," + str(marketPrice) + "," + str(hoursLeft) + "," + str(bidCount) + "," + str(positiveFeedbackPercent) + "\n")
    # Returning True keeps the stream listener alive.
    return True
def run_motors(opts):
    """Run a 'tesla' keyword search against the eBay Motors site and print a
    short summary of the response; raises on an API-level error."""
    api = finding(siteid='EBAY-MOTOR', debug=opts.debug, appid=opts.appid,
                  config_file=opts.yaml, warnings=True)
    api.execute('findItemsAdvanced', {'keywords': 'tesla'})
    if api.error():
        raise Exception(api.error())
    # Guard clause instead of nesting: nothing to report without content.
    if not api.response_content():
        return
    print("Call Success: %s in length" % len(api.response_content()))
    print("Response code: %s" % api.response_code())
    print("Response DOM: %s" % api.response_dom())
    dictstr = "%s" % api.response_dict()
    print("Response dictionary: %s..." % dictstr[:250])
def run_unicode(opts):
    """Search with a non-ASCII keyword and report the first result title
    containing the letter 'ś', then dump the API state."""
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True)
        response = api.execute('findItemsAdvanced', {'keywords': u'Kościół'})
        for result_item in response.reply.searchResult.item:
            if result_item.title.find(u'ś') >= 0:
                print("Matched: %s" % result_item.title)
                break
        dump(api)
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def run(opts):
    """Page through eBay category 4251 with findItemsByCategory (pages
    1..99, 100 entries each) and save every response page as a separate
    XML file named WomensAccessoriesResponse<page>.xml.

    Fixes a file-handle leak: the original opened the output file before
    calling the API, so an exception during execute() left the handle
    open (and an empty file behind).
    """
    try:
        api = finding(debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True)
        for i in range(1, 100):
            api_request = {
                'categoryId': '4251',
                'paginationInput': {'pageNumber': i, 'entriesPerPage': 100},
            }
            response = api.execute('findItemsByCategory', api_request)
            # Open only after a successful call; the context manager
            # guarantees the handle is closed even if write() fails.
            with open('WomensAccessoriesResponse' + str(i) + '.xml', 'w+') as f:
                f.write(api.response.content)
    except ConnectionError as e:
        print(e)
        print(e.response.dict())
def run(opts):
    """Demonstrate item filters, affiliate tracking and sort order on the
    Belgium-Dutch site using a unicode keyword, then dump the API state."""
    try:
        request = {
            'keywords': u'niño',
            'itemFilter': [
                {'name': 'Condition', 'value': 'Used'},
                {'name': 'LocatedIn', 'value': 'GB'},
            ],
            'affiliate': {'trackingId': 1},
            'sortOrder': 'CountryDescending',
        }
        api = finding(siteid='EBAY-NLBE', debug=opts.debug, appid=opts.appid,
                      config_file=opts.yaml, warnings=True)
        api.execute('findItemsAdvanced', request)
        dump(api)
    except ConnectionError as e:
        print(e)
def run(opts):
    """Fan out nine paginated findItemsAdvanced searches concurrently with
    gevent, bounded by a 4-second timeout, and count top-rated listings."""
    timeout = gevent.Timeout(4)
    timeout.start()
    try:
        greenlets = []
        for page in range(1, 10):
            api = finding(debug=opts.debug, appid=opts.appid,
                          config_file=opts.yaml)
            request = {'keywords': 'python',
                       'paginationInput': {'pageNumber': page}}
            greenlets.append(gevent.spawn(api.execute,
                                          'findItemsAdvanced', request))
        gevent.joinall(greenlets)
        try:
            # Collect every result first; .get() re-raises any call failure.
            responses = [g.get() for g in greenlets]
            toprated = 0
            for resp in responses:
                for listing in resp.reply.searchResult.item:
                    if listing.topRatedListing == 'true':
                        toprated += 1
            print("Top Rated Listings: %s" % toprated)
        except ConnectionError as e:
            print("%s" % e)
    except gevent.timeout.Timeout as e:
        print("Calls reached timeout threshold: %s" % e)
    finally:
        timeout.cancel()
def on_data(self, tweet):
    """Stream callback (Python 2 syntax): rate a tweeted deal from 0 to 5
    stars against current eBay listings, log the rating to dealRating.csv,
    and retweet the deal prefixed with a star-rating string."""
    tweetJson = json.loads(tweet)
    tweetText = tweetJson['text']
    # Only handle tweets that quote a price and are not contests/lightning deals.
    if '$' in tweetText and "Win a" not in tweetText and "Lightning Deal!" not in tweetText:
        # parse product description and price from tweet
        dealProduct = tweetText[:tweetText.rfind('$')-1]
        dealProduct = str(dealProduct).replace(",", " ")
        dealPrice = tweetText[tweetText.rfind('$')+1:tweetText.index(' ', tweetText.rfind('$'))]
        # search product in api
        f = finding()
        f.execute('findItemsAdvanced', {'keywords': dealProduct})
        dom = f.response_dom()
        items = dom.getElementsByTagName('item')
        numListings = 0.0
        # Sentinel meaning "no qualifying listing seen yet".
        marketPriceLow = 99999.99
        marketPriceAvg = 0.0
        shippingCost = 0.0
        # Flat shipping estimate applied when the tweet doesn't say free shipping.
        if "Free Shipping" not in tweetText:
            shippingCost = 8.0
        for item in items:
            marketPrice = item.getElementsByTagName('sellingStatus')[0].getElementsByTagName('currentPrice')[0].firstChild.nodeValue
            # Count only listings priced above deal price + shipping.
            if float(marketPrice) > (float(dealPrice) + shippingCost):
                numListings += 1.0
                if float(marketPrice) < float(marketPriceLow):
                    marketPriceLow = float(marketPrice)
                marketPriceAvg += float(marketPrice)
        dealRating = 0
        if numListings > 0.0:
            marketPriceAvg = marketPriceAvg / numListings
        # 3-5 stars: even the cheapest comparable listing costs more than the deal.
        if float(marketPriceLow) > (float(dealPrice) + shippingCost) and marketPriceLow != 99999.99:
            if numListings >= 25:
                dealRating = 5
            elif numListings >= 10:
                dealRating = 4
            elif numListings >= 5:
                dealRating = 3
        # 1-2 stars: only the average listing price beats the deal.
        if float(marketPriceAvg) > (float(dealPrice) + shippingCost):
            if numListings >= 10:
                dealRating = 2
            elif numListings >= 5:
                dealRating = 1
        ts = time.time()
        st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
        # dump potential trades
        with open('dealRating.csv','a') as dealRatingDump:
            print "DealRating: " + str(dealRating) + "/5 stars : " + tweetText + "," + dealProduct + "," + dealPrice + "," + str(marketPriceAvg) + "," + str(marketPriceLow) + "," + str(int(numListings)) + "," + str(st) + "\n"
            dealRatingDump.write(tweetText + "," + dealProduct + "," + dealPrice + "," + str(marketPriceAvg) + "," + str(marketPriceLow) + "," + str(int(numListings)) + "," + str(st) + "," + str(dealRating) + "\n")
        # Map the numeric rating to a unicode star string for the tweet.
        dealRatingStars = ""
        if dealRating == 0:
            dealRatingStars = u'☆☆☆☆☆'
        elif dealRating == 1:
            dealRatingStars = u'★☆☆☆☆'
        elif dealRating == 2:
            dealRatingStars = u'★★☆☆☆'
        elif dealRating == 3:
            dealRatingStars = u'★★★☆☆'
        elif dealRating == 4:
            dealRatingStars = u'★★★★☆'
        elif dealRating == 5:
            dealRatingStars = u'★★★★★'
        # SECURITY NOTE(review): live Twitter API keys and access tokens are
        # hard-coded below; they should be revoked and loaded from
        # configuration or environment variables instead.
        pubTweet = Twython("1Oqu7GYy9RVUGgATxdsC6IDBH", "VjSUMkJaRJkadurzCyJ8mIl05izjdYz9nXzklgyRqfIou2WN1M", "4564439173-DyQWKd8smdv1mP70qoyGynICpPdkUTUeMCxh7qy", "3OfiZcD6RcaqvjDkiQBc50EzkQ9jr3FOTNJ8T3qUcFvfj")
        pubTweet.update_status(status=dealRatingStars + ": " + tweetText)
    # Returning True keeps the stream listener alive.
    return True
def getEbayItems(searchArgument):
    """Return up to 20 DOM <item> nodes from a findItemsAdvanced search for
    *searchArgument*."""
    api = finding()
    api.execute("findItemsAdvanced", {"keywords": searchArgument})
    matches = api.response_dom().getElementsByTagName("item")
    return matches[:20]
def ebay_find_wanted_items():
    """Query eBay UK for each wanted item (a max price plus search string
    read from a text file) and build an HTML report of matching auctions
    that, including postage, still cost less than the wanted price."""
    # No need to include our ID here; that gets grabbed from the YAML file.
    api = finding(siteid = 'EBAY-GB')
    # Get the search strings from our text file.
    # The amount we're willing to pay is: item.split(' ', 1)[0]
    # The string to search for is: item.split(' ', 1)[1]
    wanted_items = ebay_get_wanted_items()
    wanted_item_count = 0
    # List to hold the lines of html we're going to write to a file.
    items_html_list = []
    # Get the current time, for use in calculating elapsed time.
    time_start = time.time()
    # Query eBay for each wanted item.
    for item in wanted_items:
        # Skip comment lines and blank lines in the wanted-items file.
        if ebay_is_comment(item) or item.isspace():
            continue
        item_price = item.split(' ', 1)[0]
        item_name = item.split(' ', 1)[1]
        wanted_item_count += 1
        response = api.execute('findItemsAdvanced', {
            'keywords': item_name,
            'itemFilter': [
                {'name': 'ListingType', 'value': 'Auction'},
                {'name': 'LocatedIn', 'value': 'GB'},
                {'name': 'MaxPrice', 'value': item_price },
            ],
            'sortOrder': 'EndTimeSoonest',
        })
        # The results are returned as a dictionary.
        item_count = int(response.reply.searchResult._count)
        items_html_list.append(HTML_HEADER % (item_name, item_price))
        items_html_list.append(TABLE_OPEN)
        for i in range(item_count):
            # NOTE(review): both branches index the same list; the
            # single-result special case looks redundant -- confirm how the
            # SDK shapes a one-item searchResult.
            if item_count == 1:
                item = response.reply.searchResult.item[0]
            else:
                item = response.reply.searchResult.item[i]
            total_price = float(item.sellingStatus.currentPrice.value)
            free_postage = True
            # Fold the postage cost (when present) into the total price.
            if hasattr(item.shippingInfo, 'shippingServiceCost'):
                total_price += float(item.shippingInfo.shippingServiceCost.value)
                free_postage = False
            # Only report listings still under our maximum once postage is included.
            if total_price < float(item_price):
                date = isodate.parse_duration(item.sellingStatus.timeLeft)
                items_html_list.append(HTML_LINK % (locale.currency(total_price), "f" if free_postage else "", date, item.sellingStatus.bidCount, item.viewItemURL, item.title.encode('utf-8')))
        items_html_list.append(TABLE_CLOSE)
    # Footer: item count, elapsed seconds, and a human-readable timestamp.
    dt = datetime.datetime.now().strftime("%A, %d. %B %Y %H:%M")
    items_html_list.append(HTML_TIME % (wanted_item_count, time.time() - time_start, dt))
    ebay_write_html(items_html_list)
import ebaysdk
from ebaysdk.finding import Connection as finding

# Demo script: run a keyword search, then walk the response DOM printing
# each item's title and current price.
f = finding()
f.execute('findItemsAdvanced', {'keywords': 'Apple TV Media Player (3rd Generation)'})
dom = f.response_dom()
mydict = f.response_dict()
myobj = f.response_obj()
# FIX: the original used py2-only print statements; the single-argument
# call form below works on both Python 2 and 3, matching the print()
# style used elsewhere in this file.
print(myobj.itemSearchURL)
# process the response via DOM
items = dom.getElementsByTagName('item')
for item in items:
    print(item.getElementsByTagName('title')[0].firstChild.nodeValue)
    print(item.getElementsByTagName('sellingStatus')[0].getElementsByTagName('currentPrice')[0].firstChild.nodeValue)
# Run each queued search against the Finding API, 100 results per call.
for search_tuple in search_return:
    if timing:
        print(stamp() + "running searches")
    # unpack search tuple to get search text
    search_num, search_poller, search_text, search_category, number_of_results = search_tuple
    print(stamp() + "running search \"" + search_text + "\"")
    results = 0
    # NOTE(review): `results` is never advanced anywhere in this visible
    # loop body -- unless unseen code below increments it, this loop never
    # terminates. Confirm against the full file.
    while (results <= number_of_results):
        # encode search text as string
        search_text = search_text.encode('ascii', 'ignore')
        try:
            # establish connection using key obtained from database
            find = finding(appid=keydict['key'])
            if timing:
                print(stamp() + "about to make api call")
            # execute the search once connected.
            num_to_search = 100
            if number_of_results < 100:
                num_to_search = number_of_results
            page = int(results / num_to_search)
            # BUG FIX: the original dict literal repeated the 'itemFilter'
            # key, so Python silently kept only the FixedPrice filter and
            # the AuctionWithBIN filter was never sent. A single
            # ListingType filter with a value list requests both types.
            find.execute('findItemsAdvanced', {
                'keywords': search_text,
                'paginationInput': {'entriesPerPage': num_to_search,
                                    'pageNumber': (page + 1)},
                'itemFilter': {'name': 'ListingType',
                               'value': ['AuctionWithBIN', 'FixedPrice']},
            })
            if timing:
                print(stamp() + "finished making api call (executed)")
        except ConnectionError as e:
            print("got a Connection Error")
            raise e
            sys.exit(0)  # NOTE(review): unreachable after the raise above
def get_store_meta(store_name):
    """Collect seller/store metadata and per-category statistics for an
    eBay store via repeated findItemsIneBayStores calls.

    Returns a dict of store details plus L0/L1 category breakdowns,
    dominant categories and average selling prices, or {} when the store
    has no items or any call is not successful.
    NOTE(review): reads a module-level `opts` for credentials, and relies
    on external helpers analyze_items()/average_asp() and the
    PaginationLimit exception.
    """
    try:
        api = finding(debug=opts.debug, appid=opts.appid, config_file=opts.yaml)
        response = api.execute('findItemsIneBayStores', {
            'storeName': store_name,
            'outputSelector': [
                'CategoryHistogram',
                'AspectHistogram',
                'SellerInfo',
                'StoreInfo',
            ]}
        )
        if response.reply.ack != 'Success':
            return {}
        if int(response.reply.paginationOutput.totalEntries) <= 0:
            return {}
        # Seller/store details are taken from the first item in the results.
        data = {
            'followers': 0,
            'item_count': response.reply.paginationOutput.totalEntries,
            'seller_name': response.reply.searchResult.item[0].sellerInfo.sellerUserName,
            'store_url': response.reply.searchResult.item[0].storeInfo.storeURL,
            'feedback_score': response.reply.searchResult.item[0].sellerInfo.feedbackScore,
            'positive_feedback_percent': response.reply.searchResult.item[0].sellerInfo.positiveFeedbackPercent,
            'top_rated_seller': response.reply.searchResult.item[0].sellerInfo.topRatedSeller,
            'country_code': response.reply.searchResult.item[0].country,
        }
        # Aggregates accumulated while walking the category histograms.
        agg_data = {
            'cat_asp': {},
            'watch_count': 0,
            'L0': [],
            'L1': [],
        }
        dominate_l0_cat_count = 0
        dominate_l1_cat_count = 0
        # Record every top-level (L0) and child (L1) category, tracking the
        # highest item count at each level as the "dominate" category.
        for lev0 in response.reply.categoryHistogramContainer.categoryHistogram:
            agg_data['L0'].append({
                'category_id': lev0.categoryId,
                'category_name': lev0.categoryName,
                'item_count': lev0.count
            })
            if int(lev0.count) > dominate_l0_cat_count:
                dominate_l0_cat_count = int(lev0.count)
                agg_data['dominate_l0_category_id'] = lev0.categoryId
                agg_data['dominate_l0_category_name'] = lev0.categoryName
            for lev1 in lev0.childCategoryHistogram:
                agg_data['L1'].append({
                    'category_id': lev1.categoryId,
                    'category_name': lev1.categoryName,
                    'item_count': lev1.count
                })
                if int(lev1.count) > dominate_l1_cat_count:
                    dominate_l1_cat_count = int(lev1.count)
                    agg_data['dominate_l1_category_id'] = lev1.categoryId
                    agg_data['dominate_l1_category_name'] = lev1.categoryName
        # Re-query per L1 category, paging until PaginationLimit, feeding
        # each batch of items into analyze_items().
        for category_node in agg_data['L1']:
            category_id = category_node['category_id']
            category_call = api.execute('findItemsIneBayStores', {
                'storeName': store_name,
                'categoryId': category_id,
                'outputSelector': [
                    'CategoryHistogram',
                    'AspectHistogram',
                    'SellerInfo',
                    'StoreInfo',
                ]}
            )
            if category_call.reply.ack != 'Success':
                return {}
            if int(category_call.reply.paginationOutput.totalEntries) <= 0:
                return {}
            analyze_items(category_call.reply.searchResult.item, category_id, agg_data)
            try:
                while True:
                    category_call2 = api.next_page()
                    analyze_items(category_call2.reply.searchResult.item, category_id, agg_data)
            except PaginationLimit as e:
                # Expected terminator: no more pages for this category.
                pass
        # Average selling price for the dominant L1 category, then per node.
        dom_l1_asp = average_asp(
            agg_data['cat_asp'][agg_data['dominate_l1_category_id']]
        )
        for category_node in agg_data['L1']:
            asp = average_asp(agg_data['cat_asp'][category_node['category_id']])
            category_node.update({'asp': asp})
        data.update({
            'L0': agg_data['L0'],
            'L1': agg_data['L1'],
            'watch_count': agg_data['watch_count'],
            'postal_code': agg_data.get('postal_code', None),
            'dominate_category_id': agg_data['dominate_l0_category_id'],
            'dominate_category_name': agg_data['dominate_l0_category_name'],
            'dominate_l1_category_id': agg_data['dominate_l1_category_id'],
            'dominate_l1_category_name': agg_data['dominate_l1_category_name'],
            'dominate_l1_category_asp': dom_l1_asp,
        })
        #from IPython import embed;
        #embed()
        return data
    except ConnectionError as e:
        print(e)
def run(opts):
    """Download eBay listings for each category in `category_ids`
    (100 pages x 100 items per category), enrich them with ItemSpecifics
    via the Shopping API, and save each page as a styled XML file under
    /var/www/html/ebay-data/toys-and-hobbies/<category name>/<page>.

    NOTE: several large alternative category-id maps (women's accessories
    subcategories, collectibles, women's clothing, women's shoes, toys
    subcategories) that were kept here as commented-out code have been
    removed; recover them from version control if needed.
    """
    # general categories for CSA - toys
    # BUG FIX: the original literal read  '314' : 'womens clothing' '1' : ...
    # -- the missing comma made the two strings implicitly concatenate and
    # the second colon was a SyntaxError.
    category_ids = {
        '1063': 'womens accessories',
        '220': 'toys',
        '3034': 'womens shoes',
        '314': 'womens clothing',
        '1': 'collectibles',
    }
    for category_id, category_name in category_ids.items():
        directory = '/var/www/html/ebay-data/toys-and-hobbies/' + category_name
        for page in range(1, 101):
            if not os.path.exists(directory):
                os.makedirs(directory)
            # Skip pages that have already been downloaded.
            if not os.path.exists(os.path.join(directory, str(page))):
                try:
                    api = finding(debug=opts.debug, appid=opts.appid,
                                  config_file=opts.yaml, warnings=True)
                    api_request = {
                        'categoryId': category_id,
                        'paginationInput': {'pageNumber': page,
                                            'entriesPerPage': 100}
                    }
                    response = api.execute('findItemsByCategory', api_request)
                    nodes = response.dom().xpath('//itemId')
                    item_ids = [n.text for n in nodes]
                    # Same output on py2 and py3 (original was a py2-only
                    # print statement).
                    print("%s %s" % (category_id, category_name))
                    if len(item_ids) > 0:
                        shop = Shopping(debug=opts.debug, appid=opts.appid,
                                        config_file=opts.yaml, warnings=False)
                        # IDs are batched 20 at a time; each batch's <Item>
                        # nodes are merged into the first response's DOM.
                        prim_resp = shop.execute('GetMultipleItems', {'IncludeSelector': 'ItemSpecifics', 'ItemID': item_ids[0:20]})
                        for j in range(20, len(item_ids), 20):
                            sub_resp = shop.execute('GetMultipleItems', {'IncludeSelector': 'ItemSpecifics', 'ItemID': item_ids[j:j+20]})
                            prim_resp.dom().extend(sub_resp.dom().xpath('//Item'))
                        # Context manager ensures the file is closed even
                        # if serialization fails.
                        with open(os.path.join(directory, str(page)), 'w+') as xml_file:
                            stylesheet_tag = '<?xml-stylesheet type="text/xsl" href="/ebay-data/xslItemSpecifics.xsl"?>\n'
                            xml_file.write(stylesheet_tag)
                            xml_file.write(lxml.etree.tostring(prim_resp.dom(), pretty_print=True))
                except ConnectionError as e:
                    print(e)
                    print(e.response.dict())
__author__ = 'ps13150' import datetime from ebaysdk.exception import ConnectionError from ebaysdk.finding import Connection as finding from ebaysdk.soa.finditem import Connection as FindItem from ebaysdk.shopping import Connection as Shopping from ebaysdk.utils import getNodeText #from ebaysdk.exception import ConnectionError try: api = finding(appid='Prashant-PricePre-PRD-42f839365-6140ea4e', config_file=None) api_find = FindItem(debug=True, appid='Prashant-PricePre-PRD-42f839365-6140ea4e', config_file=None) for i in range(1,5,1): response = api.execute('findCompletedItems', {'keywords': 'Laptops Netbooks', 'paginationInput':{ 'pageNumber':str(i), 'entriesPerPage':"100" } }) #xml = '<keywords>"Laptops Netbooks"</keywords><paginationInput><entriesPerPage>100</entriesPerPage><pageNumber>'+'2'+'</pageNumber></paginationInput>' #response = api.execute('findItemsAdvanced',xml) assert(response.reply.ack == 'Success') assert(type(response.reply.timestamp) == datetime.datetime) assert(type(response.reply.searchResult.item) == list) item = response.reply.searchResult.item[0]