def parse_item_page(self, response):
    """Extract product keywords from a Walmart item page.

    Joins the text of the matched title/description nodes into the
    item's ``keywords`` field (HTML-stripped) and returns the item.

    :param response: Scrapy response carrying the item in ``meta['item']``.
    :return: the item with ``keywords`` populated.
    """
    hxs = HtmlXPathSelector(response)
    item = response.meta['item']
    # Walmart selectors. (The 6PM variant previously used
    # //h1[@class='title']/a | //div[@class='description']/ul/li/... )
    node_collection = hxs.select(
        "//h1[@itemprop='name']/div|//div["
        "@class='product-description-disclaimer']|//div[@class='about-desc']"
    )
    item_list = []
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # Normalise to a UTF-8 byte string (Python 2 str/unicode).
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    # Removed the unused `indx` counter the original incremented but never read.
    item['keywords'] = StringUtil.remove_html_tags(str(' '.join(item_list)))
    return item
def __getDescription(self):
    """Return the product name/description text, HTML-stripped.

    Concatenates the text content of the matched nodes into one
    whitespace-separated string.
    """
    item_list = []
    node_collection = self.getElementValues(
        "//h1[@class='product-name']|//div[@itemprop='description']/*")
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # Normalise to a UTF-8 byte string (Python 2 str/unicode).
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    # Removed the unused `indx` counter the original incremented but never read.
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getDescription(self):
    """Return brand, product name and overview text, HTML-stripped.

    Concatenates the text content of the matched nodes into one
    whitespace-separated string.
    """
    item_list = []
    node_collection = self.getElementValues(
        "//span[@id='ctl00_ContentPlaceHolder1_ucTemplate_aBrand']|//span["
        "@class='productname']|//dl[@id='overview']/dd/p")
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # Normalise to a UTF-8 byte string (Python 2 str/unicode).
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    # Removed the unused `indx` counter the original incremented but never read.
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getDescription(self):
    """Return product name and info-paragraph text, HTML-stripped.

    Concatenates the text content of the matched nodes into one
    whitespace-separated string.
    """
    item_list = []
    node_collection = self.getElementValues(
        "//div[@id='names']/span/a|//div[@id='names']/h1|//div["
        "@id='info']/div/p")
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # Normalise to a UTF-8 byte string (Python 2 str/unicode).
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    # Removed the unused `indx` counter the original incremented but never read.
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getDescription(self):
    """Return the Amazon title and feature-bullet text, HTML-stripped.

    Concatenates the text content of the matched nodes into one
    whitespace-separated string.
    """
    item_list = []
    node_collection = self.getElementValues(
        "//span[@id='productTitle']|//div["
        "@id='fbExpandableSectionContent']/ul/li/span|//div["
        "@id='feature-bullets']/ul/li/span")
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # Normalise to a UTF-8 byte string (Python 2 str/unicode).
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    # Removed the unused `indx` counter the original incremented but never read.
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getDescription(self):
    """Return the buy-block title and detail text, HTML-stripped.

    Concatenates the text content of the matched nodes into one
    whitespace-separated string.
    """
    item_list = []
    node_collection = self.getElementValues(
        "//div[@id='buy-block']/div/h1|//div["
        "@class='product-details-description clearfix']/div|//div["
        "@class='product-details-description clearfix']/ul/li")
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # Normalise to a UTF-8 byte string (Python 2 str/unicode).
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    # Removed the unused `indx` counter the original incremented but never read.
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getTitleInCategoryLevel(self):
    """Collect product titles on the category page.

    Each non-empty title becomes a JSON record with keys ``title`` and
    ``index`` (a 1-based counter over the titles actually kept).
    """
    titles = []
    nodes = self.getElementValues("//div[@class='product-v2-name']/h1")
    if not nodes:
        return titles
    counter = 0
    for element in nodes:
        text = ''.join(element.xpath("text()").extract())
        if not text:
            continue
        counter += 1
        # Normalise to a UTF-8 byte string before building the record.
        encoded = StringUtil.str_utf_encode(str(text.strip().encode('utf-8')))
        titles.append(self.listToJson(['title', 'index'], [encoded, counter]))
    return titles
def __getDescription(self):
    """Return the product name and extended-details text, HTML-stripped.

    Concatenates the text content of the matched nodes into one
    whitespace-separated string.

    NOTE(review): the union repeats
    ``//div[@class='extended-product-details hide-when-immersive']/div/div``
    twice; left untouched here since deduplicating a selector could change
    node ordering/counts — confirm and trim separately.
    """
    item_list = []
    node_collection = self.getElementValues(
        "//h1[@itemprop='name']|//div[@class='extended-product-details "
        "hide-when-immersive']/div/div|//div[@class='extended-product-details hide-when-immersive']/div/div/span|//div[@class='extended-product-details hide-when-immersive']/div/div|//div[@itemprop='description']/p|//div[@class='product-details-and-care module-details']/ul/li"
    )
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # Normalise to a UTF-8 byte string (Python 2 str/unicode).
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    # Removed the unused `indx` counter the original incremented but never read.
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getDescription(self):
    """Return the product header and main-body text, HTML-stripped.

    Concatenates the text content of the matched nodes into one
    whitespace-separated string.
    """
    item_list = []
    node_collection = self.getElementValues(
        "//div[@class='exp-product-header']/h1|//div["
        "@class='exp-product-header']/h2|//div["
        "@class='pi-pdpmainbody']/p/b|//div[@class='pi-pdpmainbody']/p|//div["
        "@class='pi-pdpmainbody']/li")
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # Normalise to a UTF-8 byte string (Python 2 str/unicode).
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    # Removed the unused `indx` counter the original incremented but never read.
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getTitleInCategoryLevel(self):
    """Collect Amazon search-result titles as JSON title/index records.

    Reads each node's ``data-attribute`` value; every non-empty value is
    emitted with a 1-based ``index`` over the values actually kept.

    NOTE(review): the literal ``@class='class=a-carousel'`` looks like a
    typo ("class=" embedded in the value) — reproduced unchanged; confirm
    against the live page markup before editing.
    """
    records = []
    nodes = self.getElementValues(
        "//div[@class='a-row a-spacing-micro']/a/h2|//div[@class='a-row "
        "a-spacing-top-mini']/a/h2|//div[@class='a-row "
        "a-spacing-mini']/a/h2|//div[@class='a-row "
        "a-spacing-none']/a/h2|//span[@id='productTitle']|//ol["
        "@class='class=a-carousel']/li/div/a/span")
    if not nodes:
        return records
    counter = 0
    for element in nodes:
        text = ''.join(element.xpath("@data-attribute").extract())
        if not text:
            continue
        counter += 1
        # Normalise to a UTF-8 byte string before building the record.
        encoded = StringUtil.str_utf_encode(str(text.strip().encode('utf-8')))
        records.append(self.listToJson(['title', 'index'], [encoded, counter]))
    return records
def parse_item_page(self, response):
    """Extract 6PM product keywords and append a labelled training row.

    Populates ``item['keywords']`` from the title/description nodes, then
    appends ``(category, lower-cased keywords)`` to ``train_data.csv``.

    :param response: Scrapy response carrying the item in ``meta['item']``.
    :return: the item with ``keywords`` populated.
    """
    hxs = Selector(response)
    item = response.meta['item']
    # 6PM selectors. (Walmart variant: //h1[@itemprop='name']/div |
    # //div[@class='product-description-disclaimer'] | //div[@class='about-desc'];
    # Amazon variant: //span[@id='productTitle'] |
    # //div[@id='fbExpandableSectionContent']/ul/li/span |
    # //div[@id='feature-bullets']/ul/li/span)
    node_collection = hxs.xpath(
        "//h1[@class='title']/a|//div["
        "@class='description']/ul/li/span|//div["
        "@class='description']/ul/li/a|//div[@class='description']/ul/li")
    item_list = []
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # Normalise to a UTF-8 byte string (Python 2 str/unicode).
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    # Removed the unused `indx` counter the original incremented but never read.
    item['keywords'] = StringUtil.remove_html_tags(str(' '.join(item_list)))
    # Append one training row; 'ab' keeps binary append mode, which the
    # Python 2 csv module requires to avoid extra blank lines on Windows.
    with open('train_data.csv', 'ab') as csvfile:
        trainwriter = csv.writer(csvfile, delimiter=',',
                                 quoting=csv.QUOTE_ALL)
        trainwriter.writerow(
            [item['category'], str(item['keywords']).lower()])
    return item