def parse_item_page(self, response):
    """Extract descriptive text from a Walmart product page.

    Joins the text of the product-name, description-disclaimer and
    "about" nodes, strips HTML tags, stores the result in
    ``item['keywords']`` and returns the item for the pipeline.

    :param response: Scrapy response carrying ``meta['item']``.
    :return: the item with its ``keywords`` field populated.
    """
    hxs = HtmlXPathSelector(response)
    item = response.meta['item']
    # Walmart-specific selectors: title, disclaimer and about-text.
    node_collection = hxs.select(
        "//h1[@itemprop='name']/div|//div["
        "@class='product-description-disclaimer']|//div[@class='about-desc']"
    )
    item_list = []
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # NOTE(review): Python 2 assumed — there str() of the
                # encoded bytes is a no-op; on Python 3 this would yield
                # "b'...'" text. TODO confirm interpreter version.
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    item['keywords'] = StringUtil.remove_html_tags(str(' '.join(item_list)))
    return item
def getProductDimension(self, instance_of, instance_val):
    """Return the product-dimension string found in *instance_val*.

    :param instance_of: "DOCUMENT" — *instance_val* is an XPath resolved
        via ``self.getElementValue``; "STRING" — *instance_val* is raw text.
    :param instance_val: XPath expression or raw content, per above.
    :return: the matched dimension text, or the result of
        ``self.getProductWeight`` when no plausible dimension is found,
        or '' when there is nothing to parse.
    """
    if self.htmlObject is None:
        return ''
    if instance_val is None:
        return ''
    content = None
    dimension = ''
    if instance_of == "DOCUMENT" and len(instance_val) > 0:
        content = self.getElementValue(instance_val)
    elif instance_of == "STRING" and len(instance_val) > 0:
        content = instance_val
    content = StringUtil.remove_html_tags(content)
    # Strip escape-like sequences and anything outside the whitelist
    # before pattern matching.
    content = StringUtil.str_cleaner(content, r'\\([a-z0-9]{3})', '')
    content = StringUtil.str_cleaner(
        content, r'[^0-9a-zA-Z\s\-\(\).,"\'&]+', '')
    # BUG FIX: original also tested `type(content) is not None`, which is
    # always true (the type of any object is never None) — dropped.
    if content is not None and len(content) > 0:
        # Take the first dimension pattern that matches.
        for sptf in self.dimensionPatterns:
            if StringUtil.str_find_str(str(content), sptf):
                dimension = StringUtil.str_search_str(str(content), sptf)
                break
    # BUG FIX: original read `if dimension and len(dimension) == 0 or
    # len(dimension) > 35` — precedence makes the first clause always
    # false, so an empty match never fell back to the weight parser.
    # Fall back whenever the match is empty or implausibly long.
    if len(dimension) == 0 or len(dimension) > 35:
        dimension = self.getProductWeight(content)
    return dimension
def __getDescription(self):
    """Collect the page's descriptive text.

    Joins the text of the product-name heading and the itemprop
    description children, strips HTML tags, and returns one
    space-separated string (empty when nothing matched).
    """
    node_collection = self.getElementValues(
        "//h1[@class='product-name']|//div[@itemprop='description']/*")
    item_list = []
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # NOTE(review): Python 2 assumed — str() of the encoded
                # bytes is a no-op there; on Python 3 this would yield
                # "b'...'" text. TODO confirm interpreter version.
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getDescription(self):
    """Collect the page's descriptive text.

    Joins the text of the brand span, product-name span and overview
    paragraphs, strips HTML tags, and returns one space-separated
    string (empty when nothing matched).
    """
    node_collection = self.getElementValues(
        "//span[@id='ctl00_ContentPlaceHolder1_ucTemplate_aBrand']|//span["
        "@class='productname']|//dl[@id='overview']/dd/p")
    item_list = []
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # NOTE(review): Python 2 assumed — str() of the encoded
                # bytes is a no-op there; on Python 3 this would yield
                # "b'...'" text. TODO confirm interpreter version.
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getDescription(self):
    """Collect the page's descriptive text.

    Joins the text of the name links/heading and the info paragraphs,
    strips HTML tags, and returns one space-separated string (empty
    when nothing matched).
    """
    node_collection = self.getElementValues(
        "//div[@id='names']/span/a|//div[@id='names']/h1|//div["
        "@id='info']/div/p")
    item_list = []
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # NOTE(review): Python 2 assumed — str() of the encoded
                # bytes is a no-op there; on Python 3 this would yield
                # "b'...'" text. TODO confirm interpreter version.
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getDescription(self):
    """Collect the page's descriptive text.

    Joins the text of the product title and the feature-bullet list
    items, strips HTML tags, and returns one space-separated string
    (empty when nothing matched).
    """
    node_collection = self.getElementValues(
        "//span[@id='productTitle']|//div["
        "@id='fbExpandableSectionContent']/ul/li/span|//div["
        "@id='feature-bullets']/ul/li/span")
    item_list = []
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # NOTE(review): Python 2 assumed — str() of the encoded
                # bytes is a no-op there; on Python 3 this would yield
                # "b'...'" text. TODO confirm interpreter version.
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getDescription(self):
    """Collect the page's descriptive text.

    Joins the text of the buy-block heading and the product-details
    description blocks/bullets, strips HTML tags, and returns one
    space-separated string (empty when nothing matched).
    """
    node_collection = self.getElementValues(
        "//div[@id='buy-block']/div/h1|//div["
        "@class='product-details-description clearfix']/div|//div["
        "@class='product-details-description clearfix']/ul/li")
    item_list = []
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # NOTE(review): Python 2 assumed — str() of the encoded
                # bytes is a no-op there; on Python 3 this would yield
                # "b'...'" text. TODO confirm interpreter version.
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getDescription(self):
    """Collect the page's descriptive text.

    Joins the text of the product name, extended-details blocks,
    itemprop description paragraphs and details-and-care bullets,
    strips HTML tags, and returns one space-separated string (empty
    when nothing matched).
    """
    node_collection = self.getElementValues(
        "//h1[@itemprop='name']|//div[@class='extended-product-details "
        "hide-when-immersive']/div/div|//div[@class='extended-product-details hide-when-immersive']/div/div/span|//div[@class='extended-product-details hide-when-immersive']/div/div|//div[@itemprop='description']/p|//div[@class='product-details-and-care module-details']/ul/li"
    )
    item_list = []
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # NOTE(review): Python 2 assumed — str() of the encoded
                # bytes is a no-op there; on Python 3 this would yield
                # "b'...'" text. TODO confirm interpreter version.
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def __getDescription(self):
    """Collect the page's descriptive text.

    Joins the text of the product-header headings and the main-body
    paragraphs/bullets, strips HTML tags, and returns one
    space-separated string (empty when nothing matched).
    """
    node_collection = self.getElementValues(
        "//div[@class='exp-product-header']/h1|//div["
        "@class='exp-product-header']/h2|//div["
        "@class='pi-pdpmainbody']/p/b|//div[@class='pi-pdpmainbody']/p|//div["
        "@class='pi-pdpmainbody']/li")
    item_list = []
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # NOTE(review): Python 2 assumed — str() of the encoded
                # bytes is a no-op there; on Python 3 this would yield
                # "b'...'" text. TODO confirm interpreter version.
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    return StringUtil.remove_html_tags(str(' '.join(item_list)))
def parse_item_page(self, response):
    """Extract descriptive text from a 6PM product page and log a
    training row.

    Joins the text of the title link and the description bullets,
    strips HTML tags, stores the result in ``item['keywords']``,
    appends a ``(category, keywords)`` row to ``train_data.csv``,
    and returns the item for the pipeline.

    :param response: Scrapy response carrying ``meta['item']``.
    :return: the item with its ``keywords`` field populated.
    """
    hxs = Selector(response)
    item = response.meta['item']
    # 6PM-specific selectors: title plus description bullet points.
    node_collection = hxs.xpath(
        "//h1[@class='title']/a|//div["
        "@class='description']/ul/li/span|//div["
        "@class='description']/ul/li/a|//div[@class='description']/ul/li")
    item_list = []
    if node_collection is not None and len(node_collection) > 0:
        for node in node_collection:
            value = ''.join(node.xpath("text()").extract())
            if len(value) > 0:
                # NOTE(review): Python 2 assumed — str() of the encoded
                # bytes is a no-op there; on Python 3 this would yield
                # "b'...'" text. TODO confirm interpreter version.
                value = str(value.strip().encode('utf-8'))
                value = StringUtil.str_utf_encode(value)
                item_list.append(value)
    item['keywords'] = StringUtil.remove_html_tags(str(' '.join(item_list)))
    # Append a (category, keywords) training row.  Binary-append 'ab'
    # matches the Python 2 csv module; on Python 3 this would need
    # open('train_data.csv', 'a', newline=''). TODO confirm interpreter.
    with open('train_data.csv', 'ab') as csvfile:
        trainwriter = csv.writer(csvfile, delimiter=',',
                                 quoting=csv.QUOTE_ALL)
        trainwriter.writerow(
            [item['category'], str(item['keywords']).lower()])
    return item