def parse_page_content(self, response):
        xxs = XmlXPathSelector(response)
        
        page_text = xxs.select('/api/query/pages/page/revisions/rev/text()').extract()
        if page_text:
            url = xxs.select('/api/query/pages/page/@fullurl').extract()
            if url:
                url = url[0]
            else:
                url = None

            page_text = page_text[0]
            for md_full in RE_INFOBOX_PAINTING.finditer(page_text):
                infobox = md_full.groups()[0]
                md = RE_IB_LOCATION.search(infobox)
                if md:
                    location = clean_wiki_string(md.groups()[0])

                    artist = ''
                    md_artist = RE_IB_ARTIST.search(infobox)
                    if md_artist:
                        artist = clean_wiki_string(md_artist.groups()[0])

                        name = ''
                        md_name = RE_IB_NAME.search(infobox)
                        if md_name:
                            name = clean_wiki_string(md_name.groups()[0])

                            if location and artist and name:
                                yield ArtInfo(name=name, artist=artist, location=location, url=url)
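For readability, a hypothetical sketch of the module-level helpers this callback relies on follows; the real spider defines its own patterns, so everything below is an assumption, not the original code.

import re

# Hypothetical regex helpers -- illustrative only, not the spider's actual patterns.
RE_INFOBOX_PAINTING = re.compile(r'\{\{Infobox artwork(.*?)\n\}\}', re.DOTALL | re.IGNORECASE)
RE_IB_LOCATION = re.compile(r'\|\s*museum\s*=\s*(.+)')
RE_IB_ARTIST = re.compile(r'\|\s*artist\s*=\s*(.+)')
RE_IB_NAME = re.compile(r'\|\s*title\s*=\s*(.+)')

def clean_wiki_string(value):
    """Drop [[wiki links]] and surrounding whitespace from an infobox value."""
    return re.sub(r'\[\[(?:[^|\]]*\|)?([^\]]*)\]\]', r'\1', value).strip()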
Example #2
 def parse(self, response):
     if self.value('link_extractor') is not None:
         xxs = XmlXPathSelector(response)
         links = xxs.select(self.value("link_extractor")).extract()
         return [Request(x, callback=self.parse_item) for x in links]
     else:
         return super(CommonSpider, self).parse(response)
    def parse(self, response):
        # Create xml selector & get its contents as a string for regex parsing
        xxs = XmlXPathSelector(response)
        data = str(xxs.select('/courseinfo').extract())
        
        # Create course item
        item = CourseItem()

        # Get course number from url
        number_regex = re.compile('(..-...)')
        number_match = number_regex.search(response.url)
        if number_match is not None:
            item['number'] = number_match.group(1)
        
        # Construct regular expression for prerequisite decoding
        prereq_regex = re.compile(r'Prerequisite(?:s)?:(.*)\.')
        
        match = prereq_regex.search(data)
        if match is None:
            print item
            return
        else:
            print match.group(1)
            item['prereqs'] = match.group(1)
    
        print item
    def parse(self, response):
        xxs = XmlXPathSelector(response)

        for product in xxs.select('//product'):
            category = product.select('./Category/text()').extract()
            loader = ProductLoader(item=Product(), selector=product)
            loader.add_xpath('identifier', './product-id/text()')
            loader.add_xpath('sku', './product-id/text()')
            loader.add_xpath('url', './product-url/text()')
            loader.add_xpath('name', './product-name/text()')
            loader.add_xpath('brand', './brand/text()')
            loader.add_value(
                'price',
                extract_price_eu(' '.join(
                    product.select('./price/text()').extract())))
            if category:
                loader.add_value('category',
                                 category[0].split('/')[-1].strip())
            loader.add_xpath('image_url', './image-url/text()')
            loader.add_xpath('stock', './stock/text()')
            if loader.get_output_value('price') > 499:
                loader.add_value('shipping_cost', '0')
            else:
                loader.add_value('shipping_cost', '25')
            yield loader.load_item()
Example #5
	def parseSubGenre(self, response):
		x = XmlXPathSelector(response)
		x.register_namespace("kb", "http://www.kerbango.com/xml")
		metaData = response.meta['metaData']
		stations = x.select('//kb:results/kb:station_record')  # was limited to fewer than 5 for now

		for station in stations:
			metaData['channelPlaylist'] = [station.select('./kb:station_url_record/kb:url/text()').extract()[0].rstrip('/ \r\n')]
			metaData['channelName'] = station.select('./kb:station/text()').extract()	
			metaData['channelDescription'] = station.select('./kb:description/text()').extract()	
			metaData['streamId'] = station.select('./kb:esid/text()').extract()	
			metaData['streamBandwidth'] = station.select('./kb:station_url_record/kb:bandwidth_kbps/text()').extract()	
			metaData['streamData'] = station.select('./kb:station_url_record/kb:status_code/text()').extract()	
			metaData['channelGenreIds'] = metaData['genreId']
			metaData['channelGenres'] = metaData['genreName']
			metaData['channelCategory'] = metaData['genreName']
			
			
			self.log('parseSubGenre %s %s' % (metaData['genreName'], metaData['channelName'] ), level=log.INFO)
			channelName = metaData['channelName'][0]
			channelName = re.sub(r'Low$|High$', '', channelName).strip()  # cope with BBC names that include the bitrate in the name
			tuneInSearchUrl = 'http://tunein.com/search/suggest/?query='+ channelName
			#assume all is well and the supplied url is indeed a playlist!
			
			request = Request(tuneInSearchUrl,
				meta = {'metaData': copy.deepcopy(metaData)},
				callback=self.parseTuneInSearch,
				errback=lambda x:self.parsePlaylist(x,copy.deepcopy(metaData)) )

			yield request
Example #6
    def parse(self, response):

        xxs = XmlXPathSelector(response)
        links = xxs.select(
            "//item/*[local-name()='origLink']/text()").extract()

        return [Request(x, callback=self.parse_item) for x in links]
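The local-name() predicate above sidesteps namespace registration for the feedburner origLink element; an equivalent sketch using explicit registration (assuming the standard feedburner namespace URI) would be:

        xxs.register_namespace('feedburner', 'http://rssnamespace.org/feedburner/ext/1.0')
        links = xxs.select('//item/feedburner:origLink/text()').extract()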
Example #7
 def parse(self, response):
     xxs = XmlXPathSelector(response)
     routetitle = xxs.select('//predictions/@routeTitle').extract()[0]
     stoptag = xxs.select('//predictions/@stopTag').extract()[0]
     predictions = xxs.select('//prediction')
     items = []
     for prediction in predictions:
         item = EtaScraperItem()
         item['seconds'] = prediction.select('@seconds').extract()[0]
         item['minutes'] = prediction.select('@minutes').extract()[0]
         item['is_departure'] = prediction.select("@isDeparture").extract()[0]
         item['dir_tag'] = prediction.select('@dirTag').extract()[0]
         item['trip_tag'] = prediction.select('@tripTag').extract()[0]
         item['vehicle_id'] = prediction.select('@vehicle').extract()[0]
         abl = prediction.select("@affectedByLayover").extract()
         if len(abl) > 0:
             item['affected_by_layover'] = abl[0]
         else:
             item['affected_by_layover'] = 'false'
         item['routename'] = routetitle
         item['stoptag'] = stoptag
         item['created'] = time()
         item['thisdate'] = datetime.now().date()
         direction = item['dir_tag']
         if direction.find(DIRECTION_OPTS[0][0]) == -1 and direction.find(DIRECTION_OPTS[1][0]) == -1:
             direc = DIRECTION_OPTS[2][1]
         elif direction.find(DIRECTION_OPTS[0][0]) != -1:
             direc = DIRECTION_OPTS[0][1]
         else:
             direc = DIRECTION_OPTS[1][1]
         item['dir_tag'] = direc
         items.append(item)
     return items
Example #8
 def handle_bug_xml(self, response):
     logging.info("STARTING XML")
     hxs = XmlXPathSelector(response)
     item = hxs.select('//item')
     try:
         parsed = bugimporters.items.ParsedBug({
             'title': item.select('title/text()').extract()[0],
             'description': item.select('description/text()').extract()[0] ,
             'status':  item.select('status/text()').extract()[0],
             'people_involved': 0, #TODO
             'date_reported': self.format_date(item.select('created/text()').extract()[0]),
             'last_touched': self.format_date(item.select('updated/text()').extract()[0]),
             'submitter_username': item.select('reporter/@username').extract()[0],
             'submitter_realname': item.select('reporter/text()').extract()[0],
             'canonical_bug_link': item.select('link/text()').extract()[0],
             'looks_closed': (item.select('status/text()').extract()[0] == 'Closed'),
             'last_polled': datetime.now(),
             # TODO tracker ids
             #'_project_name': self.tm.tracker_name,
             #'_tracker_name': self.tm.tracker_name,
         })
         yield parsed
     except IndexError as e:
         logging.exception(e)
         logging.debug("AHHHHHHHHHHHHHHHHHHHHHH!!!!!!!!!!!!!: {0}".format(item.select('title/text()').extract()[0]))
	def parse(self, response):
		x = XmlXPathSelector(response)
		#x.register_namespace("xsi", "http://www.w3.org/2001/XMLSchema-instance")
		
		#programs = x.select('./body/outline[position()=4]/outline[position()<4]')
		programs = x.select('//body/outline/outline')
		podcastCount = str(len(programs))
		i=0
		allitems=[]
		for program in programs:
			i=i+1
			l = XPathItemLoader(PodcastItem(), selector=program)
			l.add_xpath('id', 'concat("dpc_", ./@xmlUrl)')
			l.add_value('audioType', 'disco')
			l.add_xpath('brandId', './@xmlUrl')
			l.add_xpath('brandFeed', './@xmlUrl')
			l.add_xpath('brandName', './@title')
			l.add_xpath('brandDescription', './@description')
			l.add_xpath('brandHomepage', './@htmlUrl')
			
			self.log('Discovering dpc [%s of %s] feeds' % (i, podcastCount), level=log.INFO)
		
		
			item = l.load_item()
			yield item
    def parse(self, response):
        # inspect_response(response, self)
        # return
        # hxs = HtmlXPathSelector(response)
        # file_path = "d:/work/GoogleFeed.xml"
        # f = open(file_path)
        # xxs = XmlXPathSelector(text=f.read())
        xxs = XmlXPathSelector(response)
        for sel in xxs.select('//channel/item'):  # ##
            loader = ProductLoader(item=Product(), response=response)
            tmp = sel.select('link/text()').extract()
            if tmp:
                loader.add_value('url', tmp[0])
            # ID
            tmp = sel.select('*[name()="g:id"]/text()').extract()
            if tmp:
                loader.add_value('identifier', tmp[0])
            # Sku
            tmp = sel.select('*[name()="g:id"]/text()').extract()
            if tmp:
                loader.add_value('sku', tmp[0])
            # Name
            tmp = sel.select('title/text()').extract()
            if tmp:
                loader.add_value('name', tmp[0])
            # price
            tmp = sel.select('*[name()="g:sale_price"]/text()').extract()
            if not tmp:
                tmp = sel.select('*[name()="g:price"]/text()').extract()
            if tmp:
                price = round(extract_price(tmp[0]) / Decimal('1.20'), 2)
                loader.add_value('price', price)
            # image_url
            tmp = sel.select('*[name()="g:image_link"]/text()').extract()
            if tmp:
                loader.add_value('image_url', tmp[0])
            # Brand
            tmp = sel.select('*[name()="g:brand"]/text()').extract()
            if tmp and tmp[0] != 'Alliance':
                loader.add_value('brand', tmp[0])
            # category
            tmp = sel.select('*[name()="g:product_type"]/text()').extract()
            if tmp:
                try:
                    loader.add_value('category', tmp[0].split('>')[1].strip())
                except:
                    loader.add_value('category', tmp[0].strip())
            # shipping_cost
            price = loader.load_item()['price']
            if price and price < 50.00:
                loader.add_value('shipping_cost', 5.90)
            # stock
            tmp = sel.select('*[name()="g:availability"]/text()').extract()
            if tmp and tmp[0] == 'in stock':
                loader.add_value('stock', 1)
            else:
                loader.add_value('stock', 0)

            yield loader.load_item()
Example #11
    def test_null_bytes(self):
        hxs = HtmlXPathSelector(text='<root>la\x00la</root>')
        self.assertEqual(hxs.extract(),
                         u'<html><body><root>lala</root></body></html>')

        xxs = XmlXPathSelector(text='<root>la\x00la</root>')
        self.assertEqual(xxs.extract(),
                         u'<root>lala</root>')
Example #12
	def parsePage(self, response):
		x = XmlXPathSelector(response)
		items = []
		feeds = x.select('//lst[@name="grouped"]/lst[@name="brandFeed"]/arr[@name="groups"]/lst')
		
		for feed in feeds:
			metaData={}
			metaData['brandAvgDuration'] = feed.select('./result/doc/str[@name="brandAvgDuration"]/text()').extract()[:1]
			metaData['brandCurrentItem'] = feed.select('./result/doc/str[@name="brandCurrentItem"]/text()').extract()[:1]
			metaData['brandDescription'] = feed.select('./result/doc/str[@name="brandDescription"]/text()').extract()[:1]
			metaData['brandFeed'] = feed.select('./result/doc/str[@name="brandFeed"]/text()').extract()[:1]
			metaData['brandFrequency'] = feed.select('./result/doc/str[@name="brandFrequency"]/text()').extract()[:1]
			metaData['brandHomepage'] = feed.select('./result/doc/str[@name="brandHomepage"]/text()').extract()[:1]
			metaData['brandId'] = feed.select('./result/doc/str[@name="brandId"]/text()').extract()[:1]
			metaData['brandIds'] = feed.select('./result/doc/arr[@name="brandIds"]/text()').extract()
			metaData['brandImage'] = feed.select('./result/doc/str[@name="brandImage"]/text()').extract()[:1]
			metaData['brandName'] = feed.select('./result/doc/str[@name="brandName"]/text()').extract()[:1]
			metaData['brandShortName'] = feed.select('./result/doc/str[@name="brandShortName"]/text()').extract()[:1]
			metaData['brandTimes'] = feed.select('./result/doc/str[@name="brandTimes"]/text()').extract()
			metaData['brandRegions'] = feed.select('./result/doc/arr[@name="brandRegions"]/text()').extract()
			metaData['channelHomepage'] = feed.select('./result/doc/str[@name="channelHomepage"]/text()').extract()[:1]
			metaData['channelId'] = feed.select('./result/doc/str[@name="channelId"]/text()').extract()[:1]
			metaData['channelName'] = feed.select('./result/doc/str[@name="channelName"]/text()').extract()[:1]
			metaData['itunesArtistId'] = feed.select('./result/doc/str[@name="itunesArtistId"]/text()').extract()[:1]
			metaData['itunesPopular'] = feed.select('./result/doc/int[@name="itunesPopular"]/text()').extract()[:1]
			metaData['itunesPopularInGenre'] = feed.select('./result/doc/int[@name="itunesPopularInGenre"]/text()').extract()[:1]
			metaData['itunesSimilar'] = feed.select('./result/doc/str[@name="itunesSimilar"]/text()').extract()[:1]
			metaData['itunesRelated'] = feed.select('./result/doc/str[@name="itunesRelated"]/text()').extract()[:1]
			metaData['itunesTrackId'] = feed.select('./result/doc/str[@name="itunesTrackId"]/text()').extract()[:1]
			metaData['ownerHomepage'] = feed.select('./result/doc/str[@name="ownerHomepage"]/text()').extract()[:1]
			metaData['ownerId'] = feed.select('./result/doc/str[@name="ownerId"]/text()').extract()[:1]
			metaData['ownerImage'] = feed.select('./result/doc/str[@name="ownerImage"]/text()').extract()[:1]
			metaData['ownerKey'] = feed.select('./result/doc/str[@name="ownerKey"]/text()').extract()[:1]
			metaData['ownerName'] = feed.select('./result/doc/str[@name="ownerName"]/text()').extract()[:1]
			
			
			if metaData['itunesTrackId']:
				metaData['itunesTrackId'] = metaData['itunesTrackId'][0]

			#itunes podcast html
			#from an Id
			if 'itunesTrackId' in metaData and metaData['itunesTrackId']:
				self.logProgress('parsePage from Id', metaData['brandName'][0], '', metaData['itunesTrackId'], log.INFO, str(metaData['itunesTrackId']) )

				request = Request('http://itunes.apple.com/lookup?id='+ metaData['itunesTrackId'], meta = {'metaData': copy.deepcopy(metaData)}, callback=self.getItunesTrackJson)
			else:
			#if not from the title
				self.logProgress('parsePage from title', metaData['brandName'], '', '---------', log.INFO)
				try:
					ownerName = metaData['ownerName'][0] 
				except:
					ownerName = ''
				#&attribute=titleTerm removed whilst using the owner name in the string as well
				request = Request('http://itunes.apple.com/search?term='+ metaData['brandName'][0] +' '+ ownerName +'&entity=podcast', meta = {'metaData': copy.deepcopy(metaData)}, callback=self.getItunesTrackJson)
			
			self.indexedPodcasts.append(1)
			yield request
Example #13
	def load_podcast_rss(self, response):
		x = XmlXPathSelector(response)
		x.register_namespace("xsi", "http://www.w3.org/2001/XMLSchema-instance")
		x.register_namespace("itunes", "http://www.itunes.com/dtds/podcast-1.0.dtd")
		x.register_namespace("media", "http://search.yahoo.com/mrss/")
		metaData = response.meta['metaData']
		itunesTrackId =  metaData['itunesTrackId']
		metaData['rssUrl'] = response.url
		
		##########
		# a limit of 50 episodes has been hard coded here, this should be in settings somewhere
		#########
		episodes = x.select('//channel/item[enclosure[contains(@type,"audio") or contains(@type,"video")]][position()<50]')
		podcastEpisodeCount = str(len(episodes))

		items = []
		self.totalPodcastEpisodes = self.totalPodcastEpisodes + len(episodes)
		if len(episodes)==0:
			self.logProgress('Empty feed', metaData['brandName'][0], '', itunesTrackId, log.WARNING, ('No episodes for %s' % (response.url)))
			
			metaData['itemtype']=['noepisodes']
			item = self.load_item(x.select('//channel'), metaData)
			yield item
		else:
			podcastEpisodeIndex = str(len(items))
			podcastEpisodeCount = str(len(episodes))
			self.logProgress('load_podcast_rss', metaData['brandName'][0], '', itunesTrackId, log.INFO, ('%s/%s' % (podcastEpisodeIndex, podcastEpisodeCount)))
			for episode in episodes:
				metaData['itemtype']=['ondemand']
				item = self.load_item(episode, metaData)
				yield item
    def parse(self, response):
        xxs = XmlXPathSelector(response)
        stores = xxs.select('//locationinfo')
        items = []
        for store in stores:
            item = TutItem()
            item['address'] = store.select('address/text()').extract()
            item['address2'] = store.select('address2/text()').extract()
            items.append(item)

        return items
Example #15
	def parse(self, response):
		xxs = XmlXPathSelector(response)
		entries = xxs.select('//item')
		for entry in entries:
			item = ZoinkscraperItem()

			item['name'] = entry.select('./title/text()')[0].extract_unquoted()
			item['url'] = entry.select('./link/text()')[0].extract()

			item['date'] = datetime.strptime(entry.select('./pubDate/text()')[0].extract()[:-6],'%a, %d %b %Y %H:%M:%S')
			yield item
    def parse(self, response):
        """
        We define a custom parser here because we need to get the link from
        the feed item and then follow it to get the recipe data.

        Getting the data from <content:encoded> seems overly complex, as we
        would have to decode all the encoded characters and then build a DOM
        from that.
        """
        xxs = XmlXPathSelector(response)
        links = xxs.select("//item/*[local-name()='origLink']/text()").extract()
        return [Request(x, callback=self.parse_item) for x in links]
Example #17
    def test_selector_over_text(self):
        hxs = HtmlXPathSelector(text='<root>lala</root>')
        self.assertEqual(hxs.extract(),
                         u'<html><body><root>lala</root></body></html>')

        xxs = XmlXPathSelector(text='<root>lala</root>')
        self.assertEqual(xxs.extract(),
                         u'<root>lala</root>')

        xxs = XmlXPathSelector(text='<root>lala</root>')
        self.assertEqual(xxs.select('.').extract(),
                         [u'<root>lala</root>'])
Example #18
 def parse_travel_asy(self, response):
     xxs = XmlXPathSelector(response)
     xxs.remove_namespaces()
     json_object = json.loads(xxs.select("//string/text()").extract()[0])
     request_list = []
     for product in json_object['product']:
         if product['isYuyue'] == 'True':
             url = 'http://www.zhongmin.cn/Product/ProductDetails.aspx?pid=%s&bid=11' % product['Id']
         else:
             url = 'http://www.zhongmin.cn/Travel/Product/TravelDetailArr%(Id)s-%(age)sd%(day)s.html' % product
         request_list.append(Request(url = url))
     return request_list
Example #19
    def parse(self, response):

        hxs = XmlXPathSelector(response)
        name = hxs.select('//name').extract()

        if self.task_id is not None:
            self.log('Processing item %s' % self.task_id, log.INFO)
            self.alert_context = 'task_id=%s' % self.task_id
            for item in self.process_item(self.bot_task_params(self.task_id)):
                yield item
        else:
            for item in self.process_items():
                yield item
Example #20
	def parse(self, response):
		x = XmlXPathSelector(response)
		total = int(x.select('//lst[@name="grouped"]/lst[@name="brandFeed"]/int[@name="ngroups"]/text()').extract()[0])
		pageSize = 100		
		urlBase = response.url
		start = 0 #try letting scrapy handle it all

		for i in range(start, total, pageSize):
			url = urlBase + '&start='+ str(i) 
			self.log('Requesting next %d page %d of %d %s'% (pageSize, i, total, url), log.DEBUG)
			# add a , dont_filter=True to request to prevent caching solr requests by Scrapy
			request = Request( url, callback=self.parsePage, dont_filter=True)
			yield request
Example #22
def xmliter_lxml(obj, nodename, namespace=None):
    from lxml import etree
    reader = _StreamReader(obj)
    tag = '{%s}%s' % (namespace, nodename) if namespace else nodename
    iterable = etree.iterparse(reader, tag=tag, encoding=reader.encoding)
    selxpath = '//' + ('x:%s' % nodename if namespace else nodename)
    for _, node in iterable:
        nodetext = etree.tostring(node)
        node.clear()
        xs = XmlXPathSelector(text=nodetext)
        if namespace:
            xs.register_namespace('x', namespace)
        yield xs.select(selxpath)[0]
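A minimal usage sketch, assuming a Scrapy XmlResponse named response that holds an RSS feed; each yielded selector wraps a single <item> node:

for item_sel in xmliter_lxml(response, 'item'):
    title = item_sel.select('title/text()').extract()
    link = item_sel.select('link/text()').extract()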
 def parsePart(self, response):
     item = response.meta['item']
     xxs = XmlXPathSelector(response)
     if len(xxs.select("//ERRORSEGMENT")) == 0:
         part_num = response.meta['part_num']
         end_range = response.meta['end_range']
         part_prefix = response.meta['part_prefix']
         item['parts'].append(self.part_format % (part_prefix, part_num))
         if part_num < end_range:
             yield self.makePartRequest(part_prefix, part_num + 1, item, end_range)
         else:
             yield item
     else:
         yield item
Example #25
    def parse(self, response):
        """
        We define a custom parser here because we need to get the link from
        the feed item and then follow it to get the recipe data.

        Getting the data from <content:encoded> seems overly complex, as we
        would have to decode all the encoded characters and then build a DOM
        from that.
        """
        xxs = XmlXPathSelector(response)
        links = xxs.select(
            "//item/*[local-name()='origLink']/text()").extract()
        # self.parse_item comes from OnehundredonecookbooksMixin
        return [Request(x, callback=self.parse_item) for x in links]
Example #26
 def parse(self, response):
     x = XmlXPathSelector(response)
     zp_nodes = x.xpath("//stats")
     source = response.meta.get("source", "")
     for zp_node in zp_nodes:
         name = zp_node.xpath("//stats/stat/name/text()").extract()
         xy = zp_node.xpath("//stats/stat/xy/text()").extract()
         for i in range(len(name)):
             gz_item = GJZDItem()
             gz_item["name"] = name[i]
             gz_item["source"] = source
             gz_item["lng"] = xy[i].split(",")[0]
             gz_item["lat"] = xy[i].split(",")[1]
             yield gz_item
Example #27
    def test_selector_namespaces_simple(self):
        body = """
        <test xmlns:somens="http://scrapy.org">
           <somens:a id="foo"/>
           <a id="bar">found</a>
        </test>
        """

        response = XmlResponse(url="http://example.com", body=body)
        x = XmlXPathSelector(response)
        
        x.register_namespace("somens", "http://scrapy.org")
        self.assertEqual(x.select("//somens:a").extract(), 
                         ['<somens:a id="foo"/>'])
Example #28
	def parseFeed(self, response):
		jsonResponse = response.meta['jsonResponse']
		
		brandStats = jsonResponse['stats']['stats_fields']['episodePublishDate']
		#maxDate = brandStats['max']
		#updateDoc = '<delete><query>brandFeed:"'+brandFeed+'"</query></delete>'


		x = XmlXPathSelector(response)
		x.register_namespace("xsi", "http://www.w3.org/2001/XMLSchema-instance")
		x.register_namespace("itunes", "http://www.itunes.com/dtds/podcast-1.0.dtd")
		x.register_namespace("media", "http://search.yahoo.com/mrss/")

		#########
		newEpisodes = x.select('//channel/item[enclosure[contains(@type,"audio") or contains(@type,"video")]]')
		metaData = {}
		metaData['rssUrl'] = response.url
		episodes = []
		#create a single solr update doc that contains all the new episodes and deletes expired ones
		

		for xmlEpisode in newEpisodes:
			jsonBrand = jsonResponse['grouped']['brandFeed']['groups'][0]['doclist']['docs'][0]
			episode = self.load_item(jsonBrand, xmlEpisode, metaData).__dict__.values()[0]
			episodes.append(episode)

		updatejson = JSONEncoder().encode(episodes)
		yield Request(
			url=self.solrUpdateUrl, 
			method='POST', 
			body=updatejson,
			headers={'Content-Type':'application/json'},
			callback=self.dummyEnd
		)
Example #29
 def parse(self,response):
     xxs = XmlXPathSelector(response)
     citys = xxs.select('//division')
     list = []
     for city in citys:
         list.append((
                 city.select('id/text()').extract()[0],
                 city.select('name/text()').extract()[0],
                  city.select('location/latitude/text()').extract()[0],
                  city.select('location/longitude/text()').extract()[0]
             ))
     sql = 'insert into city_meituan (m_id,`name`,latitude,longtitude)value(%s,%s,%s,%s)'
     trun = 'truncate table city_meituan'
     db.batchSQL(sql=sql,list=list,trun=trun)
    def parse_xml_document(self, response):
        xxs = XmlXPathSelector(response)
        votes = xxs.select('//meeting/vote')
        items = []

        for vote in votes:
            councilvote = VoteItem()
            votenum = int(vote.select('@number').extract()[0])
            councilvote["number"] = int(votenum)
            councilvote["date"] = vote.select('vote-date/text()').extract()[0]
            councilvote["time"] = vote.select('vote-time/text()').extract()[0]
            councilvote["motion_ch"] = vote.select('motion-ch/text()').extract()[0]
            councilvote["motion_en"] = vote.select('motion-en/text()').extract()[0]
            councilvote["mover_ch"] = vote.select('mover-ch/text()').extract()[0]
            councilvote["mover_en"] = vote.select('mover-en/text()').extract()[0]
            councilvote["mover_type"] = vote.select('mover-type/text()').extract()[0]
            councilvote["separate_mechanism"] = vote.select('vote-separate-mechanism/text()').extract()[0]
            if councilvote["separate_mechanism"] == 'Yes':
                mechanism = ['functional-constituency', 'geographical-constituency']
            else:
                mechanism = ['overall']
            for constituency in mechanism:
                if constituency == 'functional-constituency':
                    short = 'fc_'
                elif constituency == 'geographical-constituency':
                    short = 'gc_'
                else:
                    short = ''
                for count_type in ['present', 'vote', 'yes', 'no', 'abstain']:
                    councilvote[short+count_type] = int(vote.select('vote-summary/'+constituency+'/'+count_type+'-count/text()').extract()[0])
                councilvote[short+'result'] = vote.select('vote-summary/'+constituency+'/'+'result/text()').extract()[0]
            councilvote['result'] = vote.select('vote-summary/overall/result/text()').extract()[0]


            items.append(councilvote)

            members = xxs.select('//meeting/vote[%s]/individual-votes/member'%votenum)
            for member in members:
                individualvote = IndividualVoteItem()
                individualvote['number'] = councilvote["number"]
                individualvote['date'] = councilvote["date"]
                individualvote['name_ch'] = member.select('@name-ch').extract()[0]
                individualvote['name_en'] = member.select('@name-en').extract()[0]
                individualvote['constituency'] = member.select('@constituency').extract()[0]
                individualvote['vote'] = member.select('vote/text()').extract()[0]

                items.append(individualvote)


        return items
Example #31
 def detect_feed(self, response):
     """Just detects the feed in the links and returns an Item"""
     xxs = XmlXPathSelector(response)
     # TODO: tweak the feedparser lib to reuse the headers/body from this response
     # instead of downloading the feed page a second time.
     
     if any(xxs.select("/%s" % feed_type) for feed_type in ['rss', 'feed', 'xml', 'rdf']):
         try:
             rssFeed = feedparser.parse(response.url);
             return  self.extract_feed(rssFeed)
         except:
             raise Exception('Exception while parsing/extracting the feed')	
         
     return None
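Per the TODO above, one way to skip the second download is to hand feedparser the body that was already fetched; a sketch (feedparser.parse accepts a string as well as a URL):

          rssFeed = feedparser.parse(response.body)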
Example #32
    def parse_rss(self, response):
        item = response.request.meta['item']

        if response.status != 500:
            xxs = XmlXPathSelector(response)
            xxs.remove_namespaces()

            item['date'] = xxs.select('.//channel/date/text()').extract()
            description = xxs.select('.//channel/description/text()').extract()
            if (len(item.get('description', '')) < 10) and description:
                item['description'] = ''.join(description).strip()

        del(item['subpage_urls'])

        return item
    def parse(self, response):
        xxs = XmlXPathSelector(response)

        eis = xxs.select('/api/query/embeddedin/ei')
        for ei in eis:
            pageid = ei.select('@pageid').extract()
            if pageid:
                yield Request('http://en.wikipedia.org/w/api.php?action=query&prop=revisions|info&pageids=%s&rvprop=content&inprop=url&format=xml' % pageid[0],
                              callback=self.parse_page_content)

        cont = xxs.select('/api/query-continue/embeddedin/@eicontinue').extract()
        if cont:
            yield Request('http://en.wikipedia.org/w/api.php?action=query&list=embeddedin&'
                          'eititle=Template:Infobox%%20artwork&eilimit=100&eifilterredir=nonredirects&format=xml&eicontinue=%s' % cont[0],
                          callback=self.parse)
    def parse(self, response):
      xxs = XmlXPathSelector(response)
      hxs = HtmlXPathSelector(response)
      links = xxs.select('//link/text()').extract()

      log.msg('Link length: %s' % len(links), level=log.ERROR)

      if len(links) <= 0:
        log.msg('no links found, using regular parser', level=log.ERROR)
        links = hxs.select('//a/@href').extract()

      msg = 'Links: %s' % links
      log.msg(msg, level=log.ERROR)

      return [Request(x, callback=self.parse_item) for x in links]
 def parse(self, response):
     xxs = XmlXPathSelector(response)
     xxs.register_namespace('sac', 'http://www.steepandcheap.com/docs/steepcheap/rss.xml')
     deals = xxs.select('//item')
     items = []
     for deal in deals:
         item = DealItem()
         item['title'] = deal.select('title/text()').extract()
         item['link'] = deal.select('link/@href').extract()
         item['desc'] = deal.select('description/text()').extract()
         item['shortDesc'] = deal.select('sac:listDescription/text()').extract()
         item['curPrice'] = deal.select('sac:priceCurrent/text()').extract()
         item['regPrice'] = deal.select('sac:priceRegular/text()').extract()
         items.append(item)
     return items
 def parsePart(self, response):
     item = response.meta['item']
     xxs = XmlXPathSelector(response)
     if len(xxs.select("//ERRORSEGMENT")) == 0:
         part_num = response.meta['part_num']
         end_range = response.meta['end_range']
         part_prefix = response.meta['part_prefix']
         item['parts'].append(self.part_format % (part_prefix, part_num))
         if part_num < end_range:
             yield self.makePartRequest(part_prefix, part_num + 1, item,
                                        end_range)
         else:
             yield item
     else:
         yield item
    def parse_xml_document(self, response):
        xxs = XmlXPathSelector(response)
        item = LegcoVotes()
        item["member_vote"] = xxs.select('//individual-votes/member/vote/text()').extract()[0]
        # XPaths to fix
        #item['company'] = site.select('//*[@id="collapsible1"]/div[1]/div[2]/div[2]/span[2]/text()').extract()
        #item['filling_date'] = site.select('//*[@id="collapsible40"]/div[1]/div[2]/div[5]/span[2]/text()').extract()
        #item['types_of_securities'] = site.select('//*[@id="collapsible37"]/div[1]/div[2]/div[1]/span[2]/text()').extract()
        #item['offering_amount'] = site.select('//*[@id="collapsible39"]/div[1]/div[2]/div[1]/span[2]/text()').extract()
        #item['sold_amount'] = site.select('//*[@id="collapsible39"]/div[1]/div[2]/div[2]/span[2]/text()').extract()
        #item['remaining'] = site.select('//*[@id="collapsible39"]/div[1]/div[2]/div[3]/span[2]/text()').extract()
        #item['investors_accredited'] = site.select('//*[@id="collapsible40"]/div[1]/div[2]/div[2]/span[2]/text()').extract()
        #item['investors_non_accredited'] = site.select('//*[@id="collapsible40"]/div[1]/div[2]/div[1]/span[2]/text()').extract()

        return item
Example #38
    def parse(self, response):
        xxs = XmlXPathSelector(response)
        hxs = HtmlXPathSelector(response)
        links = xxs.select('//link/text()').extract()

        log.msg('Link length: %s' % len(links), level=log.ERROR)

        if len(links) <= 0:
            log.msg('no links found, using regular parser', level=log.ERROR)
            links = hxs.select('//a/@href').extract()

        msg = 'Links: %s' % links
        log.msg(msg, level=log.ERROR)

        return [Request(x, callback=self.parse_item) for x in links]
Example #39
	def parsePage(self, response):
		x = XmlXPathSelector(response)
		docs = x.select('//lst[@name="grouped"]/lst[@name="brandFeed"]/arr[@name="groups"]/lst/result/doc')
		
		for doc in docs:
			
			brandFeed = doc.select('./str[@name="brandFeed"]/text()').extract()[0]
			#get the brand from Solr as JSON (easier to copy into new episodes) and include some min max stats
			solrBrandUrl = self.solrUrl + 'fq=brandFeed:"'+ brandFeed +'"&wt=json&stats=true&stats.field=episodePublishDate&group.limit=50'
			request = Request(
				solrBrandUrl,
				method='GET',
				meta = {'doc': doc},
				callback=self.loadBrand)
				
			yield request
Example #40
 def parse(self, response):
     base_url = get_base_url(response)
     xxs = XmlXPathSelector(response)
     xxs.register_namespace("g", "http://base.google.com/ns/1.0")
     products = xxs.select('//channel/item')
     for product in products:
         loader = ProductLoader(item=Product(), selector=product)
         loader.add_xpath('url', 'link/text()')
         loader.add_xpath('name', 'title/text()')
         loader.add_xpath('image_url', 'g:image_link/text()')
         loader.add_xpath('price', 'g:price/text()')
         loader.add_xpath('brand', 'g:brand/text()')
         loader.add_xpath('category', 'g:brand/text()')
         loader.add_xpath('sku', 'g:id/text()')
         loader.add_xpath('identifier', 'g:id/text()')
         yield loader.load_item()
Example #41
    def parse(self, response):

        xxs = XmlXPathSelector(response)
        xxs.remove_namespaces()
        products = xxs.select('//item')
        for product in products:
            mpn = product.xpath('mpn/text()')
            if mpn:
                mpn = mpn[0].extract().upper().strip()
            else:
                mpn = None
            row = self.monitored_products.get(mpn) if mpn else None
            if row is None or (row and row['Discontinued'].lower().strip()
                               == 'yes'):
                continue
            loader = ProductLoader(selector=product, item=Product())
            loader.add_xpath('identifier', 'id/text()')
            loader.add_xpath('sku', 'mpn/text()')
            loader.add_xpath('brand', 'brand/text()')
            loader.add_xpath('image_url', 'image_link/text()')
            loader.add_xpath('url', 'link/text()')
            loader.add_xpath('name', 'title/text()')
            price = product.select('sale_price/text()').extract()
            if not price:
                price = product.select('price/text()').extract()

            loader.add_value('price', extract_price(price[0]))

            categories = product.select(
                'product_type/text()').extract()[-1].split('>')
            categories = map(lambda x: x.strip(), categories)
            loader.add_value('category', categories)

            shipping_cost = product.select('shipping/price/text()').extract()
            shipping_cost = extract_price(
                shipping_cost[0]) if shipping_cost else ''
            loader.add_value('shipping_cost', shipping_cost)

            in_stock = product.select(
                'availability[contains(text(), "in stock")]').extract()
            if not in_stock:
                loader.add_value('price', 0)

            item = loader.load_item()
            item['metadata'] = RHSMeta()
            item['metadata']['cost_price'] = row['Cost Price']
            yield item
Example #42
	def parse(self, response):
		xxs = XmlXPathSelector(response)
		for namespace, schema in self.namespaces.iteritems():
			xxs.register_namespace(namespace, schema)

		for entry in xxs.select('//itunesu:entry'):
			metaData={}
			metaData['audioType'] = entry.select('./im:contentType/@term').extract()
			metaData['brandId'] = entry.select('./itunesu:id/text()').extract()
			metaData['brandName'] = entry.select('./im:name/text()').extract()
			metaData['brandDescription'] = entry.select('./itunesu:summary/text()').extract()
			metaData['brandCategory'] = entry.select('./itunesu:category/@label').extract()
			metaData['brandGenres'] = entry.select('./itunesu:category/@label').extract()
			metaData['brandGenreIds'] = entry.select('./itunesu:category/@im:id').extract()
			metaData['brandPublishDate'] = entry.select('./im:releaseDate/text()').extract()
			
			metaData['itunesTrackId'] = entry.select('./itunesu:id/@im:id').extract()
			metaData['itunesArtworkUrl55'] = entry.select('./im:image[@height="55"]/text()').extract()
			metaData['itunesArtworkUrl60'] = entry.select('./im:image[@height="60"]/text()').extract()
			metaData['itunesArtworkUrl170'] = entry.select('./im:image[@height="170"]/text()').extract()
			metaData['itunesCollectionPrice'] = entry.select('./im:price/@amount').extract()
			metaData['itunesCollectionViewUrl'] = entry.select('./itunesu:link/@href').extract()
			
			# haven't got anything that identifies the "department", so using category for now
			metaData['channelName'] = metaData['brandCategory']


			metaData['ownerName'] = entry.select('./im:artist/text()').extract()
			metaData['ownerId'] = entry.select('./im:artist/@href').extract()

			#html = entry.select('./itunesu:content[@type="html"]/text()').extract()[0]
			#hxs = HtmlXPathSelector(text=html)
			#metaData['ownerName'] = hxs.select('//./a[contains(@href,"institution")]/text()').extract()


			itunesUrl = 'http://itunes.apple.com/WebObjects/DZR.woa/wa/viewPodcast?cc=us&mt=10&id=' + metaData['itunesTrackId'][0]

			request = Request(itunesUrl,
				method='GET',
				meta = {'metaData': copy.deepcopy(metaData), 'dont_retry': True},
				headers={ 
					"User-Agent": "iTunes/9.1.1"
				},
				callback=self.parseItunesHtml)
				#errback=lambda x:self.parseItem(x,copy.deepcopy(metaData)) ) 
			yield request
Example #43
def xmliter_lxml(obj, nodename):
    from lxml import etree
    reader = _StreamReader(obj)
    iterable = etree.iterparse(reader, tag=nodename, encoding=reader.encoding)
    for _, node in iterable:
        nodetext = etree.tostring(node)
        node.clear()
        yield XmlXPathSelector(text=nodetext).select('//' + nodename)[0]
 def parse(self, response):
     item = ArxivOrgItem()
     xxs = XmlXPathSelector(response)
     xxs.remove_namespaces()
     # The selector list must be converted to a string first so it can be searched with a regex
     xml_data = str(xxs.xpath('//link'))
     #logging.log(logging.INFO, xml_data)
     url_list = re.findall(r'http://arxiv\.org/abs/\d+\.\d+', xml_data)
     #logging.log(logging.INFO, url_list)
     for url in url_list:
         logging.log(
             logging.INFO,
             f'**************** crawling link: {url} ***************** ')
         yield Request(url=url,
                       callback=self.parse_single_page,
                       meta={'item': item},
                       dont_filter=True)
 def parse(self, response):
     x = XmlXPathSelector(response)
     zp_nodes = x.xpath("//lines")
     count = 0
     for zp_node in zp_nodes:
         road = zp_node.xpath("//lines/line/name/text()").extract()
         stats = zp_node.xpath("//lines/line/stats/text()").extract()
         for i in range(len(road)):
             s = stats[i].split(";")
             for j in range(len(s)):
                 count += 1
                 zd_item = ZDCXItem()
                 zd_item["road"] = road[i]
                 zd_item["station_name"] = s[j]
                 zd_item["station_num"] = count
                 yield zd_item
             count = 0
Example #46
    def parse(self, response):
        if self.scraper.content_type == 'H':
            xs = HtmlXPathSelector(response)
        else:
            xs = XmlXPathSelector(response)
        base_elem = self.scraper.get_base_elem()
        url_elem = self.scraper.get_detail_page_url_elem()
        base_objects = xs.select(base_elem.x_path)
        if (len(base_objects) == 0):
            self.log("No base objects found!", log.ERROR)

        if (self.conf['MAX_ITEMS_READ']):
            items_left = min(
                len(base_objects),
                self.conf['MAX_ITEMS_READ'] - self.items_read_count)
            base_objects = base_objects[0:items_left]

        for obj in base_objects:
            item_num = self.items_read_count + 1
            self.log("Starting to crawl item %s." % str(item_num), log.INFO)
            item = self.parse_item(response, obj)
            #print item
            url_name = url_elem.scraped_obj_attr.name
            if (item and url_name in item):
                url = item[url_name]
                cnt = self.scraped_obj_class.objects.filter(
                    url=item[url_name]).count()
                cnt1 = self.scraper.get_standard_update_elems_from_detail_page(
                ).count()
                cnt2 = self.scraper.get_from_detail_page_scrape_elems().count()
                # Mark item as DOUBLE item
                if cnt > 0:
                    item[url_name] = 'DOUBLE' + item[url_name]
                # (DOUBLE item with no standard update elements to be scraped from detail page) or
                # generally no attributes scraped from detail page
                if (cnt > 0 and cnt1 == 0) or cnt2 == 0:
                    #loader = XPathItemLoader(item=Article(), response=response)
                    #loader.add_xpath('description', '//p[not(@align="center")]/text()')
                    #l.add_value('last_updated', 'today')
                    yield item
                else:
                    yield Request(url,
                                  callback=self.parse_item,
                                  meta={'item': item})
            else:
                self.log("Detail page url elem could not be read!", log.ERROR)
Example #47
    def get_products(self, meta, response, colors, colors_ids):
        hxs = XmlXPathSelector(response)
        names, ids = self.get_names(meta['base_name'], meta['product_id'],
                                    meta['current_data'], colors, colors_ids)

        for i, name in enumerate(names):
            p = ProductLoader(item=Product(), response=response)
            p.add_value('identifier', ids[i])
            p.add_value('name', name)
            p.add_value('brand', meta['brand'])
            p.add_value('url', meta['url'])
            p.add_value('image_url', meta['image_url'])
            price = hxs.select('//cmd[@t="discounted_price"]/text()').extract()
            if price:
                price = price[0].replace('.', '').replace(',', '.')
                price = extract_price(price)
            if not price or price == Decimal(1):
                if not price:
                    self.log('Price not found %s' % meta['url'])
                else:
                    self.log('Price is one %s' % meta['url'])

                if not self.retries.get(
                        meta['url']) or self.retries.get(meta['url']) < 3:
                    self.log('Retrying %s' % meta['url'])
                    self.retries[meta['url']] = self.retries.get(
                        meta['url'], 0) + 1
                    p = meta['url']
                    yield Request(p,
                                  meta={
                                      'category':
                                      response.meta.get('category', ''),
                                      'cookiejar':
                                      p + str(self.retries.get(meta['url']))
                                  },
                                  callback=self.parse_product,
                                  dont_filter=True)
                else:
                    self.log('Max retries reached %s' % meta['url'])
                return
            p.add_value('price', price)
            p.add_value('shipping_cost', '0')
            p.add_value('category', response.meta.get('category'))
            yield p.load_item()
Example #48
 def parse(self, response):
     xxs = XmlXPathSelector(response)
     base_url = get_base_url(response)
     xxs.register_namespace("f", "http://www.w3.org/2005/Atom")
     products = xxs.select('//f:entry')
     for product in products:
         product.register_namespace("g", "http://base.google.com/ns/1.0")
         product.register_namespace("p", "http://www.w3.org/2005/Atom")
         product_loader = ProductLoader(item=Product(), selector=product)
         name = product.select('./p:title/text()').extract()[0]
         if 'B-STOCK' in name.upper():
             continue
         product_loader.add_value('name', name)
         url = product.select('./p:link/@href').extract()[0]
         product_loader.add_value('url', urljoin_rfc(base_url, url))
         image_url = product.select('./g:image_link/text()').extract()
         if image_url:
             product_loader.add_value('image_url',
                                      urljoin_rfc(base_url, image_url[0]))
         category = product.select('./g:product_type/text()').extract()
         if category:
             product_loader.add_value('category', category[0])
         brand = product.select('./g:brand/text()').extract()
         if brand:
             product_loader.add_value('brand', brand[0])
         price = product.select('./g:sale_price/text()').extract()
         if price:
             product_loader.add_value('price', extract_price(price[0]))
         else:
             price = product.select('./g:price/text()').extract()
             product_loader.add_value('price', extract_price(price[0]))
         # sku = product.select('./g:gtin/text()').extract()
         # if sku:
         #     product_loader.add_value('sku', sku[0])
         identifier = product.select('./g:id/text()').extract()[0]
         product_loader.add_value('identifier', identifier)
         product_loader.add_value('sku', identifier)
         shipping_cost = product.select(
             './g:shipping/g:price/text()').extract()
         if shipping_cost:
             product_loader.add_value('shipping_cost',
                                      extract_price(shipping_cost[0]))
         product = product_loader.load_item()
         yield product
    def parse(self, response):
        xxs = XmlXPathSelector(response)

        for productxs in xxs.select(
                '//product[attribute_set/text()!="spares-accessories"]'):
            loader = ProductLoader(item=Product(), selector=productxs)
            loader.add_xpath('sku', './product_id/text()')
            loader.add_xpath('identifier', './product_id/text()')
            loader.add_xpath('price', './product_price/text()')
            loader.add_xpath('name', './product_name/text()')
            loader.add_xpath('url', './product_url/text()')
            loader.add_xpath('category', './attribute_set/text()')
            loader.add_xpath('brand', './manufacturer/text()')
            brand = loader.get_output_value('brand').strip().upper()

            if brand in self.ignore_brands:
                log.msg('Ignoring product %s because of brand %s' %
                        (loader.get_output_value('identifier'), brand))
                continue

            loader.add_value('stock', '1')

            item = loader.load_item()
            item['identifier'] = item['identifier'].upper()

            cost_price = productxs.select('./cost/text()').extract()
            metadata = CSCateringMeta()
            cost_price = cost_price[0].strip() if cost_price else '0.00'
            metadata['cost_price'] = cost_price
            item['metadata'] = metadata

            category = loader.get_output_value('category').strip().lower()

            if category in ignore_categories and not self.has_sku(
                    item.get('sku', '')):
                log.msg('Ignoring product %s because of category %s' %
                        (loader.get_output_value('identifier'), category))
                continue

            yield Request(item['url'],
                          callback=self.parse_img,
                          meta={'item': item})
Example #50
 def parse(self, response):
     xxs = XmlXPathSelector(response)
     xxs.register_namespace("g", "http://base.google.com/ns/1.0")
     products = xxs.select('//channel/item')
     for product in products:
         loader = ProductLoader(item=Product(), selector=product)
         loader.add_xpath('url', 'link/text()')
         loader.add_xpath('name', 'title/text()')
         loader.add_xpath('image_url', 'g:image_link/text()')
         loader.add_xpath('price', 'g:price/text()')
         loader.add_xpath('brand', 'g:brand/text()')
         categories = product.select(
             'g:product_type/text()').extract()[0].split(' &gt; ')
         loader.add_value('category', categories)
         loader.add_xpath('sku', 'g:id/text()')
         loader.add_xpath('identifier', 'g:id/text()')
         stock = product.select(
             'g:availability/text()').extract()[0].lower()
         if stock != 'in stock':
             loader.add_value('stock', 0)
         yield loader.load_item()
Example #51
    def parse(self, response):
        if not hasattr(self, 'parse_node'):
            raise NotConfigured(
                'You must define parse_node method in order to scrape this XML feed'
            )

        response = self.adapt_response(response)
        if self.iterator == 'iternodes':
            nodes = self._iternodes(response)
        elif self.iterator == 'xml':
            selector = XmlXPathSelector(response)
            self._register_namespaces(selector)
            nodes = selector.select('//%s' % self.itertag)
        elif self.iterator == 'html':
            selector = HtmlXPathSelector(response)
            self._register_namespaces(selector)
            nodes = selector.select('//%s' % self.itertag)
        else:
            raise NotSupported('Unsupported node iterator')

        return self.parse_nodes(response, nodes)
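This dispatcher follows the pattern of Scrapy's XMLFeedSpider; assuming that context, a minimal sketch of a subclass driving it (the spider name and feed URL are illustrative):

from scrapy.contrib.spiders import XMLFeedSpider

class ExampleFeedSpider(XMLFeedSpider):
    name = 'example_feed'                              # illustrative
    start_urls = ['http://www.example.com/feed.xml']   # illustrative
    iterator = 'xml'    # selects the XmlXPathSelector branch above
    itertag = 'item'    # each //item node is passed to parse_node

    def parse_node(self, response, node):
        title = node.select('title/text()').extract()
        self.log('Found feed item: %s' % title)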
Example #52
    def parse(self, response):
        hxs = XmlXPathSelector(response)
        shows = hxs.select('//show')
        date_from = datetime.now()
        date_to = date_from + timedelta(days=7 * 6)

        for show in shows:
            name = show.select('./name/text()').extract()[0]
            url = show.select('./@href').extract()[0]
            show_id = url.split('/')[-1]
            show_data = SHOWS_DATA % (show_id, date_from.strftime('%Y-%m-%d'),
                                      date_to.strftime('%Y-%m-%d'))
            r = Request(
                'https://api.entstix.com/api/v1/xlive/booking/book/availability/show',
                method='POST',
                body=show_data,
                callback=self.parse_products,
                meta={
                    'name': name,
                    'id': show_id
                })
            yield r
Example #53
def scrape_rss(response):
    log.msg("inside scrape rss")
    xxs = XmlXPathSelector(response)
    items = []
    requests = []
    for item_tag in xxs.select('//item'):
        items.append(ArticleItem())
        if len(item_tag.select("title")) > 0:
            items[-1]["title"] = item_tag.select("title/text()")[0].extract()
        if len(item_tag.select("pubDate")) > 0:
            items[-1]["time_published"] = [
                item_tag.select("pubDate/text()")[0].extract()
            ]
        if len(item_tag.select("link")) > 0:
            items[-1]["url"] = item_tag.select("link/text()")[0].extract()
        if len(item_tag.select("description")) > 0:
            items[-1]["summary"] = item_tag.select(
                "description/text()")[0].extract()

        request = Request(items[-1]["url"], callback=extract_author_from_link)
        request.meta["item"] = items[-1]
        yield request
Example #54
    def parse(self, response):

        xxs = XmlXPathSelector(response)
        xxs.remove_namespaces()
        urls = xxs.select('//loc/text()').extract()
        for url in urls:
            if 'brands-sitemap.xml' in url:
                continue

            if 'productbrand' in url:
                prod_id = re.findall('productbrand_(\d+).html', url)
                prod_id = prod_id[0] if prod_id else ''
                if prod_id:
                    if prod_id in self.product_ids:
                        continue
                    else:
                        self.product_ids.append(prod_id)
                yield Request(url,
                              callback=self.parse_product,
                              meta={"dont_merge_cookies": True})
            else:
                yield Request(url, meta={"dont_merge_cookies": True})
Example #55
 def populate_vars(self, response=None, request=None, spider=None):
     self.vars['item'] = self.item_class()
     self.vars['settings'] = self.crawler.settings
     self.vars['spider'] = spider
     self.vars['request'] = request
     self.vars['response'] = response
     self.vars['xxs'] = XmlXPathSelector(response) \
         if isinstance(response, XmlResponse) else None
     self.vars['hxs'] = HtmlXPathSelector(response) \
         if isinstance(response, HtmlResponse) else None
     if self.inthread:
         self.vars['fetch'] = self.fetch
     self.vars['view'] = open_in_browser
     self.vars['shelp'] = self.print_help
     self.update_vars(self.vars)
     if not self.code:
         self.print_help()
    def parse(self, response):
        if self.scraper.content_type == 'H':
            xs = HtmlXPathSelector(response)
        else:
            xs = XmlXPathSelector(response)
        base_elem = self.scraper.get_base_elem()
        url_elem = self.scraper.get_detail_page_url_elem()
        base_objects = xs.select(base_elem.x_path)
        if (len(base_objects) == 0):
            self.log("No base objects found!", log.ERROR)

        if (self.conf['MAX_ITEMS_READ']):
            items_left = min(
                len(base_objects),
                self.conf['MAX_ITEMS_READ'] - self.items_read_count)
            base_objects = base_objects[0:items_left]

        for obj in base_objects:
            item_num = self.items_read_count + 1
            self.log("Starting to crawl item %s." % str(item_num), log.INFO)
            item = self.parse_item(response, obj)
            #print item
            url_name = url_elem.scraped_obj_attr.name
            if (item and url_name in item):
                url = item[url_name]
                cnt = self.scraped_obj_class.objects.filter(
                    url=item[url_name]).count()
                cnt1 = self.scraper.get_standard_update_elems_from_detail_page(
                ).count()
                cnt2 = self.scraper.get_from_detail_page_scrape_elems().count()
                # Mark item as DOUBLE item
                if cnt > 0:
                    item[url_name] = 'DOUBLE' + item[url_name]
                # (DOUBLE item with no standard update elements to be scraped from detail page) or
                # generally no attributes scraped from detail page
                if (cnt > 0 and cnt1 == 0) or cnt2 == 0:
                    yield item
                else:
                    yield Request(url,
                                  callback=self.parse_item,
                                  meta={'item': item})
            else:
                self.log("Detail page url elem could not be read!", log.ERROR)
Example #57
    def parse_rss(self, response):
        item = response.request.meta['item']

        if response.status != 500:
            xxs = XmlXPathSelector(response)
            xxs.remove_namespaces()

            item['date'] = xxs.select('.//channel/date/text()').extract()
            description = xxs.select('.//channel/description/text()').extract()
            if (len(item.get('description', '')) < 10) and description:
                item['description'] = ''.join(description).strip()

        del (item['subpage_urls'])

        return item
Example #58
def xmliter(obj, nodename):
    """Return a iterator of XPathSelector's over all nodes of a XML document,
       given tha name of the node to iterate. Useful for parsing XML feeds.

    obj can be:
    - a Response object
    - a unicode string
    - a string encoded as utf-8
    """
    HEADER_START_RE = re.compile(r'^(.*?)<\s*%s(?:\s|>)' % nodename, re.S)
    HEADER_END_RE = re.compile(r'<\s*/%s\s*>' % nodename, re.S)
    text = body_or_str(obj)

    header_start = re.search(HEADER_START_RE, text)
    header_start = header_start.group(1).strip() if header_start else ''
    header_end = re_rsearch(HEADER_END_RE, text)
    header_end = text[header_end[1]:].strip() if header_end else ''

    r = re.compile(r"<%s[\s>].*?</%s>" % (nodename, nodename), re.DOTALL)
    for match in r.finditer(text):
        nodetext = header_start + match.group() + header_end
        yield XmlXPathSelector(text=nodetext).select('//' + nodename)[0]
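A minimal usage sketch; per the docstring above, xmliter also accepts a plain utf-8 string, which is what this example feeds it:

body = '<rss><channel><item><title>A</title></item><item><title>B</title></item></channel></rss>'
for node in xmliter(body, 'item'):
    print node.select('title/text()').extract()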
Example #59
 def parse(self, response):
     xxs = XmlXPathSelector(response)
     for title in xxs.select("//item/title/text()").extract():
         log.msg(title)
Example #60
    def parse(self, response):
        xxs = XmlXPathSelector(response)
        links = xxs.select("//link/text()").extract()

        return [Request(x, callback=self.parse_item) for x in links]