Exemple #1
0
 def parseCat(self, response):
     """Store every quote on a category page, then follow pagination.

     Inserts each quote into the ``quotes`` collection and yields a
     Request for the next page when a "Next" link exists.
     """
     parser = HtmlParser(response)
     # Quotes are inserted on every page, paginated or not, so the insert
     # loop is hoisted out of the branches (it was duplicated verbatim).
     for quote in parser.xpath('//span[@class="bqQuoteLink"]/a//text()'):
         self.mydb.quotes.insert({'quote': quote})
     # Follow pagination only when a literal "Next" link text is present,
     # preserving the original guard exactly.
     if 'Next' in parser.xpath('//li/a/text()'):
         for url in parser.extract_urls('//li/a[contains(text(),"Next")]'):
             yield Request(url, callback="parseCat")
Exemple #2
0
 def parseAnimals(self, response):
     """Yield one Request per search result, then follow the "next" link.

     Each result URL is dispatched to ``parseAnimal``; the pagination URL
     (when present) is re-dispatched to this method.
     """
     html = HtmlParser(response)
     # Evaluate the pagination selector once instead of up to three times.
     next_pages = html.extract_urls('//div[@class="pagination"]/a[@class="next"]')
     # Results are crawled on every page, so the loop is hoisted out of
     # the branches (it was duplicated verbatim in if/else).
     for url in html.extract_urls('//div[@id="search_results"]/div/a'):
         yield Request(url, callback="parseAnimal")
     # Yields nothing when there is no next page — same as the old else.
     for url in next_pages:
         yield Request(url, callback="parseAnimals")
Exemple #3
0
 def parseCat(self, response):
     """Store quote/author pairs in the per-category collection, then paginate.

     The target collection name arrives in ``response.meta['u']`` and is
     forwarded to the next-page Request so the whole category lands in
     one collection.
     """
     parser = HtmlParser(response)
     dbname = response.meta['u']
     # Quote extraction runs on every page, so the loop is hoisted out of
     # the branches (it was duplicated verbatim in if/else).
     for node in parser.xpath('//div[@class="quoteText"]'):
         quote = node.text
         # Reset per quote: the original raised NameError (or reused the
         # previous quote's author) when a quote block had no <a> child.
         author = None
         for link in node.iterfind('a'):
             author = link.text  # last <a> wins, matching original behavior
         self.mydb[dbname].insert({'quote': quote, 'author': author})
     # Follow pagination when a next-page link exists.
     if parser.xpath('//a[@class="next_page"]'):
         for url in parser.extract_urls('//a[@class="next_page"]'):
             yield Request(url, callback="parseCat", meta={'u': dbname})
Exemple #4
0
 def parseCat(self, response):
     """Store every quote on a category page, then follow pagination.

     Inserts each quote into the ``quotes`` collection and yields a
     Request for the next page when a "Next" link exists.
     """
     parser = HtmlParser(response)
     # The insert loop ran identically in both branches of the original
     # if/else, so it is hoisted out of the pagination check.
     for quote in parser.xpath('//span[@class="bqQuoteLink"]/a//text()'):
         self.mydb.quotes.insert({'quote': quote})
     # Preserve the original guard: paginate only on an exact "Next" text.
     if 'Next' in parser.xpath('//li/a/text()'):
         for url in parser.extract_urls('//li/a[contains(text(),"Next")]'):
             yield Request(url, callback="parseCat")
Exemple #5
0
    def parseAnimals(self, response):
        """Yield one Request per search result, then follow the "next" link.

        Rewritten with consistent 4-space indentation: the original mixed
        tabs and spaces, which is a SyntaxError under Python 3.
        """
        html = HtmlParser(response)
        # Evaluate the pagination selector once instead of up to three times.
        next_pages = html.extract_urls('//div[@class="pagination"]/a[@class="next"]')
        # Results are crawled on every page, so the loop is hoisted out of
        # the branches (it was duplicated verbatim in if/else).
        for url in html.extract_urls('//div[@id="search_results"]/div/a'):
            yield Request(url, callback="parseAnimal")
        # Yields nothing when there is no next page — same as the old else.
        for url in next_pages:
            yield Request(url, callback="parseAnimals")
Exemple #6
0
    def parseCat(self, response):
        """Store quote/author pairs in the per-category collection, then paginate.

        The target collection name arrives in ``response.meta['u']`` and is
        forwarded with the next-page Request so the whole category lands in
        one collection.
        """
        parser = HtmlParser(response)
        dbname = response.meta['u']
        # Quote extraction runs on every page, so the loop is hoisted out
        # of the branches (it was duplicated verbatim in if/else).
        for node in parser.xpath('//div[@class="quoteText"]'):
            quote = node.text
            # Reset per quote: the original raised NameError (or reused the
            # previous quote's author) when a block had no <a> child.
            author = None
            for link in node.iterfind('a'):
                author = link.text  # last <a> wins, matching original behavior
            self.mydb[dbname].insert({'quote': quote, 'author': author})
        # Follow pagination when a next-page link exists.
        if parser.xpath('//a[@class="next_page"]'):
            for url in parser.extract_urls('//a[@class="next_page"]'):
                yield Request(url, callback="parseCat", meta={'u': dbname})
Exemple #7
0
 def parse(self, response):
     """Yield a category-page Request for every category link on the index."""
     category_xpath = '//div[@class="bqLn"]/div[@class="bqLn"]/a'
     page = HtmlParser(response)
     for category_url in page.extract_urls(category_xpath):
         yield Request(category_url, callback="parseCat")
Exemple #8
0
 def parse(self, response):
     """Dispatch one Request per author link, tagging each with its URL slug.

     The last path segment of each link becomes the collection name that is
     carried along in ``meta['u']``.
     """
     parser = HtmlParser(response)
     for link in parser.extract_urls('//a[@class="actionLinkLite serif"]'):
         collection = link.rsplit('/', 1)[-1]
         yield Request(link, callback="parseCat", meta={'u': collection})
Exemple #9
0
 def parse(self, response):
     """Queue a parseCat Request for each author link found on the page.

     Each link's trailing path segment is forwarded as ``meta['u']`` so the
     callback knows which collection to write into.
     """
     author_links = HtmlParser(response).extract_urls(
         '//a[@class="actionLinkLite serif"]')
     for author_link in author_links:
         slug = author_link.rsplit('/', 1)[-1]
         yield Request(author_link, callback="parseCat", meta={'u': slug})
Exemple #10
0
 def parseAnimal(self, response):
     """Download every primary photo on an animal page into the working dir.

     Each file is named after the last path segment of its image URL.
     """
     # Single-argument parenthesized print works under Python 2 and 3;
     # also fixes the "parseAniml" typo in the debug trace.
     print("parseAnimal")
     html = HtmlParser(response)
     print("Downloading........")
     for url in html.xpath('//div[@class="primary_photo"]/a/img/@src'):
         # NOTE(review): saves into the CWD; identical basenames from
         # different pages will silently overwrite each other.
         urlretrieve(url, url.split("/")[-1])
Exemple #11
0
 def parse(self, response):
     """Queue a parseAnimals Request for every photo-list section link."""
     html = HtmlParser(response)
     # The three <li> classes that carry section links on this page.
     selectors = (
         '//li[@class="first"]',
         '//li[@class=" "]',
         '//li[@class="last"]',
     )
     for selector in selectors:
         for link in html.extract_urls(selector):
             yield Request(link, callback="parseAnimals")
Exemple #12
0
 def parse(self, response):
     """Yield one parseCat Request per category link on the index page."""
     parser = HtmlParser(response)
     links = parser.extract_urls('//div[@class="bqLn"]/div[@class="bqLn"]/a')
     for link in links:
         yield Request(link, callback="parseCat")
Exemple #13
0
    def parseAnimal(self, response):
        """Download every primary photo on an animal page into the working dir.

        Rewritten with consistent 4-space indentation: the original mixed
        tabs and spaces, which is a SyntaxError under Python 3. Each file is
        named after the last path segment of its image URL.
        """
        # Single-argument parenthesized print works under Python 2 and 3;
        # also fixes the "parseAniml" typo in the debug trace.
        print('parseAnimal')
        html = HtmlParser(response)
        print('Downloading........')
        for url in html.xpath('//div[@class="primary_photo"]/a/img/@src'):
            # NOTE(review): saves into the CWD; identical basenames from
            # different pages will silently overwrite each other.
            urlretrieve(url, url.split('/')[-1])
Exemple #14
0
    def parse(self, response):
        """Queue a parseAnimals Request for every photo-list section link.

        Rewritten with consistent 4-space indentation: the original mixed
        tabs and spaces, which is a SyntaxError under Python 3.
        """
        html = HtmlParser(response)
        # The three <li> classes that carry section links on this page.
        photo_list = ['//li[@class="first"]', '//li[@class=" "]', '//li[@class="last"]']
        for item in photo_list:
            for url in html.extract_urls(item):
                yield Request(url, callback="parseAnimals")