Example #1
def __init__(self):
    self.commonParser = CommonParser()
    self.my_conn = MySQLdb.connect(host='127.0.0.1',
                                   port=3306,
                                   user='******',
                                   passwd='',
                                   db='scrapy',
                                   charset="utf8")
Example #2
def run(self):
    for item in self.proxy.getProxyData():
        result = CommonParser().check_proxy(item.type, item.id)
        if not result.get('status'):
            status = self.proxy.delByid(item.id)
            if not status:
                logging.error('id:%s, delete failed' % item.id)
            else:
                logging.info('id:%s has expired.' % item.id)
        time.sleep(5)
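
Examples 2, 7, and 11 all consume CommonParser().check_proxy(type, 'ip:port') as a dict carrying a boolean 'status' and a connection 'time'. The helper itself never appears in these snippets; a minimal sketch of that contract, assuming a requests-based probe (the httpbin.org endpoint and 10-second timeout are illustrative choices, not the project's actual ones), might look like this:

import time

import requests


def check_proxy(proxy_type, address, timeout=10):
    # Probe the proxy once and report whether it still answers, mirroring
    # the {'status': ..., 'time': ...} shape the callers read back.
    scheme = proxy_type.lower()  # the scraped tables list 'HTTP' / 'HTTPS'
    proxies = {scheme: '%s://%s' % (scheme, address)}
    started = time.time()
    try:
        requests.get('http://httpbin.org/ip', proxies=proxies,
                     timeout=timeout)
        return {'status': True, 'time': time.time() - started}
    except requests.RequestException:
        return {'status': False, 'time': None}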
Example #3
def __init__(self, *args, **kwargs):
    super(WeixinArticleModelSplider, self).__init__(*args, **kwargs)
    self.commonParser = CommonParser()
    self.params = [
        'filter=&hasDeal=false&keyName=囧哥&order=NRI&nonce=02b470cba&xyz=fea918dd8f8c6ef11e86c133488307d0',
        'filter=&hasDeal=false&keyName=Android&order=NRI&nonce=3be259249&xyz=6791f97678b068f073d20b50dfd7f539',
        'filter=&hasDeal=false&keyName=Python&order=NRI&nonce=70445ca16&xyz=260e43ae615c94b2eaca971e3ce73703',
        'filter=&hasDeal=false&keyName=数据&order=NRI&nonce=9dd7a9958&xyz=fbd59bf9a2f3f9182a64fb692fd0235b',
        'filter=&hasDeal=false&keyName=开发&order=NRI&nonce=c5578b10b&xyz=b3d0e82ccc5661fd3263a602a69623d0',
        'filter=&hasDeal=false&keyName=admin10000_com&order=NRI&nonce=16fc1edb8&xyz=a14ec9997de44d523c0a32cbbf23da77',
        'filter=&hasDeal=false&keyName=admin10000_com&order=NRI&nonce=ee82c8939&xyz=f2c64b494328ab6d6f03fbad9595c439',
        'filter=&hasDeal=false&keyName=Google_Developers&order=NRI&nonce=3ad6a673e&xyz=d465a25f578dab82dcb69f8c66ead7cb',
        'filter=&hasDeal=false&keyName=Google_Developers&order=NRI&nonce=5937876ac&xyz=64647fbb63128bd5cbb264be60be540b',
        'filter=&hasDeal=false&keyName=开发者&order=NRI&nonce=5ef9f5d58&xyz=3bf9b02f655db2fd5bbeef90b3ac4e63',
        'filter=&hasDeal=false&keyName=河南科技学院&order=relation&nonce=41f213076&xyz=49025d05d71cac5e505dfe3e37fd238c',
        'filter=&hasDeal=false&keyName=核心商业机密&order=NRI&nonce=3cb397b76&xyz=a175e9ed375709c3e844a16fde9dbd01'
    ]
Example #4
class UpdateCityToLat(object):
    def __init__(self):
        self.commonParser = CommonParser()
        self.my_conn = MySQLdb.connect(host='127.0.0.1',
                                       port=3306,
                                       user='******',
                                       passwd='',
                                       db='scrapy',
                                       charset="utf8")

    def run(self):
        pass

    def getData(self):
        try:
            sql_str = 'select * from city_loca where lat is null;'
            cur = self.my_conn.cursor()
            cur.execute(sql_str)
            # walk every city row that still lacks coordinates; the original
            # snippet discarded a row with fetchone() and re-ran the query
            # inside fetchmany()
            for item in cur.fetchall():
                city_code = item[0]
                city = item[1]
                location = self.commonParser.getLatLonByCityName(city)
                if not location:
                    continue
                lng = location[0]
                lat = location[1]
                # self.insertLocation(lng, lat, str(city_code))
                print city, location
                time.sleep(0.5)
        except Exception as e:
            traceback.print_exc()
            logging.error(e)
        finally:
            # the source snippet is truncated here; closing the cursor is
            # the natural cleanup for this method
            cur.close()
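
Example 4 also leans on CommonParser.getLatLonByCityName(city), which is not shown; the loop reads lng from index 0 and lat from index 1. A sketch under those assumptions, using the Amap geocoding API as a hypothetical provider (the real parser may wrap a different service, and 'YOUR_KEY' is a placeholder):

import requests


def getLatLonByCityName(city, api_key='YOUR_KEY'):
    # Resolve a Chinese city name to [lng, lat], or None when geocoding
    # fails, matching how Example 4 consumes the result.
    resp = requests.get('http://restapi.amap.com/v3/geocode/geo',
                        params={'address': city, 'key': api_key},
                        timeout=10).json()
    geocodes = resp.get('geocodes')
    if not geocodes:
        return None
    lng, lat = geocodes[0]['location'].split(',')
    return [float(lng), float(lat)]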
Example #5
class WeixinArticleModelSplider(CrawlSpider):
    name = 'article.weixin.model'
    custom_settings = {
        'RETRY_TIMES': 50,
        'ITEM_PIPELINES': {
            'CuteScrapy.pipelines.MysqlORMPipeline': 300
        },
        'DOWNLOADER_MIDDLEWARES': {
            # 'CuteScrapy.middlewares.RandomProxyMiddleware': 800,
            'CuteScrapy.middlewares.UserAgentMiddleware': 600
        },
        'DOWNLOAD_TIMEOUT': 120,
        'CONCURRENT_REQUESTS': 5,
        'REACTOR_THREADPOOL_MAXSIZE': 10
    }

    def __init__(self, *args, **kwargs):
        super(WeixinArticleModelSplider, self).__init__(*args, **kwargs)
        self.commonParser = CommonParser()
        self.params = [
            'filter=&hasDeal=false&keyName=囧哥&order=NRI&nonce=02b470cba&xyz=fea918dd8f8c6ef11e86c133488307d0',
            'filter=&hasDeal=false&keyName=Android&order=NRI&nonce=3be259249&xyz=6791f97678b068f073d20b50dfd7f539',
            'filter=&hasDeal=false&keyName=Python&order=NRI&nonce=70445ca16&xyz=260e43ae615c94b2eaca971e3ce73703',
            'filter=&hasDeal=false&keyName=数据&order=NRI&nonce=9dd7a9958&xyz=fbd59bf9a2f3f9182a64fb692fd0235b',
            'filter=&hasDeal=false&keyName=开发&order=NRI&nonce=c5578b10b&xyz=b3d0e82ccc5661fd3263a602a69623d0',
            'filter=&hasDeal=false&keyName=admin10000_com&order=NRI&nonce=16fc1edb8&xyz=a14ec9997de44d523c0a32cbbf23da77',
            'filter=&hasDeal=false&keyName=admin10000_com&order=NRI&nonce=ee82c8939&xyz=f2c64b494328ab6d6f03fbad9595c439',
            'filter=&hasDeal=false&keyName=Google_Developers&order=NRI&nonce=3ad6a673e&xyz=d465a25f578dab82dcb69f8c66ead7cb',
            'filter=&hasDeal=false&keyName=Google_Developers&order=NRI&nonce=5937876ac&xyz=64647fbb63128bd5cbb264be60be540b',
            'filter=&hasDeal=false&keyName=开发者&order=NRI&nonce=5ef9f5d58&xyz=3bf9b02f655db2fd5bbeef90b3ac4e63',
            'filter=&hasDeal=false&keyName=河南科技学院&order=relation&nonce=41f213076&xyz=49025d05d71cac5e505dfe3e37fd238c',
            'filter=&hasDeal=false&keyName=核心商业机密&order=NRI&nonce=3cb397b76&xyz=a175e9ed375709c3e844a16fde9dbd01'
        ]

    def start_requests(self):
        for param in self.params:
            formdata = self.commonParser.getDictFromString(param)
            yield FormRequest(
                "http://www.newrank.cn/xdnphb/data/weixinuser/searchWeixinDataByCondition",
                formdata=formdata,
                meta={
                    'type': 'list',
                    'formdata': formdata
                },
                dont_filter=True)

    def parse(self, response):
        if response.meta['type'] == 'list':
            for item in self.parse_list(response):
                yield item

    def parse_list(self, response):
        result = json.loads(response.body_as_unicode())
        if result.get('success'):
            for item in result.get('value').get('result'):
                try:
                    if item.get('account') is None:
                        self.logger.info('%s was not crawled!' % re.sub(
                            pattern, '',
                            item.get('name') or item.get('description')))
                        continue
                    wechatModel = WechatModel()
                    wechatModel.account = item.get('account')
                    wechatModel.name = re.sub(pattern, '', item.get('name'))
                    wechatModel.categories = item.get('type')
                    wechatModel.tags = re.sub(pattern, '', ','.join(
                        item.get('tags'))) if item.get('tags') else None
                    wechatModel.description = re.sub(pattern, '',
                                                     item.get('description'))
                    wechatModel.qrcode_img = item.get('codeImageUrl')
                    wechatModel.icon_img = item.get('headImageUrl')
                    wechatModel.xb_url = 'http://www.newrank.cn/public/info/detail.html?account=%s' % item.get(
                        'account')
                    yield ModelItem.getInstance(wechatModel)
                except Exception as e:
                    traceback.print_exc()
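
start_requests above hands each query string in self.params to CommonParser.getDictFromString before posting it. Given how those strings are shaped, a plausible sketch is a plain key=value splitter (the exact semantics are assumed from usage, not taken from the project):

def getDictFromString(param_string):
    # Turn 'filter=&hasDeal=false&keyName=...' into a dict for FormRequest,
    # keeping empty values so every field is still posted.
    return dict(pair.split('=', 1) for pair in param_string.split('&'))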
Example #6
def __init__(self, *args, **kwargs):
    super(ProxySplider, self).__init__(*args, **kwargs)
    self.site = 'kuai'
    self.commonParser = CommonParser()
Example #7
class ProxySplider(CrawlSpider):
    name = 'proxy.kuaidaili'
    custom_settings = {
        'RETRY_TIMES': 50,
        'ITEM_PIPELINES': {
            'CuteScrapy.pipelines.MysqlORMPipeline': 300,
        },
        # 'DOWNLOADER_MIDDLEWARES': {
        #     # 'CuteScrapy.middlewares.RandomProxyMiddleware': 800,
        #     'CuteScrapy.middlewares.UserAgentMiddleware': 600
        # },
        'DOWNLOAD_TIMEOUT': 120,
        'CONCURRENT_REQUESTS': 2,
        'REACTOR_THREADPOOL_MAXSIZE': 10
    }

    def __init__(self, *args, **kwargs):
        super(ProxySplider, self).__init__(*args, **kwargs)
        self.site = 'kuai'
        self.commonParser = CommonParser()

    def start_requests(self):
        page_no = 1
        yield Request(
            'http://www.kuaidaili.com/free/inha/%s/' % page_no,
            headers={
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'},
            meta={'type': 'list', 'page_no': page_no},
            dont_filter=True
        )

    def parse(self, response):
        if response.status == 200:
            if response.meta['type'] == 'list':
                for item in self.parse_list(response):
                    yield item

    def parse_list(self, response):
        self.logger.info('page no is:%s' % response.meta['page_no'])
        for item in response.xpath('//div[@id="list"]/table/tbody/tr'):
            ip = item.xpath(u'td[@data-title="IP"]/text()').extract_first()
            port = item.xpath(u'td[@data-title="PORT"]/text()').extract_first()
            anonymity = item.xpath(u'td[@data-title="匿名度"]/text()').extract_first()
            type = item.xpath(u'td[@data-title="类型"]/text()').extract_first()
            url = '%s:%s' % (ip, port)
            result = self.commonParser.check_proxy(type, url)
            if not result.get('status'):
                self.logger.info('ip:%s has expired.' % url)
                continue
            else:
                print result
            city_list = self.commonParser.parseLocationByIp(ip)
            proxy = Proxy()
            proxy.id = url
            proxy.site = self.site
            proxy.ip = ip
            proxy.port = port
            proxy.type = type
            proxy.province = city_list.get('province')
            proxy.city = city_list.get('city')
            proxy.anonymity = True if anonymity == u'高匿名' else False
            proxy.site_conn_time = result.get('time')
            proxy.date_update = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            yield ModelItem.getInstance(proxy)
        if response.meta['page_no'] < 100:
            yield Request(
                'http://www.kuaidaili.com/free/inha/%s/' % (response.meta['page_no'] + 1),
                headers={
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'},
                meta={'type': 'list', 'page_no': response.meta['page_no'] + 1},
                dont_filter=True
            )
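
Both proxy spiders also call CommonParser.parseLocationByIp(ip) and read 'province' and 'city' out of the result. A minimal sketch of that lookup, assuming the free ip-api.com endpoint as a stand-in for whatever geolocation source the real parser wraps:

import requests


def parseLocationByIp(ip):
    # Resolve an IP to the {'province': ..., 'city': ...} dict the spiders
    # expect; both values come back as None when the lookup fails.
    try:
        data = requests.get('http://ip-api.com/json/%s?lang=zh-CN' % ip,
                            timeout=10).json()
    except (requests.RequestException, ValueError):
        data = {}
    return {'province': data.get('regionName'), 'city': data.get('city')}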
Example #8
def __init__(self, *args, **kwargs):
    super(CsdnSplider, self).__init__(*args, **kwargs)
    self.commonParser = CommonParser()
    self.blogs = Blogs()
    self.site = 'csdn'
Example #9
def __init__(self, *args, **kwargs):
    super(MinappSplider, self).__init__(*args, **kwargs)
    self.commonParser = CommonParser()
    self.applet = Applet()
    self.site = 'minapp'
Example #10
def __init__(self):
    self.proxy = Proxy()
    self.commonParser = CommonParser()
Example #11
class ProxySplider(CrawlSpider):
    name = 'proxy.xici'
    custom_settings = {
        'RETRY_TIMES': 50,
        'ITEM_PIPELINES': {
            'CuteScrapy.pipelines.MysqlORMPipeline': 300,
            # 'CuteScrapy.pipelines.JsonWriterPipeline': 350,
        },
        'DOWNLOADER_MIDDLEWARES': {
            'CuteScrapy.middlewares.RandomProxyMiddleware': 800,
            'CuteScrapy.middlewares.UserAgentMiddleware': 600
        },
        'DOWNLOAD_TIMEOUT': 120,
        'CONCURRENT_REQUESTS': 2,
        'REACTOR_THREADPOOL_MAXSIZE': 10
    }

    def __init__(self, *args, **kwargs):
        super(ProxySplider, self).__init__(*args, **kwargs)
        self.site = 'xici'
        self.commonParser = CommonParser()

    def start_requests(self):
        yield Request('http://www.xicidaili.com/wt/',
                      meta={
                          'type': 'list',
                          'page_no': 1
                      },
                      dont_filter=True)

    def parse(self, response):
        if response.status == 200:
            if response.meta['type'] == 'list':
                for item in self.parse_list(response):
                    yield item

    def parse_list(self, response):
        self.logger.info('page no is:%s' % response.meta['page_no'])
        for item in response.xpath('//table[@id="ip_list"]/tr'):
            cells = item.xpath('td').extract()  # header rows carry no <td>
            if not cells:
                continue
            ip = item.xpath('td[2]//text()').extract_first()
            port = item.xpath('td[3]//text()').extract_first()
            anonymity = True if item.xpath(
                'td[5]//text()').extract_first() == u'高匿' else False
            type = item.xpath('td[6]//text()').extract_first()

            url = '%s:%s' % (ip, port)
            result = self.commonParser.check_proxy(type, url)
            if not result.get('status'):
                self.logger.info('ip:%s has expired.' % url)
                continue
            else:
                print result
            city_list = self.commonParser.parseLocationByIp(ip)

            proxy = Proxy()
            proxy.id = url
            proxy.site = self.site
            proxy.ip = ip
            proxy.port = port
            proxy.type = type
            proxy.province = city_list.get('province')
            proxy.city = city_list.get('city')
            # anonymity was already parsed into a bool above; comparing that
            # bool against u'高匿名' always produced False
            proxy.anonymity = anonymity
            proxy.site_conn_time = result.get('time')
            proxy.date_update = datetime.datetime.now().strftime(
                '%Y-%m-%d %H:%M:%S')
            yield ModelItem.getInstance(proxy)
        next_page = response.xpath(
            '//a[@class="next_page"]/@href').extract_first()
        if next_page and response.meta['page_no'] < 100:
            yield Request('http://www.xicidaili.com%s' % next_page,
                          meta={
                              'type': 'list',
                              'page_no': response.meta['page_no'] + 1
                          },
                          dont_filter=True)
Example #12
class BlogsSplider(CrawlSpider):
    name = 'blogs.cnblogs'
    custom_settings = {
        'RETRY_TIMES': 50,
        'ITEM_PIPELINES': {
            'CuteScrapy.pipelines.MysqlORMPipeline': 300,
            # 'CuteScrapy.pipelines.JsonWriterPipeline': 350,
        },
        'DOWNLOAD_TIMEOUT': 120,
        'CONCURRENT_REQUESTS': 5,
        'REACTOR_THREADPOOL_MAXSIZE': 10
    }

    def __init__(self, *args, **kwargs):
        super(BlogsSplider, self).__init__(*args, **kwargs)
        self.commonParser = CommonParser()
        self.blogs = Blogs()
        self.site = 'cnblogs'

    def start_requests(self):
        for i in range(1, 20):
            yield Request(
                "http://www.cnblogs.com/sitehome/p/%d" % (i),
                meta={'type': 'list'},
                dont_filter=True
            )

    def parse(self, response):
        if response.status == 200:
            if response.meta['type'] == 'list':
                for item in self.parse_blogs(response):
                    yield item

    def parse_blogs(self, response):
        for item in response.xpath('//*[@id="post_list"]/div'):
            page_url = item.xpath('div[@class="post_item_body"]/h3/a/@href').extract_first()
            id = hashlib.md5(page_url).hexdigest()
            title = item.xpath('div[@class="post_item_body"]/h3/a/text()').extract_first()
            if self.blogs.isExistById(id):
                self.logger.info('id:%s is exist!' % id)
                continue
            author = item.xpath('div/div[@class="post_item_foot"]/a/text()').extract_first()
            avatar = item.xpath('div[@class="post_item_body"]/p/a/img/@src').extract_first()
            blog_url = item.xpath('div/div[@class="post_item_foot"]/a/@href').extract_first()
            summary = self.commonParser.trim(''.join(item.xpath('div[@class="post_item_body"]/p/text()').extract()))
            dateStr = self.commonParser.trim(''.join(item.xpath('div[@class="post_item_body"]/div/text()').extract()))
            pv = item.xpath('div[2]/div/span[2]/a/text()').extract_first().strip()
            cv = item.xpath('div[2]/div/span[1]/a/text()').extract_first().strip()
            positive = item.xpath('div[1]/div[1]/span/text()').extract_first()
            pv = re.findall(r'\d+', pv)[0]
            cv = re.findall(r'\d+', cv)[0]

            blogs = Blogs()
            blogs.id = id
            blogs.site = self.site
            blogs.title = title
            blogs.label = None
            blogs.author = author
            blogs.summary = summary
            blogs.content = None
            blogs.avatar = avatar
            blogs.page_url = page_url
            blogs.blog_url = blog_url
            blogs.pv = pv
            blogs.cv = cv
            blogs.positive = positive
            blogs.publish_time = parseDateString(dateStr)
            blogs.date_update = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            yield ModelItem.getInstance(blogs)
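
parse_blogs feeds the raw date text into parseDateString, defined elsewhere in the project. cnblogs list entries carry timestamps such as u'发布于 2017-01-05 09:30'; a sketch under that assumption:

import datetime
import re


def parseDateString(date_str):
    # Pull a 'YYYY-MM-DD HH:MM' timestamp out of the surrounding text;
    # return None when no timestamp is present.
    match = re.search(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}', date_str or '')
    if not match:
        return None
    return datetime.datetime.strptime(match.group(), '%Y-%m-%d %H:%M')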
Example #13
class MinappSplider(CrawlSpider):
    name = 'news.weixin'
    custom_settings = {
        'RETRY_TIMES': 50,
        'ITEM_PIPELINES': {
            'CuteScrapy.pipelines.MysqlORMPipeline': 300
        },
        'DOWNLOADER_MIDDLEWARES': {
            # 'CuteScrapy.middlewares.RandomProxyMiddleware': 800,
            'CuteScrapy.middlewares.UserAgentMiddleware': 600
        },
        'DOWNLOAD_TIMEOUT': 120,
        'CONCURRENT_REQUESTS': 5,
        'REACTOR_THREADPOOL_MAXSIZE': 10
    }

    def __init__(self, *args, **kwargs):
        super(MinappSplider, self).__init__(*args, **kwargs)
        self.commonParser = CommonParser()
        self.news = News()
        self.site = 'minapp'

    def start_requests(self):
        for key in [u'余额宝', u'金岩财富网']:
            url = 'http://weixin.sogou.com/weixin?type=2&query=%s&ie=utf8&_sug_=n&_sug_type_&page=%s&tsn=2' % (
                key, 1)
            yield Request(url,
                          meta={
                              'type': 'list',
                              'keywords': key,
                              'sentiment': False,
                              'page_no': 1
                          },
                          dont_filter=True)

    def parse(self, response):
        if response.status == 200:
            if response.meta['type'] == 'list':
                for item in self.parse_list(response):
                    yield item
            elif response.meta['type'] == 'detail':
                opinion = response.meta['opinion']
                opinion.page_url = response.url
                yield ModelItem.getInstance(opinion)

    def parse_list(self, response):
        keywords = response.meta['keywords']
        page_no = response.meta['page_no']
        self.logger.info('keywords:%s,page_no:%s' % (keywords, page_no))
        next_page = False
        for item in response.xpath('//ul[@class="news-list"]/li'):
            try:
                page_url = item.xpath('div/h3/a/@href').extract_first()
                title = ''.join(item.xpath('div/h3/a//text()').extract())
                publish_time_stamp = item.xpath(
                    'div/div/span/script').extract_first()
                publish_time = timestamp2datetime(
                    int(re.search(r'(\d{10})', publish_time_stamp).group(1)))
                summary = ''.join(item.xpath('div/p//text()').extract())
                account = item.xpath('div/div/a/text()').extract_first()  # official account name
                if title.find(keywords) > -1 or summary.find(keywords) > -1:
                    next_page = True
                else:
                    next_page = False
                    continue
                id = hashlib.md5(
                    (publish_time_stamp + title).encode('utf8')).hexdigest()
                if self.news.isExistById(id):
                    self.logger.info('title:%s is exist!' % title)
                    continue
                opinion = News()
                opinion.id = id
                opinion.site = self.site
                opinion.title = u'%s   ——%s' % (title, account)
                opinion.keyword = keywords
                opinion.summary = self.commonParser.replaceSpace(summary)
                # if response.meta['sentiment'] and opinion.content != None and opinion.content != '':
                #     list = self.commonParser.getBosonNLP(opinion.content)
                #     opinion.positive = list[0][0]
                #     opinion.negative = list[0][1]
                opinion.page_url = page_url
                opinion.status = 0
                opinion.publish_time = publish_time
                opinion.comment_time = publish_time
                now = datetime.datetime.now()
                opinion.date_create = now.strftime('%Y-%m-%d %H:%M:%S')
                yield Request(
                    page_url +
                    '&pass_ticket=qMx7ntinAtmqhVn+C23mCuwc9ZRyUp20kIusGgbFLi0=&uin=MTc1MDA1NjU1&ascene=1',
                    meta={
                        'type': 'detail',
                        'opinion': opinion,
                        'url': page_url
                    },
                    dont_filter=True)
            except Exception as e:
                self.logger.error(e)
        if next_page:
            next_page_no = response.meta['page_no'] + 1
            yield Request(
                url=
                'http://weixin.sogou.com/weixin?type=2&query=%s&ie=utf8&_sug_=n&_sug_type_&page=%s&tsn=2'
                % (keywords, next_page_no),
                meta={
                    'type': 'list',
                    'keywords': keywords,
                    'sentiment': response.meta['sentiment'],
                    'page_no': next_page_no
                },
                dont_filter=True)

    def close(self, reason):
        pass
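
parse_list above converts the 10-digit unix timestamp that sogou embeds in a <script> tag via timestamp2datetime, another helper defined elsewhere; the obvious sketch:

import datetime


def timestamp2datetime(seconds):
    # Convert a unix timestamp in seconds to the 'YYYY-MM-DD HH:MM:SS'
    # string format the News model stores.
    return datetime.datetime.fromtimestamp(seconds).strftime(
        '%Y-%m-%d %H:%M:%S')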
Example #14
def __init__(self, *args, **kwargs):
    super(ProxyXiaoShuSplider, self).__init__(*args, **kwargs)
    self.site = 'xsdaili'
    self.commonParser = CommonParser()
Example #15
class XcxwoSplider(CrawlSpider):
    name = 'applet.xcxwo'
    custom_settings = {
        'RETRY_TIMES': 50,
        'ITEM_PIPELINES': {
            'CuteScrapy.pipelines.MysqlORMPipeline': 300
        },
        'DOWNLOADER_MIDDLEWARES': {
            # 'CuteScrapy.middlewares.RandomProxyMiddleware': 800,
            'CuteScrapy.middlewares.UserAgentMiddleware': 600
        },
        'DOWNLOAD_TIMEOUT': 120,
        'CONCURRENT_REQUESTS': 5,
        'REACTOR_THREADPOOL_MAXSIZE': 10
    }

    def __init__(self, *args, **kwargs):
        super(XcxwoSplider, self).__init__(*args, **kwargs)
        self.commonParser = CommonParser()
        self.applet = Applet()
        self.site = 'xcxwo'

    def start_requests(self):
        yield FormRequest("http://www.xcxwo.com/app/appList",
                          formdata={
                              "ckey": '',
                              "page": '1',
                              "q": ''
                          },
                          meta={
                              'type': 'list',
                              'page_no': 1
                          },
                          dont_filter=True)

    def parse(self, response):
        if not response.body_as_unicode():
            self.logger.info(u'No more results; %s pages in total.'
                             % (response.meta['page_no'] - 1))
            return
        if response.meta['type'] == 'list':
            for item in self.parse_list(response):
                yield item
        elif response.meta['type'] == 'detail':
            for item in self.parse_detail(response):
                yield item

    def parse_list(self, response):
        for item in response.xpath('//body/div/a'):
            url = item.xpath('@href').extract_first()
            icon = item.xpath('div[@class="header"]/img/@src').extract_first()
            name = item.xpath(
                'div[@class="header"]/div[@class="title left"]/h1/text()'
            ).extract_first()
            author = self.commonParser.trim(
                item.xpath(
                    'div[@class="header"]/div[@class="title left"]/p/text()').
                extract_first().replace(u'发布者:', ''))
            label = ','.join(
                item.xpath('div[@class="cate"]/span/text()').extract())
            star = len(
                item.xpath(
                    'div[@class="i-footer"]/div[@class="stars left"]/i[@class="fa fa-star star-active"]'
                ).extract())
            heart_share = item.xpath(
                'div[@class="i-footer"]/div[@class="right"]/text()').extract()
            heart = heart_share[1].replace(' ', '')
            share = heart_share[2].replace(' ', '')
            qrcode = item.xpath(
                'div[@class="qrcodeTooltip"]/img/@src').extract_first()
            # qrcode_conetnt = ','.join(self.commonParser.getContentFromQrcode(qrcode))
            id = hashlib.md5(
                (u'%s%s' % (name, author)).encode("utf8")).hexdigest()
            applet = Applet()
            applet.id = '%s_%s' % (self.site, id)
            applet.site = self.site
            applet.name = name
            applet.author = author
            applet.label = label
            applet.star = star
            applet.heart = heart
            applet.share = share
            applet.page_url = 'http://www.xcxwo.com%s' % url
            applet.icon = icon
            applet.qrcode = qrcode
            # applet.qrcode_conetnt = qrcode_conetnt
            yield Request('http://www.xcxwo.com%s' % url,
                          meta={
                              'type': 'detail',
                              'applet': applet
                          },
                          dont_filter=True)
        yield FormRequest("http://www.xcxwo.com/app/appList",
                          formdata={
                              "ckey": '',
                              "page": str(response.meta['page_no'] + 1),
                              "q": ''
                          },
                          meta={
                              'type': 'list',
                              'page_no': response.meta['page_no'] + 1
                          },
                          dont_filter=True)

    def parse_detail(self, response):
        applet = response.meta['applet']
        applet.pictures = ','.join(
            response.xpath(
                '//table[@class="table"]/tr/td/div/a/@href').extract())
        applet.summary = self.commonParser.trim(
            response.xpath(
                '//span[@class="app-description"]/text()').extract_first())
        applet.publish_time = response.xpath(
            '//span[@class="app-info-item"]/text()').extract_first()
        yield ModelItem.getInstance(applet)
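
Examples 12 and 15 both clean extracted text with CommonParser.trim. Its implementation is not shown; judging by how it is applied to whitespace-heavy HTML extractions, a whitespace-collapsing sketch fits:

import re


def trim(text):
    # Collapse runs of whitespace (spaces, tabs, newlines left over from
    # HTML extraction) into single spaces and strip the ends.
    return re.sub(r'\s+', ' ', text or '').strip()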