Code Example #1
File: views.py  Project: harriewang/bigouzu
# Imports this snippet needs; SearchLog and makedict are defined elsewhere
# in the project (its models and helpers).
import urllib2
import xml.etree.ElementTree as ET

from django.db.models import Count
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response


def Index(request, query):
	"""Bigouzu homepage: show hot search keywords and handle book searches."""
	hotwords = SearchLog.objects.values('keyword').annotate(keyword_count=Count('keyword')).order_by('-keyword_count')[:25]
	
	try:
		query = request.GET['query']			# Get the search keyword
		if not query:
			return HttpResponseRedirect("/")	# Empty keyword: show the homepage again

		# -- Save the search log --
		keyword = query.encode('utf-8')
		ip = request.META['REMOTE_ADDR']
		sessionID = request.session.session_key
		s = SearchLog(keyword=keyword, ip=ip, sessionID=sessionID)
		s.save()
		
		# -- Pass the user's keyword to the API and parse the XML response --
		url = 'http://bigouzu.com/search/api/books/q=' + query	# Build the API URL
		url = url.encode('utf-8')						# Encode the URL as UTF-8
		url = urllib2.unquote(url)						# Unquote percent-encoded characters
		data = urllib2.urlopen(url)						# Fetch the API response
		root = ET.parse(data).getroot()					# Parse the XML with ElementTree
		query = root.find('Query').text					# The echoed query string
		num = root.find('Num').text						# The number of results
		detail = root.findall('Detail')					# All matching book entries
		books = []
		for d in detail:
			info = makedict(title=d.find('book_title').text, author=d.find('book_author').text,
							siteName=d.find('site_name').text, siteUrl=d.find('site_url').text,
							siteLogo=d.find('site_logo').text, youhui=d.find('youhui').text,
							oprice=d.find('old_price').text, price=d.find('price').text,
							buyUrl=d.find('buy_url').text, state=d.find('book_state').text,
							deliverInfo=d.find('deliver_info').text, imgUrl=d.find('book_imgUrl').text,
							)
			books.append(info)							# Collect every book's info into a list
		data.close()									# Close the response object
		context = {'books': books, 'query': query, 'num': num}
		return render_to_response('books/search_result.html', context)
	except Exception:
		# On any failure (missing 'query' parameter, network or parse error),
		# fall back to the homepage with the hot-keyword list.
		return render_to_response('index.html', {'hotwords': hotwords})
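
Note: makedict is not shown in this excerpt. Judging only by how it is called
above, a minimal sketch of what it likely does (this is a guess, not the
project's actual definition):

# Hypothetical helper assumed by the view above -- its call sites suggest it
# simply packs its keyword arguments into a plain dict.
def makedict(**kwargs):
	return kwargs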
Code Example #2
File: views.py  Project: mpac/deeptweet
# 'api' (a Twitter search client exposing GetSearch), 'log', 'handler', and
# the models (ItemSpelling, SearchLog) are assumed to be defined elsewhere
# in this project.
def search(item):
	spellings = ItemSpelling.objects.filter(item = item.id, item__active = True)

	# item.modifier.data and item.radius.data hold serialized Python literals;
	# eval() deserializes them (ast.literal_eval would be the safer choice).
	modifiers = eval(item.modifier.data)
	radius = eval(item.radius.data)

	log.debug('MODIFIERS: ' + ', '.join(modifiers))

	for modifier in modifiers:
		log.debug('MODIFIER: ' + modifier)

		for spelling in spellings:
			if len(modifier):
				# Non-empty modifier: search for "<spelling> <modifier>"
				# without any geographic restriction.
				search_text = spelling.text + ' ' + modifier
				geocode = ()
			else:
				# Empty modifier: search for the spelling alone, restricted
				# to the configured radius if one is set.
				search_text = spelling.text

				if radius:
					geocode = radius
				else:
					geocode = ()

			log.debug('SEARCH TEXT: ' + search_text)
			log.debug('RADIUS DATA LENGTH: %d' % len(geocode))

			# Find the most recent log entry for this exact search so we can
			# resume from the newest tweet ID we have already seen.
			search_log = SearchLog.objects.filter(
				item = item.id, spelling = spelling.text, modifier = modifier,
				radius = item.radius
			).order_by(
				'-last_twitter_id'
			)

			if search_log:
				twitter_since_id = search_log[0].last_twitter_id
			else:
				twitter_since_id = None

			log.debug('SINCE TWEET ID: ' + str(twitter_since_id))

			search = None
			search_error = False

			try:
				# Incremental search: only fetch tweets newer than the last
				# one recorded in the search log.
				search = api.GetSearch(
					search_text, per_page = 100,
					geocode = geocode, since_id = twitter_since_id
				)
			except Exception:
				search_error = True

			if search_error:
				# Retry without since_id; a stale or invalid ID can make the
				# incremental query fail.
				try:
					search = api.GetSearch(
						search_text, per_page = 100,
						geocode = geocode
					)
				except Exception:
					pass

			if search and len(search) > 0:
				# Results come back newest first, so the last element is the
				# oldest tweet and the first element is the newest.
				first_search_id = search[len(search) - 1].id
				last_search_id = search[0].id

				log.debug('FIRST TWEET ID: %d' % first_search_id)
				log.debug('LAST TWEET ID: %d' % last_search_id)

				# Save a record of this search for the next incremental run.
				search_log = SearchLog()

				search_log.item = item
				search_log.spelling = spelling.text
				search_log.modifier = modifier
				search_log.radius = item.radius

				search_log.amount = len(search)
				search_log.maximum = 100

				search_log.first_twitter_id = str(first_search_id)
				search_log.last_twitter_id = str(last_search_id)

				search_log.save()

			# Guard against a failed search: 'search' may still be None here.
			for s in search or []:
				handler.save_new_tweet(s, modifier, radius, item, spelling)
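
For context, a hedged sketch of the SearchLog model this view appears to rely
on. The field names are taken from the attribute accesses above; the field
types and related models are assumptions, not the project's actual schema:

# A guess at the SearchLog model used above; names match the view's attribute
# accesses, while types and relations are assumptions.
from django.db import models

class SearchLog(models.Model):
	item = models.ForeignKey('Item')             # assumed related Item model
	spelling = models.CharField(max_length=255)
	modifier = models.CharField(max_length=255)
	radius = models.ForeignKey('Radius')         # assumed related Radius model
	amount = models.IntegerField()
	maximum = models.IntegerField()
	first_twitter_id = models.CharField(max_length=32)
	last_twitter_id = models.CharField(max_length=32)

Storing last_twitter_id and filtering with since_id is what makes each run
incremental: only tweets newer than the previous run's newest ID are fetched.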