Example #1
0
	# Tail of an except handler (its matching try: starts before this chunk):
	# dump the traceback and abort the whole script if the subject fetch failed.
	traceback.print_exc(file=sys.stdout)
	print "subject get failed"
	exit()
# Coerce the fetched JS-ish payload into valid JSON: strip a fixed-size frame
# (10 leading / 25 trailing chars — assumes the server response wrapper never
# changes, TODO confirm), switch to double quotes, and quote the bare keys.
event_str = remote_resource[10:-25].replace("'",'"').replace('brd:','"brd":').replace('file:','"file":').replace('title:','"title":')
event_list = json.loads( event_str )

log.write( "%s - LilyBBS Events fetch successful!\n" % (datetime.now(),) )

# Log of already-processed event titles.
# NOTE(review): opened here but not closed anywhere in this chunk —
# presumably closed further down; verify against the full file.
f = open(path+'/lastupdate_bbsevent.log', 'w')
for i in range(0,len(event_list)):
	board = event_list[i]['brd']
	title = event_list[i]['title'] 
	# Skip events already recorded in the previous run's log file
	# (last_update entries are newline-terminated, hence the +'\n').
	if title+'\n' not in last_update:
		print title.encode('UTF-8')
		# Direct thread URL on the BBS.
		link = ''.join( ('http://bbs.nju.edu.cn/bbstcon?board=', event_list[i]['brd'], '&file=', event_list[i]['file']) )
		friendly_link = multi_update.short_url( ''.join( ('http://bbs.nju.edu.cn/main.html?bbstcon%3Fboard%3D', event_list[i]['brd'], '%26file%3D', event_list[i]['file'], ) ) ) # generate the thread link
		################################################################################
		###---###---### The following codes are the same with p_bbstop10 ###---###---###
		### handle Chinese cut off bug in LilyBBS system. Just f**k it!
		#------UPDATE: move to read_url function------
		#page = urllib2.urlopen(link).read()
		#page = unicode(page, 'gbk', 'ignore') 
		#if page[page.find(u'发信站')-1] != '\n':
		#	page = page.replace(u'发信站:', u'\n发信站:')
		### end
		try:
			# Per the UPDATE note above, read_url now applies the header
			# cut-off fix; pq fetches through it and takes the first textarea.
			page = pq(url=link, opener=read_url) ('textarea').eq(0).text()
		except:
			# NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
			# the source label 'LilyBBS TOP10' looks like a copy-paste from the
			# top10 script — this is the Events fetcher. Confirm before changing.
			log.write("%s - source: %s\n%s%s\n" % (datetime.now(), 'LilyBBS TOP10', ' '*29, 'FETCH BBS POST FAILED!!!!!', ))
			traceback.print_exc(file=sys.stdout)
			continue
Example #2
0
# Sanity check: the TOP10 scrape must yield exactly 30 elements
# (10 threads x 3 pieces each — the loop below consumes them in steps of 3).
if len(top10_list) != 30:
	log.write( "%s - LilyBBS TOP10 fetch error!\n" % (datetime.now(),) )
	log.close()
	exit()

top10_list.make_links_absolute()
log.write( "%s - LilyBBS Top10 fetch successful!\n" % (datetime.now(),) )

# Log of already-processed thread titles.
# NOTE(review): opened here but not closed anywhere in this chunk —
# presumably closed further down; verify against the full file.
f = open(path+'/lastupdate_bbstop10.log', 'w')
for i in range(0,30,3):
	# Element i is the board-name node, i+1 the thread-title anchor
	# (element i+2 is unused in this visible chunk).
	board = pq(top10_list[i]).text()
	title = "[%s]%s" % ( board, pq(top10_list[i+1]).text(), )
	# Skip threads already seen in the last-update log or already in the DB.
	if title not in last_update+db_fetch():
		print title.encode('UTF-8')
		link = pq(top10_list[i+1]).attr.href
		# link[22:] drops the 22-char 'http://bbs.nju.edu.cn/' prefix (links
		# were made absolute above); rebuild a main.html URL and shorten it.
		friendly_link = multi_update.short_url( ''.join( ('http://bbs.nju.edu.cn/main.html?', urllib.pathname2url(link[22:]), ) ) )# generate the thread link

		try:
			page = pq(url=link, opener=read_url) ('textarea')
		except:
			# NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
			log.write("%s - source: %s\n%s%s\n" % (datetime.now(), 'LilyBBS TOP10', ' '*29, 'FETCH BBS POST FAILED!!!!!', ))
			traceback.print_exc(file=sys.stdout)
			continue
		
		# read_post(page, k) appears to return (author, time, body) for the
		# k-th post in the thread, 0 being the original post — TODO confirm
		# against the read_post definition.
		original_post = read_post(page,0)
		author = original_post[0]
		# NOTE(review): local 'time' would shadow an imported time module, if any.
		time = original_post[1]
		sofa_post = read_post(page, 1)
		bench_post = read_post(page, 2)
		floor_post = read_post(page, 3)
		# First reply ("沙发" = 'sofa', BBS slang for the first reply) rendered
		# as an HTML list item.
		sofa = ''.join( (u'<li>沙发 ', sofa_post[0], u' 说:', sofa_post[2], '</li>', ) )