Example #1
	def GET(self):
		p = web.input(names=[])
		# web.input() always returns a Storage and 'names' defaults to [],
		# so test for an empty list rather than None
		if not p.names:
			return json.dumps({'success': 'Invalid or empty names parameter'})
		
		found = []
		for n in p.names:
			prof = memcache.get(n)
			if prof is None:
				try:
					raw_page = urlfetch.fetch("http://www.ratemyprofessors.com/SelectTeacher.jsp?the_dept=All&sid=1074&orderby=TLName&letter=" + n.split(',')[0],
						method=urlfetch.GET,
						deadline=10)
					
					prof = scraper.strip_professors(raw_page.content, unicode(n))
				except urlfetch.DownloadError:
					return json.dumps({'success': 'urlfetch.DownloadError: RateMyProfessors.com request exceeded maximum of 10 seconds'})
				except urlfetch.Error:
					return json.dumps({'success': 'urlfetch.Error: RateMyProfessors.com is not available at the moment'})
				except DeadlineExceededError:
					return json.dumps({'success': 'DeadlineExceededError: Request to RateMyProfessors.com timed out.'})

				# cache only freshly scraped results; the expiry argument is in
				# seconds, so 24 * 60 keeps them for 24 minutes
				memcache.set(n, prof, 24 * 60)

			found.extend(prof)
		
		data = {'success': 'true', 'professors': found}
		return json.dumps(data)
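Both handlers assume module-level plumbing that the excerpts omit: the web.py application object, the App Engine service imports, and the project's scraper module. A minimal sketch of that setup, in which the route paths and handler class names are invented for illustration:

import json
import urllib

import web
from google.appengine.api import memcache, urlfetch
from google.appengine.runtime import DeadlineExceededError

import scraper  # project-local module providing strip_professors()

# Hypothetical routes; the paths and class names are assumptions,
# not taken from the original project.
urls = (
	'/professors', 'RMPBatch',   # Example #1: batch lookup via ?names=
	'/professor', 'RMPSingle',   # Example #2: single lookup via ?name=
)

app = web.application(urls, globals())

if __name__ == '__main__':
	app.run()

With this wiring in place, DeadlineExceededError in Example #1 resolves to the App Engine runtime exception instead of raising a NameError.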
Example #2
	def GET(self):
		# default 'name' to None so a missing parameter doesn't raise AttributeError
		p = web.input(name=None)
		if not p.name:
			return get_rmp_error('Empty Request', 'The professor must have a last name in order to find ratings.')
		try:
			# the RMP listing is filtered by the first letter of the last name
			q = urllib.quote_plus(p.name[0])
			raw_page = urlfetch.fetch("http://www.ratemyprofessors.com/SelectTeacher.jsp?the_dept=All&sid=1074&orderby=TLName&letter=" + q,
										method=urlfetch.GET,
										deadline=10)
			data = scraper.strip_professors(raw_page.content, unicode(p.name))
		except urlfetch.DownloadError:
			data = get_rmp_error('urlfetch.DownloadError','RateMyProfessors.com request exceeded 10 seconds')
		except urlfetch.Error:
			data = get_rmp_error('urlfetch.Error','RateMyProfessors.com is not available at the moment')
		
		return data
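Example #2 hands whatever get_rmp_error produces straight back to the client, so the helper (whose body is not shown here) presumably serializes an error title and message into the same {"success": ...} JSON envelope that Example #1 builds by hand. A purely illustrative reconstruction under that assumption:

import json

def get_rmp_error(title, message):
	# Hypothetical helper, inferred from the call sites: pack the error
	# title and message into the JSON envelope the handlers return.
	return json.dumps({'success': '%s: %s' % (title, message)})

For instance, get_rmp_error('Empty Request', 'The professor must have a last name in order to find ratings.') would yield {"success": "Empty Request: The professor must have a last name in order to find ratings."}.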