Example #1
	def _incremental_update(self, request):
		
		# Get the time of the last sync
		last_sync = request.POST.get('last_sync', None)
		
		if last_sync is not None:
			# If last_sync was in the POST data
			
			try:
				# Convert the last time the client synced with the database into DateTime
				sync_time = utils.parseDateTime(last_sync)
				
			except ValueError:
				# If the conversion failed
				self.data['meta']['error'] = "last_sync was not in the format YYYY-MM-DD HH:MM:SS"
				return None
				
			else:
				# If the conversion was successful
				
				# Get all updates for the current user since the parsed sync time
				return Update.objects.filter(user=self.user, time__gte=sync_time).order_by('-time')
				
		else:
			# If the last sync time was not found, return an error
			self.data['meta']['error'] = "last_sync time not found"
			return
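Example #1 depends on a project helper, utils.parseDateTime, whose implementation is not shown on this page. A minimal sketch of what it presumably does, assuming it is a thin wrapper around datetime.strptime for the YYYY-MM-DD HH:MM:SS format named in the error message (the real helper may be more lenient):

from datetime import datetime

def parseDateTime(value):
	# Hypothetical sketch only; the project's actual utils.parseDateTime is not shown here.
	# Raises ValueError on malformed input, which the except ValueError branch above relies on.
	return datetime.strptime(value, '%Y-%m-%d %H:%M:%S')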
Example #2
	def __init__(self, url):
# 		try:
		xml = unicode(urlopen(url).read(), errors='ignore')
		self.soup = BeautifulStoneSoup(xml)
# 		except (HTTPError, URLError, httplib.BadStatusLine, httplib.InvalidURL, ValueError, IOError):
# 			return None
		
		if self.soup is not None:
			
			self.status = True
			entry = self.soup
			
			author = entry.find('dc:creator')
			self.author = unescape(author.string) if author is not None else None
			date = entry.find('dc:date')
			
			if date is not None:
				# Normalise partial dates (YYYY, YYYY-MM, YYYY-MM-DD) to a full timestamp
				dateString = ''
				if re.match(r'^\d{4}$', date.string): dateString = date.string + '-01-01 00:00:00'
				elif re.match(r'^\d{4}-\d{2}$', date.string): dateString = date.string + '-01 00:00:00'
				elif re.match(r'^\d{4}-\d{2}-\d{2}$', date.string): dateString = date.string + ' 00:00:00'
				self.published = utils.parseDateTime(dateString) if dateString else None
			else:
				self.published = None
				
			description = entry.find('dc:description')
			self.description = unescape(description.string) if description is not None else None
			
			self.width = self.height = self.depth = self.pages = self.format = None
			
			try:
				for format in entry.findAll('dc:format'):
					if format.string.startswith('Dimensions'):
						dimensions = format.string.split(' ')[1]
						dimensions = dimensions.split('x')
						self.width = float(dimensions[0])
						self.height = float(dimensions[1])
						self.depth = float(dimensions[2])
					elif re.match('[0-9]', format.string):
						self.pages = int(format.string.split(' ')[0])
					else:
						self.format = unescape(format.string)
			except IndexError:
				# Some dimensions are not present
				pass
			
			
			
			identifiers = entry.findAll('dc:identifier')
			
			self.googleid = identifiers[0].string
			self.isbn10 = self.isbn13 = None
			for identifier in identifiers[1:]:
				if identifier.string.startswith('ISBN:'):
					# Slice off the literal 'ISBN:' prefix (lstrip would strip a character set, not a prefix)
					isbn = identifier.string[len('ISBN:'):]
					if len(isbn) == 10:
						self.isbn10 = isbn
					elif len(isbn) == 13:
						self.isbn13 = isbn
					else:
						# ERROR: It's an ISBN but is not 10 or 13 digits long
						pass
			
			language = entry.find('dc:language')
			self.language = language.string if language is not None else None
			publisher = entry.find('dc:publisher')
			self.publisher = unescape(publisher.string) if publisher is not None else None
			
			self.thumbnail_base = None
			self.thumbnail_huge = None
			self.thumbnail_small = None
			self.thumbnail_large = None
			
			# Thumbnail
			for link in entry.findAll('link'):
				if link['rel'] == self.THUMBNAIL_REL:
					url = urlparse.urlsplit(unescape(link['href']))  # urlsplit lives in urlparse, not httplib
					self.thumbnail_base = url.scheme + '://' + url.netloc + url.path + '?id=' + self.googleid + '&printsec=frontcover&img=1&zoom='
					self.thumbnail_large = self.thumbnail_base + '1'
					self.thumbnail_small = self.thumbnail_base + '5'
					self.thumbnail_huge = self.thumbnail_base + '0'
			
			self.subjects = [unescape(subject.string) for subject in entry.findAll('dc:subject')]
			
			title = entry.find('dc:title')
			self.title = unescape(title.string) if title is not None else None
			
		else:
			self.status = False
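The class that owns this constructor is not named in the snippet. A rough usage sketch, assuming the class is called GoogleBook and is given the URL of a Google Books Data API volume entry (both the class name and the URL are illustrative assumptions, not part of the original code):

# Hypothetical usage; GoogleBook and the volume URL are assumptions for illustration.
book = GoogleBook('http://books.google.com/books/feeds/volumes/VOLUME_ID')
if book.status:
	print book.title, book.isbn13, book.published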