def test_collection_stamps(self):
    if not self._is_up():
        return
    self.storage.set_user(_UID, email="*****@*****.**")
    self.storage.set_collection(_UID, "tabs")
    self.storage.set_collection(_UID, "foo")
    self.storage.set_collection(_UID, "baz")

    self.storage.set_item(_UID, "tabs", "1", payload=_PLD * 200)
    self.storage.set_item(_UID, "foo", "1", payload=_PLD * 200)

    stamps = self.storage.get_collection_timestamps(_UID)  # pump cache
    if self._is_up():
        cached_stamps = self.storage.cache.get("1:stamps")
        self.assertEquals(stamps["tabs"], cached_stamps["tabs"])

    stamps2 = self.storage.get_collection_timestamps(_UID)
    self.assertEquals(len(stamps), len(stamps2))
    if self._is_up():
        self.assertEquals(len(stamps), 2)
    else:
        self.assertEquals(len(stamps), 1)

    # checking the stamps
    if self._is_up():
        stamps = self.storage.cache.get("1:stamps")
        keys = stamps.keys()
        keys.sort()
        self.assertEquals(keys, ["foo", "tabs"])

    # adding a new item should modify the stamps cache
    now = round_time()
    self.storage.set_item(_UID, "baz", "2", payload=_PLD * 200,
                          storage_time=now)

    # checking the stamps
    if self._is_up():
        stamps = self.storage.cache.get("1:stamps")
        self.assertEqual(stamps["baz"], now)

    stamps = self.storage.get_collection_timestamps(_UID)
    if self._is_up():
        _stamps = self.storage.cache.get("1:stamps")
        keys = _stamps.keys()
        keys.sort()
        self.assertEquals(keys, ["baz", "foo", "tabs"])

    # deleting the item should also update the stamp
    time.sleep(0.2)  # to make sure the stamps differ
    now = round_time()
    self.storage.delete_item(_UID, "baz", "2", storage_time=now)
    stamps = self.storage.get_collection_timestamps(_UID)
    self.assertEqual(stamps["baz"], now)

    # and that kills the size cache
    self.assertTrue(self.storage.cache.get("1:size") is None)

    # until we ask for it again
    size = self.storage.get_collection_sizes(1)
    self.assertEqual(self.storage.cache.get("1:size") / 1024,
                     sum(size.values()))
def _update_stamp(self, user_id, collection_name, storage_time):
    # update the stamps cache
    if storage_time is None:
        storage_time = round_time()
    stamps = self.get_collection_timestamps(user_id)
    stamps[collection_name] = storage_time
    self.cache.set(_key(user_id, 'stamps'), stamps)
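# A note on the cache key: the tests in this section look up '1:stamps' and
# '1:size' directly, so _key() presumably just joins its arguments with ':'.
# A minimal sketch of such a helper, assuming that encoding (hypothetical,
# not the original implementation):
def _key(*args):
    # _key(1, 'stamps') -> '1:stamps'
    return ':'.join(str(arg) for arg in args)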
def set_item(self, user_id, collection_name, item_id, storage_time=None,
             **values):
    """Adds or updates an item."""
    if storage_time is None:
        storage_time = round_time()

    if 'payload' in values and 'modified' not in values:
        values['modified'] = storage_time

    return self._set_item(user_id, collection_name, item_id, **values)
def test_round_time(self):
    # returns a two-digit decimal of the current time
    res = round_time()
    self.assertEqual(len(str(res).split('.')[-1]), 2)

    # can take a timestamp
    res = round_time(129084.198271987)
    self.assertEqual(str(res), '129084.20')

    # can take a str timestamp
    res = round_time('129084.198271987')
    self.assertEqual(str(res), '129084.20')

    # bad values raise ValueError
    self.assertRaises(ValueError, round_time, 'bleh')
    self.assertRaises(ValueError, round_time, object())

    # changing the precision
    res = round_time(129084.198271987, precision=3)
    self.assertEqual(str(res), '129084.198')
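# A minimal round_time() sketch consistent with the test above: it returns a
# Decimal quantized to `precision` digits, accepts floats as well as string
# timestamps, and raises ValueError on anything it cannot parse. This is an
# assumption reconstructed from the asserted behaviour, not necessarily the
# original implementation.
import time
from decimal import Decimal, InvalidOperation


def round_time(value=None, precision=2):
    """Rounds a timestamp to `precision` decimal places (2 by default)."""
    if value is None:
        value = time.time()
    if not isinstance(value, str):
        value = str(value)
    try:
        return Decimal(value).quantize(Decimal('1.' + '0' * precision))
    except InvalidOperation:
        raise ValueError(value)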
def _was_modified(self, request, user_id, collection_name):
    """Checks the X-If-Unmodified-Since header."""
    unmodified = request.headers.get('X-If-Unmodified-Since')
    if unmodified is None:
        return False
    unmodified = round_time(unmodified)
    storage = self._get_storage(request)
    max = storage.get_collection_max_timestamp(user_id, collection_name)
    if max is None:
        return False
    return max > unmodified
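# _was_modified() is the guard for conditional writes: if the collection
# changed after the client-supplied X-If-Unmodified-Since value, the write
# should be refused. A hedged sketch of how a controller method might use it;
# the HTTPPreconditionFailed (412) response is an assumption about the
# caller, not shown in the snippet above:
def set_item(self, request, user_id, collection_name, item_id, **values):
    if self._was_modified(request, user_id, collection_name):
        # another client touched the collection in the meantime
        raise HTTPPreconditionFailed(collection_name)
    # ... otherwise proceed with the write as usual ...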
def set_item(self, user_id, collection_name, item_id, storage_time=None,
             **values):
    """Adds or updates an item."""
    values['id'] = item_id
    if storage_time is None:
        storage_time = round_time()

    self._update_item(values, storage_time)
    self._update_cache(user_id, collection_name, [values], storage_time)

    if collection_name == 'tabs':
        # return now: we don't store tabs in SQL
        return storage_time

    return self.sqlstorage.set_item(user_id, collection_name, item_id,
                                    storage_time=storage_time, **values)
def set_items(self, user_id, collection_name, items, storage_time=None):
    """Adds or updates a batch of items.

    Returns a list of success or failures.
    """
    if storage_time is None:
        storage_time = round_time()

    for item in items:
        self._update_item(item, storage_time)

    self._update_cache(user_id, collection_name, items, storage_time)

    if collection_name == 'tabs':
        # return now: we don't store tabs in SQL
        return len(items)

    return self.sqlstorage.set_items(user_id, collection_name, items,
                                     storage_time=storage_time)
def __call__(self, request):
    """Entry point for the WSGI app."""
    # the app is being killed, no more requests please
    if self.killing:
        raise HTTPServiceUnavailable()

    request.server_time = round_time()

    # gets request-specific config
    request.config = self._host_specific(request.host, self.config)

    # pre-hook
    before_headers = self._before_call(request)

    try:
        response = self._dispatch_request(request)
    except HTTPException, response:
        # set before-call headers on all responses
        response.headers.update(before_headers)
        raise
def check(value):
    res = bigint2time(time2bigint(round_time(value)))
    res = str(res)
    self.assertTrue('.' in res)
    self.assertEqual(len(str(res).split('.')[-1]), 2)
def set_items(self, user_id, collection_name, items, storage_time=None):
    """Adds or updates a batch of items.

    Returns a list of success or failures.
    """
    if not self.standard_collections:
        self.set_collection(user_id, collection_name)

    if storage_time is None:
        storage_time = round_time()

    if self.engine_name in ('sqlite', 'postgresql'):
        count = 0
        for item in items:
            if 'id' not in item:
                continue
            item_id = item['id']
            item['modified'] = storage_time
            self.set_item(user_id, collection_name, item_id, **item)
            count += 1
        return count

    # XXX See if SQLAlchemy knows how to do batch inserts
    # that's quite specific to mysql
    fields = ('id', 'parentid', 'predecessorid', 'sortindex', 'modified',
              'payload', 'payload_size', 'ttl')

    table = self._get_wbo_table_name(user_id)
    query = 'insert into %s (username, collection, %s) values ' \
            % (table, ','.join(fields))

    values = {}
    values['collection'] = self._get_collection_id(user_id,
                                                   collection_name)
    values['user_id'] = user_id

    # building the values batch
    binds = [':%s%%(num)d' % field for field in fields]
    pattern = '(:user_id,:collection,%s) ' % ','.join(binds)

    lines = []
    for num, item in enumerate(items):
        lines.append(pattern % {'num': num})
        for field in fields:
            value = item.get(field)
            if value is None:
                continue
            if field == 'modified' and value is not None:
                value = _roundedbigint(storage_time)
            values['%s%d' % (field, num)] = value
        if ('payload%d' % num in values and
                'modified%d' % num not in values):
            values['modified%d' % num] = _roundedbigint(storage_time)
        if values.get('ttl%d' % num) is None:
            values['ttl%d' % num] = 2100000000
        else:
            values['ttl%d' % num] += int(storage_time)
        if 'payload%d' % num in values:
            size = len(values['payload%d' % num])
            values['payload_size%d' % num] = size

    query += ','.join(lines)

    # allowing updates as well
    query += (' on duplicate key update parentid = values(parentid),'
              'predecessorid = values(predecessorid),'
              'sortindex = values(sortindex),'
              'modified = values(modified), payload = values(payload),'
              'payload_size = values(payload_size),'
              'ttl = values(ttl)')

    return self._do_query(sqltext(query), **values)
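# For readability, the statement assembled above for a two-item batch looks
# roughly like the following (the table name wbo1 is a hypothetical value of
# _get_wbo_table_name(); the column list is abbreviated):
#
#   insert into wbo1 (username, collection, id, parentid, ..., ttl) values
#   (:user_id,:collection,:id0,:parentid0,...,:ttl0),
#   (:user_id,:collection,:id1,:parentid1,...,:ttl1)
#   on duplicate key update parentid = values(parentid), ...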
def _convert_args(self, kw):
    """Converts incoming arguments for GET and DELETE on collections.

    This function will also raise a 400 on bad args.
    Unknown args are just dropped.
    XXX see if we want to raise a 400 in that case
    """
    args = {}
    filters = {}
    convert_name = {'older': 'modified', 'newer': 'modified',
                    'index_above': 'sortindex',
                    'index_below': 'sortindex'}

    for arg in ('older', 'newer', 'index_above', 'index_below'):
        value = kw.get(arg)
        if value is None:
            continue
        try:
            if arg in ('older', 'newer'):
                value = round_time(value)
            else:
                value = float(value)
        except ValueError:
            raise HTTPBadRequest('Invalid value for "%s"' % arg)
        if arg in ('older', 'index_below'):
            filters[convert_name[arg]] = '<', value
        else:
            filters[convert_name[arg]] = '>', value

    # convert limit and offset
    limit = offset = None
    for arg in ('limit', 'offset'):
        value = kw.get(arg)
        if value is None:
            continue
        try:
            value = int(value)
        except ValueError:
            raise HTTPBadRequest('Invalid value for "%s"' % arg)
        if arg == 'limit':
            limit = value
        else:
            offset = value

    # we can't have offset without limit
    if limit is not None:
        args['limit'] = limit

    if offset is not None and limit is not None:
        args['offset'] = offset

    for arg in ('predecessorid', 'parentid'):
        value = kw.get(arg)
        if value is None:
            continue
        filters[arg] = '=', value

    # XXX should we control id lengths ?
    for arg in ('ids',):
        value = kw.get(arg)
        if value is None:
            continue
        filters['id'] = 'in', value.split(',')

    sort = kw.get('sort')
    if sort in ('oldest', 'newest', 'index'):
        args['sort'] = sort

    args['full'] = kw.get('full', False)
    args['filters'] = filters
    return args
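# A hedged illustration of the conversion above, for a request such as
# GET /collection?newer=1299775979.23&limit=10&sort=newest (the timestamp is
# an arbitrary example value):
#
#   _convert_args({'newer': '1299775979.23', 'limit': '10', 'sort': 'newest'})
#   ->
#   {'limit': 10,
#    'sort': 'newest',
#    'full': False,
#    'filters': {'modified': ('>', round_time('1299775979.23'))}}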
def _roundedbigint(value):
    return time2bigint(round_time(value))
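# _roundedbigint() suggests that timestamps are persisted as integers holding
# hundredths of a second. A minimal sketch of the two conversion helpers,
# assuming that plain "centiseconds as bigint" encoding; it is consistent
# with the round-trip check() above but is an assumption, not the original
# code:
def time2bigint(value):
    # Decimal('1299775979.23') -> 129977597923
    return int(value * 100)


def bigint2time(value, precision=2):
    # 129977597923 -> Decimal('1299775979.23')
    return round_time(float(value) / 100, precision=precision)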
def test_collection_stamps(self):
    if not self._is_up():
        return
    self.storage.set_user(_UID, email='*****@*****.**')
    self.storage.set_collection(_UID, 'tabs')
    self.storage.set_collection(_UID, 'foo')
    self.storage.set_collection(_UID, 'baz')

    self.storage.set_item(_UID, 'tabs', '1', payload=_PLD * 200)
    self.storage.set_item(_UID, 'foo', '1', payload=_PLD * 200)

    stamps = self.storage.get_collection_timestamps(_UID)  # pump cache
    if self._is_up():
        cached_stamps = self.storage.cache.get('1:stamps')
        self.assertEquals(stamps['tabs'], cached_stamps['tabs'])

    stamps2 = self.storage.get_collection_timestamps(_UID)
    self.assertEquals(len(stamps), len(stamps2))
    if self._is_up():
        self.assertEquals(len(stamps), 2)
    else:
        self.assertEquals(len(stamps), 1)

    # checking the stamps
    if self._is_up():
        stamps = self.storage.cache.get('1:stamps')
        keys = stamps.keys()
        keys.sort()
        self.assertEquals(keys, ['foo', 'tabs'])

    # adding a new item should modify the stamps cache
    now = round_time()
    self.storage.set_item(_UID, 'baz', '2', payload=_PLD * 200,
                          storage_time=now)

    # checking the stamps
    if self._is_up():
        stamps = self.storage.cache.get('1:stamps')
        self.assertEqual(stamps['baz'], now)

    stamps = self.storage.get_collection_timestamps(_UID)
    if self._is_up():
        _stamps = self.storage.cache.get('1:stamps')
        keys = _stamps.keys()
        keys.sort()
        self.assertEquals(keys, ['baz', 'foo', 'tabs'])

    # deleting the item should also update the stamp
    time.sleep(0.2)  # to make sure the stamps differ
    now = round_time()
    cached_size = self.storage.cache.get('1:size')
    self.storage.delete_item(_UID, 'baz', '2', storage_time=now)
    stamps = self.storage.get_collection_timestamps(_UID)
    self.assertEqual(stamps['baz'], now)

    # that should have left the cached size alone.
    self.assertEquals(self.storage.cache.get('1:size'), cached_size)

    # until we force it to be recalculated.
    size = self.storage.get_collection_sizes(1)
    self.assertEqual(self.storage.cache.get('1:size') / 1024.,
                     sum(size.values()))
def set_items(self, user_id, collection_name, items, storage_time=None):
    """Adds or updates a batch of items.

    Returns a list of success or failures.
    """
    if storage_time is None:
        storage_time = round_time()

    if self.engine_name in ('sqlite', 'postgresql'):
        count = 0
        for item in items:
            if 'id' not in item:
                continue
            item_id = item['id']
            item['modified'] = storage_time
            self.set_item(user_id, collection_name, item_id, **item)
            count += 1
        return count

    # XXX See if SQLAlchemy knows how to do batch inserts
    # that's quite specific to mysql
    fields = ('id', 'parentid', 'predecessorid', 'sortindex', 'modified',
              'payload', 'payload_size', 'ttl')

    table = self._get_wbo_table_name(user_id)
    query = 'insert into %s (username, collection, %s) values ' \
            % (table, ','.join(fields))

    values = {}
    values['collection'] = self._get_collection_id(user_id,
                                                   collection_name)
    values['user_id'] = user_id

    # building the values batch
    binds = [':%s%%(num)d' % field for field in fields]
    pattern = '(:user_id,:collection,%s) ' % ','.join(binds)

    lines = []
    for num, item in enumerate(items):
        lines.append(pattern % {'num': num})
        for field in fields:
            value = item.get(field)
            if value is None:
                continue
            if field == 'modified' and value is not None:
                value = _roundedbigint(storage_time)
            values['%s%d' % (field, num)] = value
        if ('payload%d' % num in values and
                'modified%d' % num not in values):
            values['modified%d' % num] = _roundedbigint(storage_time)
        if values.get('ttl%d' % num) is None:
            values['ttl%d' % num] = 2100000000
        else:
            values['ttl%d' % num] += int(storage_time)
        if 'payload%d' % num in values:
            size = len(values['payload%d' % num])
            values['payload_size%d' % num] = size

    query += ','.join(lines)

    # allowing updates as well
    query += (' on duplicate key update parentid = values(parentid),'
              'predecessorid = values(predecessorid),'
              'sortindex = values(sortindex),'
              'modified = values(modified), payload = values(payload),'
              'payload_size = values(payload_size),'
              'ttl = values(ttl)')

    return self._do_query(sqltext(query), **values)
def __call__(self, request):
    if request.method in ('HEAD',):
        raise HTTPBadRequest('"%s" not supported' % request.method)

    request.server_time = round_time()

    # gets request-specific config
    request.config = self._host_specific(request.host, self.config)

    # pre-hook
    before_headers = self._before_call(request)

    # XXX
    # removing the trailing slash - ambiguity on the client side
    url = request.path_info.rstrip('/')
    if url != '':
        request.environ['PATH_INFO'] = request.path_info = url

    if (self.heartbeat_page is not None and
            url == '/%s' % self.heartbeat_page):
        return self._heartbeat(request)

    if self.debug_page is not None and url == '/%s' % self.debug_page:
        return self._debug(request)

    match = self.mapper.routematch(environ=request.environ)
    if match is None:
        return HTTPNotFound()
    match, __ = match

    # authentication control
    if self.auth is not None:
        self.auth.check(request, match)

    function = self._get_function(match['controller'], match['action'])
    if function is None:
        raise HTTPNotFound('Unknown URL %r' % request.path_info)

    # extracting all the info from the headers and the url
    request.sync_info = match

    # the GET mapping is filled on GET and DELETE requests
    if request.method in ('GET', 'DELETE'):
        params = dict(request.GET)
    else:
        params = {}

    try:
        result = function(request, **params)
    except BackendError:
        err = traceback.format_exc()
        logger.error(err)
        raise HTTPServiceUnavailable(retry_after=self.retry_after)

    if isinstance(result, basestring):
        response = getattr(request, 'response', None)
        if response is None:
            response = Response(result)
        elif isinstance(result, str):
            response.body = result
        else:
            # if it's not str it's unicode, which really shouldn't happen
            module = getattr(function, '__module__', 'unknown')
            name = getattr(function, '__name__', 'unknown')
            logger.warn('Unicode response returned from: %s - %s'
                        % (module, name))
            response.unicode_body = result
    else:
        # result is already a Response
        response = result

    # setting up the X-Weave-Timestamp
    response.headers['X-Weave-Timestamp'] = str(request.server_time)
    response.headers.update(before_headers)
    return response