def _get_item(self, table_name, key):
    """Fetch a single item by its DynamoDB-style key.

    Returns the decoded JSON content of the row, or None when no row
    matches. Read-capacity consumed (1 unit per started KB of stored
    content) is recorded against the table's counters.
    """
    table_spec = self.tables[table_name]
    sql_table = table_spec['sql_table']
    table_def = table_spec['table_def']

    where = sql_table.c.hash_key == utils.parse_value(key['HashKeyElement'])
    if table_def.range_key:
        where = sql.and_(
            where,
            sql_table.c.range_key == utils.parse_value(
                key['RangeKeyElement']))

    rows = list(self.engine.execute(sql.select([sql_table], where)))
    if not rows:
        return None

    content = rows[0][sql_table.c.content]
    # Capacity is charged per started KB of stored content.
    capacity_size = (len(content) / 1024) + 1
    self.table_read_op_capacity[table_name] += capacity_size
    self.tables[table_name]['read_counter'].record(capacity_size)
    return json.loads(content)
def do_deleteitem(self, args):
    """Delete a single item and return its attributes.

    The read capacity consumed while fetching the item is transferred to
    the write side of the table's accounting before the row is removed.
    """
    table_name = args['TableName']
    table_spec = self.tables[table_name]
    sql_table = table_spec['sql_table']
    table_def = table_spec['table_def']

    key = args['Key']
    where = sql_table.c.hash_key == utils.parse_value(key['HashKeyElement'])
    if table_def.range_key:
        where = sql.and_(
            where,
            sql_table.c.range_key == utils.parse_value(
                key['RangeKeyElement']))

    item = self.do_getitem(args)

    # This one is kinda weird, we're basically gonna transfer whatever we
    # racked up in reads to the write column.
    op_capacity = self.table_read_op_capacity[table_name]
    self.table_read_op_capacity[table_name] = 0
    table_spec['read_counter'].record(-op_capacity)
    self.table_write_op_capacity[table_name] += op_capacity
    table_spec['write_counter'].record(op_capacity)

    self.engine.execute(sql_table.delete().where(where))
    return {'Attributes': item['Item']}
def do_deleteitem(self, args):
    """Remove one item, returning its old attributes.

    Whatever read capacity the lookup consumed gets re-billed as write
    capacity, since this is a write operation overall.
    """
    name = args['TableName']
    spec = self.tables[name]
    sql_table = spec['sql_table']

    if spec['table_def'].range_key:
        expr = sql.and_(
            sql_table.c.hash_key == utils.parse_value(
                args['Key']['HashKeyElement']),
            sql_table.c.range_key == utils.parse_value(
                args['Key']['RangeKeyElement']))
    else:
        expr = sql_table.c.hash_key == utils.parse_value(
            args['Key']['HashKeyElement'])

    item = self.do_getitem(args)

    # This one is kinda weird, we're basically gonna transfer whatever we
    # racked up in reads to the write column.
    op_capacity = self.table_read_op_capacity[name]
    self.table_read_op_capacity[name] = 0
    spec['read_counter'].record(op_capacity * -1)
    self.table_write_op_capacity[name] += op_capacity
    spec['write_counter'].record(op_capacity)

    self.engine.execute(sql_table.delete().where(expr))
    return {'Attributes': item['Item']}
def do_scan(self, args):
    """Scan the whole table, optionally filtering rows with 'ScanFilter'.

    Records the read capacity consumed (1 unit per started KB of row
    content) against the table's counters, including rows the filter
    rejects — a scan reads everything it touches.
    """
    table_spec = self.tables[args['TableName']]
    sql_table = table_spec['sql_table']

    q = sql.select([sql_table])
    if 'Limit' in args:
        q = q.limit(args['Limit'])

    unsupported_keys = set(args.keys()) - set(
        ['Limit', 'TableName', 'ScanFilter'])
    if unsupported_keys:
        raise NotImplementedError(unsupported_keys)

    def passes_filter(item, scan_filter):
        # BUG FIX: DynamoDB ANDs ScanFilter conditions — every named
        # attribute must be present and satisfy at least one candidate
        # value. The previous version OR'd the conditions and could
        # append the same item once per matching attribute.
        for attribute, filter_spec in scan_filter.iteritems():
            if attribute not in item:
                return False
            if not any(
                    compare(filter_spec['ComparisonOperator'],
                            item[attribute], utils.parse_value(value_spec))
                    for value_spec in filter_spec['AttributeValueList']):
                return False
        return True

    capacity_size = 0
    out = {'Items': []}
    for res in self.engine.execute(q):
        # Capacity is charged per started KB of stored content.
        capacity_size += (len(res[sql_table.c.content]) / 1024) + 1
        item_data = json.loads(res[sql_table.c.content])
        item = utils.parse_item(item_data)
        if 'ScanFilter' not in args or passes_filter(
                item, args['ScanFilter']):
            out['Items'].append(item_data)

    self.table_read_op_capacity[args['TableName']] += capacity_size
    self.tables[args['TableName']]['read_counter'].record(capacity_size)
    return out
def _get_item(self, table_name, key):
    """Look up one item by key; None if the row doesn't exist.

    On a hit, charges read capacity (one unit per started KB of content)
    to the table's accounting and returns the decoded item content.
    """
    spec = self.tables[table_name]
    sql_table = spec['sql_table']

    if spec['table_def'].range_key:
        expr = sql.and_(
            sql_table.c.hash_key == utils.parse_value(
                key['HashKeyElement']),
            sql_table.c.range_key == utils.parse_value(
                key['RangeKeyElement']))
    else:
        expr = sql_table.c.hash_key == utils.parse_value(
            key['HashKeyElement'])

    try:
        row = list(self.engine.execute(sql.select([sql_table], expr)))[0]
    except IndexError:
        # No matching row at all.
        return None

    content = row[sql_table.c.content]
    units = (len(content) / 1024) + 1
    self.table_read_op_capacity[table_name] += units
    self.tables[table_name]['read_counter'].record(units)
    return json.loads(content)
def do_scan(self, args):
    """Scan every row of a table, optionally applying a 'ScanFilter'.

    Read capacity (1 unit per started KB of content) is recorded for
    every row examined, whether or not the filter keeps it.
    """
    table_spec = self.tables[args['TableName']]
    sql_table = table_spec['sql_table']

    q = sql.select([sql_table])
    if 'Limit' in args:
        q = q.limit(args['Limit'])

    unsupported_keys = set(args.keys()) - set(
        ['Limit', 'TableName', 'ScanFilter'])
    if unsupported_keys:
        raise NotImplementedError(unsupported_keys)

    def matches(item, scan_filter):
        # BUG FIX: ScanFilter conditions are ANDed in DynamoDB — an item
        # must carry every filtered attribute and match at least one of
        # its candidate values. The old loop OR'd conditions and could
        # add one item multiple times (once per matching attribute).
        for attribute, filter_spec in scan_filter.iteritems():
            if attribute not in item:
                return False
            if not any(
                    compare(filter_spec['ComparisonOperator'],
                            item[attribute], utils.parse_value(value_spec))
                    for value_spec in filter_spec['AttributeValueList']):
                return False
        return True

    capacity_size = 0
    out = {'Items': []}
    for res in self.engine.execute(q):
        # One capacity unit per started KB of stored content.
        capacity_size += (len(res[sql_table.c.content]) / 1024) + 1
        item_data = json.loads(res[sql_table.c.content])
        item = utils.parse_item(item_data)
        if 'ScanFilter' not in args or matches(item, args['ScanFilter']):
            out['Items'].append(item_data)

    self.table_read_op_capacity[args['TableName']] += capacity_size
    self.tables[args['TableName']]['read_counter'].record(capacity_size)
    return out
def do_updateitem(self, args):
    """Apply per-attribute updates (ADD / PUT / DELETE) to one item.

    Runs inside an explicit transaction: self.connection is set so that
    callees (_put_item) reuse it instead of the connectionless engine.
    Supports ReturnValues of NONE, ALL_OLD and ALL_NEW only; 'Expected'
    conditions are not implemented.
    """
    table_spec = self.tables[args['TableName']]
    sql_table = table_spec['sql_table']
    key_spec = table_spec['key_spec']
    table_def = table_spec['table_def']

    if 'Expected' in args:
        raise NotImplementedError
    if args.get('ReturnValues') not in (None, 'ALL_OLD', 'ALL_NEW'):
        raise NotImplementedError

    if table_def.range_key:
        key = (utils.parse_value(args['Key']['HashKeyElement']),
               utils.parse_value(args['Key']['RangeKeyElement']))
        expr = sql.and_(sql_table.c.hash_key == key[0],
                        sql_table.c.range_key == key[1])
    else:
        key = (utils.parse_value(args['Key']['HashKeyElement']), )
        expr = sql_table.c.hash_key == key[0]

    # Update is one of our few transactionally important operations. By
    # setting self.connection, our callees should use that rather than the
    # connectionless self.engine method, allowing us to control the
    # transaction directly.
    self.connection = self.engine.connect()
    txn = self.connection.begin()

    q = sql.select([sql_table], expr)
    res = list(self.connection.execute(q))
    if res:
        item = json.loads(res[0][sql_table.c.content])
    else:
        item = {}
    # Make sure the key attributes are present on the item we build on
    # (a no-op for existing items, whose stored content carries its key).
    item.update(utils.format_key(key_spec, key))
    real_item = utils.parse_item(item)

    # Apply our updates
    for attribute, value_update in args['AttributeUpdates'].iteritems():
        if value_update['Action'] == "ADD":
            if attribute in real_item:
                # BUG FIX: 'list' used to be included in this isinstance
                # tuple, which made the list-handling elif below
                # unreachable and sent lists down the numeric '+=' path.
                if isinstance(real_item[attribute], (int, float)):
                    real_item[attribute] += utils.parse_value(
                        value_update['Value'])
                elif isinstance(real_item[attribute], list):
                    if hasattr(value_update['Value'], '__iter__'):
                        real_item[attribute] += [
                            utils.parse_value(v)
                            for v in value_update['Value']
                        ]
                    else:
                        real_item[attribute].append(
                            utils.parse_value(value_update['Value']))
                else:
                    real_item[attribute].append(
                        utils.parse_value(value_update['Value']))
            else:
                # ADD on a missing attribute behaves like PUT.
                real_item[attribute] = utils.parse_value(
                    value_update['Value'])
        elif value_update['Action'] == "PUT":
            real_item[attribute] = utils.parse_value(value_update['Value'])
        elif value_update['Action'] == "DELETE":
            if attribute in real_item:
                del real_item[attribute]
        else:
            raise ValueError(value_update['Action'])

    # write to the db
    self._put_item(args['TableName'], utils.format_item(real_item))
    txn.commit()
    self.connection = None

    if args.get('ReturnValues', 'NONE') == 'NONE':
        return {}
    elif args['ReturnValues'] == 'ALL_NEW':
        return {'Attributes': utils.format_item(real_item)}
    elif args['ReturnValues'] == 'ALL_OLD':
        return {'Attributes': item}
def do_query(self, args):
    """Query items by hash key, with optional range condition and paging.

    Supports range-key comparison operators (BETWEEN, GT, LT, GE, LE,
    EQ, BEGINS_WITH), forward/backward scans, 'Limit' (capped at
    DEFAULT_LIMIT), 'AttributesToGet' projection and 'ExclusiveStartKey'
    pagination. Sets 'LastEvaluatedKey' when another page exists.
    """
    table_spec = self.tables[args['TableName']]
    sql_table = table_spec['sql_table']
    key_spec = table_spec['key_spec']
    table_def = table_spec['table_def']

    unsupported_keys = set(args.keys()) - set([
        'Limit', 'TableName', 'HashKeyValue', 'ScanIndexForward',
        'ConsistentRead', 'RangeKeyCondition', 'AttributesToGet',
        'ExclusiveStartKey'])
    if unsupported_keys:
        raise NotImplementedError(unsupported_keys)

    scan_forward = args.get('ScanIndexForward', True)
    expr = sql_table.c.hash_key == utils.parse_value(args['HashKeyValue'])

    if 'RangeKeyCondition' in args:
        if len(key_spec) < 2:
            # Range conditions make no sense on a hash-only table.
            raise NotImplementedError
        operator = args['RangeKeyCondition']['ComparisonOperator']
        if operator == 'BETWEEN':
            start = utils.parse_value(
                args['RangeKeyCondition']['AttributeValueList'][0])
            end = utils.parse_value(
                args['RangeKeyCondition']['AttributeValueList'][1])
            expr = sql.and_(expr, sql_table.c.range_key.between(start, end))
        else:
            range_value = utils.parse_value(
                args['RangeKeyCondition']['AttributeValueList'][0])
            if operator == 'GT':
                expr = sql.and_(expr, sql_table.c.range_key > range_value)
            elif operator == 'LT':
                expr = sql.and_(expr, sql_table.c.range_key < range_value)
            elif operator == 'GE':
                expr = sql.and_(expr, sql_table.c.range_key >= range_value)
            elif operator == 'LE':
                expr = sql.and_(expr, sql_table.c.range_key <= range_value)
            elif operator == 'EQ':
                expr = sql.and_(expr, sql_table.c.range_key == range_value)
            elif operator == 'BEGINS_WITH':
                expr = sql.and_(
                    expr, sql_table.c.range_key.like('%s%%' % range_value))
            else:
                raise NotImplementedError

    if 'ExclusiveStartKey' in args:
        range_key = utils.parse_value(
            args['ExclusiveStartKey']['RangeKeyElement'])
        if scan_forward:
            expr = sql.and_(expr, sql_table.c.range_key > range_key)
        else:
            # BUG FIX: previously referenced self.table (undefined
            # attribute) instead of sql_table, which broke backward
            # pagination with an AttributeError.
            expr = sql.and_(expr, sql_table.c.range_key < range_key)

    q = sql.select([sql_table], expr)
    if 'Limit' in args:
        limit = min(DEFAULT_LIMIT, args['Limit'])
    else:
        limit = DEFAULT_LIMIT
    # Fetch one extra row so we can tell whether another page exists.
    q = q.limit(limit + 1)

    if len(key_spec) > 1:
        if scan_forward:
            q = q.order_by(sql_table.c.range_key.asc())
        else:
            q = q.order_by(sql_table.c.range_key.desc())

    capacity_size = 0
    out = {'Items': [], 'Count': 0}
    for res in self.engine.execute(q):
        if out['Count'] == limit:
            # The extra row proves a next page exists; LastEvaluatedKey
            # already points at the last row we returned.
            break
        capacity_size += (len(res[sql_table.c.content]) / 1024) + 1
        item_data = json.loads(res[sql_table.c.content])
        if 'AttributesToGet' in args:
            out_item = dict((col_name, item_data[col_name])
                            for col_name in args['AttributesToGet'])
            out['Items'].append(out_item)
        else:
            out['Items'].append(item_data)
        out['Count'] += 1
        if len(key_spec) > 1:
            out['LastEvaluatedKey'] = utils.format_item({
                'HashKeyElement': res[sql_table.c.hash_key],
                'RangeKeyElement': res[sql_table.c.range_key]})
    else:
        # If we didn't break out of our loop, that means we're on the last
        # page of our result set and shouldn't hand back a continuation key.
        # The docs say the last evaulated key should be 'null', but I'm
        # suspicious that isn't the case.
        if 'LastEvaluatedKey' in out:
            del out['LastEvaluatedKey']

    self.table_read_op_capacity[args['TableName']] += capacity_size
    self.tables[args['TableName']]['read_counter'].record(capacity_size)
    out['Count'] = len(out['Items'])
    return out
def test_float(self):
    """A numeric value containing a decimal point parses to a float."""
    parsed = utils.parse_value({'N': '10.5'})
    assert_equal(parsed, 10.5)
    assert_equal(type(parsed), float)
def test_int(self):
    """A whole numeric value parses to an int, not a float."""
    parsed = utils.parse_value({'N': '123'})
    assert_equal(parsed, 123)
    assert_equal(type(parsed), int)
def test_string(self):
    """An 'S' value parses straight through as a string."""
    parsed = utils.parse_value({'S': 'Rhett'})
    assert_equal(parsed, "Rhett")
def test_multistring(self):
    """An 'SS' (string set) value parses to a list of strings."""
    parsed = utils.parse_value({'SS': ['Rhett', 'Ziggy']})
    assert_equal(parsed, ['Rhett', 'Ziggy'])
def do_updateitem(self, args):
    """Update one item attribute-by-attribute (ADD / PUT / DELETE).

    Wraps the read-modify-write in an explicit transaction by stashing
    the connection on self.connection so _put_item participates in it.
    Only ReturnValues NONE / ALL_OLD / ALL_NEW are supported and
    'Expected' conditions are not implemented.
    """
    table_spec = self.tables[args['TableName']]
    sql_table = table_spec['sql_table']
    key_spec = table_spec['key_spec']
    table_def = table_spec['table_def']

    if 'Expected' in args:
        raise NotImplementedError
    if args.get('ReturnValues') not in (None, 'ALL_OLD', 'ALL_NEW'):
        raise NotImplementedError

    if table_def.range_key:
        key = (utils.parse_value(args['Key']['HashKeyElement']),
               utils.parse_value(args['Key']['RangeKeyElement']))
        expr = sql.and_(sql_table.c.hash_key == key[0],
                        sql_table.c.range_key == key[1])
    else:
        key = (utils.parse_value(args['Key']['HashKeyElement']),)
        expr = sql_table.c.hash_key == key[0]

    # Update is one of our few transactionally important operations. By
    # setting self.connection, our callees should use that rather than the
    # connectionless self.engine method, allowing us to control the
    # transaction directly.
    self.connection = self.engine.connect()
    txn = self.connection.begin()

    res = list(self.connection.execute(sql.select([sql_table], expr)))
    if res:
        item = json.loads(res[0][sql_table.c.content])
    else:
        item = {}
    # Ensure the key attributes are present before parsing (harmless for
    # existing items, whose stored content already includes the key).
    item.update(utils.format_key(key_spec, key))
    real_item = utils.parse_item(item)

    # Apply our updates
    for attribute, value_update in args['AttributeUpdates'].iteritems():
        action = value_update['Action']
        if action == "ADD":
            if attribute in real_item:
                # BUG FIX: the first isinstance check used to include
                # 'list', leaving the list-handling elif unreachable and
                # routing lists through the numeric '+=' branch.
                if isinstance(real_item[attribute], (int, float)):
                    real_item[attribute] += utils.parse_value(
                        value_update['Value'])
                elif isinstance(real_item[attribute], list):
                    if hasattr(value_update['Value'], '__iter__'):
                        real_item[attribute] += [
                            utils.parse_value(v)
                            for v in value_update['Value']]
                    else:
                        real_item[attribute].append(
                            utils.parse_value(value_update['Value']))
                else:
                    real_item[attribute].append(
                        utils.parse_value(value_update['Value']))
            else:
                # ADD to a missing attribute just sets it.
                real_item[attribute] = utils.parse_value(
                    value_update['Value'])
        elif action == "PUT":
            real_item[attribute] = utils.parse_value(value_update['Value'])
        elif action == "DELETE":
            if attribute in real_item:
                del real_item[attribute]
        else:
            raise ValueError(value_update['Action'])

    # write to the db
    self._put_item(args['TableName'], utils.format_item(real_item))
    txn.commit()
    self.connection = None

    if args.get('ReturnValues', 'NONE') == 'NONE':
        return {}
    elif args['ReturnValues'] == 'ALL_NEW':
        return {'Attributes': utils.format_item(real_item)}
    elif args['ReturnValues'] == 'ALL_OLD':
        return {'Attributes': item}
def do_query(self, args):
    """Query rows sharing a hash key; optional range condition + paging.

    Handles the range-key operators BETWEEN, GT, LT, GE, LE, EQ and
    BEGINS_WITH, honours 'ScanIndexForward', caps 'Limit' at
    DEFAULT_LIMIT, projects with 'AttributesToGet', and pages via
    'ExclusiveStartKey' / 'LastEvaluatedKey'.
    """
    table_spec = self.tables[args['TableName']]
    sql_table = table_spec['sql_table']
    key_spec = table_spec['key_spec']
    table_def = table_spec['table_def']

    unsupported_keys = set(args.keys()) - set([
        'Limit', 'TableName', 'HashKeyValue', 'ScanIndexForward',
        'ConsistentRead', 'RangeKeyCondition', 'AttributesToGet',
        'ExclusiveStartKey'
    ])
    if unsupported_keys:
        raise NotImplementedError(unsupported_keys)

    scan_forward = args.get('ScanIndexForward', True)
    expr = sql_table.c.hash_key == utils.parse_value(args['HashKeyValue'])

    if 'RangeKeyCondition' in args:
        if len(key_spec) < 2:
            # No range key on this table to apply the condition against.
            raise NotImplementedError
        operator = args['RangeKeyCondition']['ComparisonOperator']
        if operator == 'BETWEEN':
            start = utils.parse_value(
                args['RangeKeyCondition']['AttributeValueList'][0])
            end = utils.parse_value(
                args['RangeKeyCondition']['AttributeValueList'][1])
            expr = sql.and_(expr, sql_table.c.range_key.between(start, end))
        else:
            range_value = utils.parse_value(
                args['RangeKeyCondition']['AttributeValueList'][0])
            if operator == 'GT':
                expr = sql.and_(expr, sql_table.c.range_key > range_value)
            elif operator == 'LT':
                expr = sql.and_(expr, sql_table.c.range_key < range_value)
            elif operator == 'GE':
                expr = sql.and_(expr, sql_table.c.range_key >= range_value)
            elif operator == 'LE':
                expr = sql.and_(expr, sql_table.c.range_key <= range_value)
            elif operator == 'EQ':
                expr = sql.and_(expr, sql_table.c.range_key == range_value)
            elif operator == 'BEGINS_WITH':
                expr = sql.and_(
                    expr, sql_table.c.range_key.like('%s%%' % range_value))
            else:
                raise NotImplementedError

    if 'ExclusiveStartKey' in args:
        range_key = utils.parse_value(
            args['ExclusiveStartKey']['RangeKeyElement'])
        if scan_forward:
            expr = sql.and_(expr, sql_table.c.range_key > range_key)
        else:
            # BUG FIX: this branch used self.table.c.range_key — an
            # undefined attribute — instead of sql_table, so reverse
            # pagination crashed with an AttributeError.
            expr = sql.and_(expr, sql_table.c.range_key < range_key)

    q = sql.select([sql_table], expr)
    if 'Limit' in args:
        limit = min(DEFAULT_LIMIT, args['Limit'])
    else:
        limit = DEFAULT_LIMIT
    # Over-fetch by one row so we know whether another page follows.
    q = q.limit(limit + 1)

    if len(key_spec) > 1:
        if scan_forward:
            q = q.order_by(sql_table.c.range_key.asc())
        else:
            q = q.order_by(sql_table.c.range_key.desc())

    capacity_size = 0
    out = {'Items': [], 'Count': 0}
    for res in self.engine.execute(q):
        if out['Count'] == limit:
            # Hitting the sentinel row: another page exists, and
            # LastEvaluatedKey already names the last returned row.
            break
        capacity_size += (len(res[sql_table.c.content]) / 1024) + 1
        item_data = json.loads(res[sql_table.c.content])
        if 'AttributesToGet' in args:
            out_item = dict((col_name, item_data[col_name])
                            for col_name in args['AttributesToGet'])
            out['Items'].append(out_item)
        else:
            out['Items'].append(item_data)
        out['Count'] += 1
        if len(key_spec) > 1:
            out['LastEvaluatedKey'] = utils.format_item({
                'HashKeyElement': res[sql_table.c.hash_key],
                'RangeKeyElement': res[sql_table.c.range_key]
            })
    else:
        # If we didn't break out of our loop, that means we're on the last
        # page of our result set and should drop the continuation key.
        # The docs say the last evaulated key should be 'null', but I'm
        # suspicious that isn't the case.
        if 'LastEvaluatedKey' in out:
            del out['LastEvaluatedKey']

    self.table_read_op_capacity[args['TableName']] += capacity_size
    self.tables[args['TableName']]['read_counter'].record(capacity_size)
    out['Count'] = len(out['Items'])
    return out