def write_batch(self, batch):
    """
    Batch processor. Dispatches call to appropriate
    :py:class:`ddbmock.database.table.Table` methods. This is the only
    low_level API that directly pushes throughput usage.

    :param batch: raw DynamoDB request batch.

    :returns: dict compatible with DynamoDB API

    :raises: :py:exc:`ddbmock.errors.ValidationException` if a ``range_key`` was provided while table has none.

    :raises: :py:exc:`ddbmock.errors.ResourceNotFoundException` if a table does not exist.
    """
    ret = defaultdict(dict)

    # ``items()`` instead of Python-2-only ``iteritems()``: iterable on
    # both Python 2 (list) and Python 3 (view), identical loop behavior.
    for tablename, operations in batch.items():
        table = self.get_table(tablename)
        units = ItemSize(0)

        for operation in operations:
            if u'PutRequest' in operation:
                old, new = table.put(operation[u'PutRequest'][u'Item'], {})
                # A put is billed on the bigger of the replaced/stored item.
                units += max(old.get_size().as_units(),
                             new.get_size().as_units())
            if u'DeleteRequest' in operation:
                old = table.delete_item(operation[u'DeleteRequest'][u'Key'], {})
                units += old.get_size().as_units()

        # One throughput push per table, with the summed units.
        push_write_throughput(tablename, units)
        ret[tablename][u'ConsumedCapacityUnits'] = units

    return ret
def scan(post, table):
    """Scan ``table`` and build a DynamoDB-compatible response dict.

    Raises ValidationException when 'AttributesToGet' is combined with a
    count-only request, since there is nothing to project in that case.
    """
    count_only = post[u'Count']
    if post[u'AttributesToGet'] and count_only:
        raise ValidationException(
            "Can not filter fields when only count is requested")

    results = table.scan(
        post[u'ScanFilter'],
        post[u'AttributesToGet'],
        post[u'ExclusiveStartKey'],
        post[u'Limit'],
    )

    # Scans bill at the eventually-consistent (half) rate.
    capacity = 0.5 * results.size.as_units()
    # NOTE(review): this pushes *write* throughput for a read operation —
    # confirm whether push_read_throughput was intended here.
    push_write_throughput(table.name, capacity)

    ret = dict(
        Count=len(results.items),
        ScannedCount=results.scanned,
        ConsumedCapacityUnits=capacity,
    )
    if results.last_key:
        ret['LastEvaluatedKey'] = results.last_key
    if not count_only:
        ret[u'Items'] = results.items
    return ret
def scan(post, table):
    """Run a table scan and return the DynamoDB-style result dict.

    A count-only request may not also ask for specific attributes;
    that combination raises ValidationException.
    """
    if post[u'AttributesToGet'] and post[u'Count']:
        raise ValidationException("Can not filter fields when only count is requested")

    res = table.scan(post[u'ScanFilter'],
                     post[u'AttributesToGet'],
                     post[u'ExclusiveStartKey'],
                     post[u'Limit'])

    # Half-rate billing, as for eventually-consistent reads.
    consumed = 0.5 * res.size.as_units()
    # NOTE(review): pushing *write* throughput for a read operation looks
    # suspicious — verify push_read_throughput was not intended.
    push_write_throughput(table.name, consumed)

    answer = {
        "Count": len(res.items),
        "ScannedCount": res.scanned,
        "ConsumedCapacityUnits": consumed,
    }
    if res.last_key:
        answer['LastEvaluatedKey'] = res.last_key
    if not post[u'Count']:
        answer[u'Items'] = res.items
    return answer
def query(post, table):
    """Query ``table`` and build a DynamoDB-compatible response dict.

    :param post: parsed request body (dict of request parameters).
    :param table: target table object exposing ``query`` and ``name``.
    :returns: dict compatible with the DynamoDB API.
    :raises ValidationException: when 'AttributesToGet' is combined with
        a count-only request.
    """
    if post[u'AttributesToGet'] and post[u'Count']:
        # Fixed message: it previously read "Can filter fields ..." which
        # stated the opposite of the constraint (cf. the scan() sibling).
        raise ValidationException(
            "Can not filter fields when only count is requested")

    # Consistent reads bill at full rate, eventually-consistent at half.
    base_capacity = 1 if post[u'ConsistentRead'] else 0.5

    results = table.query(
        post[u'HashKeyValue'],
        post[u'RangeKeyCondition'],
        post[u'AttributesToGet'],
        post[u'ExclusiveStartKey'],
        not post[u'ScanIndexForward'],
        post[u'Limit'],
    )

    capacity = base_capacity * results.size.as_units()
    # NOTE(review): pushes *write* throughput for a read operation —
    # confirm whether push_read_throughput was intended.
    push_write_throughput(table.name, capacity)

    ret = {
        "Count": len(results.items),
        "ConsumedCapacityUnits": capacity,
    }
    if results.last_key is not None:
        ret['LastEvaluatedKey'] = results.last_key
    if not post[u'Count']:
        ret[u'Items'] = results.items

    return ret
def query(post, table):
    """Query ``table`` and build a DynamoDB-compatible response dict.

    :param post: parsed request body (dict of request parameters).
    :param table: target table object exposing ``query`` and ``name``.
    :returns: dict compatible with the DynamoDB API.
    :raises ValidationException: when 'AttributesToGet' is combined with
        a count-only request.
    """
    if post[u'AttributesToGet'] and post[u'Count']:
        # Fixed message: it previously read "Can filter fields ..." which
        # stated the opposite of the constraint (cf. the scan() sibling).
        raise ValidationException(
            "Can not filter fields when only count is requested")

    # Consistent reads bill at full rate, eventually-consistent at half.
    base_capacity = 1 if post[u'ConsistentRead'] else 0.5

    results = table.query(
        post[u'HashKeyValue'],
        post[u'RangeKeyCondition'],
        post[u'AttributesToGet'],
        post[u'ExclusiveStartKey'],
        not post[u'ScanIndexForward'],
        post[u'Limit'],
    )

    capacity = base_capacity * results.size.as_units()
    # NOTE(review): pushes *write* throughput for a read operation —
    # confirm whether push_read_throughput was intended.
    push_write_throughput(table.name, capacity)

    ret = {
        "Count": len(results.items),
        "ConsumedCapacityUnits": capacity,
    }
    if results.last_key is not None:
        ret['LastEvaluatedKey'] = results.last_key
    if not post[u'Count']:
        ret[u'Items'] = results.items

    return ret
def delete_item(post, table):
    """Delete an item from ``table`` and report consumed write capacity.

    When ReturnValues is "ALL_OLD", the removed item is echoed back
    under "Attributes".
    """
    removed = table.delete_item(post[u"Key"], post[u"Expected"])
    # Deletion is billed on the size of the removed item.
    capacity = removed.get_size().as_units()
    push_write_throughput(table.name, capacity)

    ret = {"ConsumedCapacityUnits": capacity}
    if post[u"ReturnValues"] == "ALL_OLD":
        ret["Attributes"] = removed
    return ret
def put_item(post, table):
    """Store an item in ``table`` and report consumed write capacity.

    When ReturnValues is "ALL_OLD", the replaced item is echoed back
    under "Attributes".
    """
    previous, current = table.put(post[u'Item'], post[u'Expected'])
    # Billed on the larger of the replaced and the stored item.
    capacity = max(previous.get_size().as_units(),
                   current.get_size().as_units())
    push_write_throughput(table.name, capacity)

    response = {"ConsumedCapacityUnits": capacity}
    if post[u'ReturnValues'] == "ALL_OLD":
        response["Attributes"] = previous
    return response
def write_batch(self, batch):
    """Dispatch a raw DynamoDB request batch to the relevant tables.

    Puts are billed on the bigger of the replaced/stored item, deletes on
    the removed item; the summed units are pushed as write throughput
    once per table.

    :param batch: raw DynamoDB request batch (tablename -> operations).
    :returns: dict compatible with the DynamoDB API
        (tablename -> {'ConsumedCapacityUnits': units}).
    """
    ret = defaultdict(dict)

    # ``items()`` instead of Python-2-only ``iteritems()``: iterable on
    # both Python 2 (list) and Python 3 (view), identical loop behavior.
    for tablename, operations in batch.items():
        table = self.get_table(tablename)
        units = ItemSize(0)

        for operation in operations:
            if u'PutRequest' in operation:
                old, new = table.put(operation[u'PutRequest'][u'Item'], {})
                units += max(old.get_size().as_units(),
                             new.get_size().as_units())
            if u'DeleteRequest' in operation:
                old = table.delete_item(operation[u'DeleteRequest'][u'Key'], {})
                units += old.get_size().as_units()

        push_write_throughput(tablename, units)
        ret[tablename][u'ConsumedCapacityUnits'] = units

    return ret
def update_item(post, table):
    """Apply attribute updates to an item and report write capacity.

    Honors ReturnValues: ALL_OLD / ALL_NEW echo the full item before /
    after the update; UPDATED_OLD / UPDATED_NEW echo only the changed
    attributes (computed via the item difference operator).
    """
    previous, current = table.update_item(
        post[u'Key'],
        post[u'AttributeUpdates'],
        post[u'Expected'],
    )
    # Billed on the larger of the pre- and post-update item sizes.
    capacity = max(previous.get_size().as_units(),
                   current.get_size().as_units())
    push_write_throughput(table.name, capacity)

    ret = {"ConsumedCapacityUnits": capacity}

    mode = post[u'ReturnValues']
    if mode == "ALL_OLD":
        ret["Attributes"] = previous
    elif mode == "ALL_NEW":
        ret["Attributes"] = current
    elif mode == "UPDATED_OLD":
        ret["Attributes"] = previous - current
    elif mode == "UPDATED_NEW":
        ret["Attributes"] = current - previous

    return ret