def test_non_ascii_chars(self, dynamodb):
    """Items containing non-ASCII characters must round-trip unchanged."""
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
    table = dynamodb.Table(TEST_DDB_TABLE_NAME)

    # write some items containing non-ASCII characters
    items = {
        "id1": {PARTITION_KEY: "id1", "data": "foobar123 ✓"},
        "id2": {PARTITION_KEY: "id2", "data": "foobar123 £"},
        "id3": {PARTITION_KEY: "id3", "data": "foobar123 ¢"},
    }
    for item in items.values():
        table.put_item(Item=item)

    for item_id, expected in items.items():
        stored = table.get_item(Key={PARTITION_KEY: item_id})["Item"]
        # need to fix up the JSON and convert str to unicode for Python 2
        assert json_safe(stored) == json_safe(expected)

    # clean up
    delete_table(TEST_DDB_TABLE_NAME)
def test_stream_spec_and_region_replacement(self):
    """ARNs and shard IDs of a stream-enabled table must have the expected formats."""
    aws_stack.create_dynamodb_table(
        TEST_DDB_TABLE_NAME_4,
        partition_key=PARTITION_KEY,
        stream_view_type='NEW_AND_OLD_IMAGES',
    )
    table = self.dynamodb.Table(TEST_DDB_TABLE_NAME_4)

    # assert ARN formats
    arn_prefix = 'arn:aws:dynamodb:' + aws_stack.get_local_region()
    self.assertTrue(table.table_arn.startswith(arn_prefix))
    self.assertTrue(table.latest_stream_arn.startswith(arn_prefix))

    # assert shard ID formats
    ddbstreams = aws_stack.connect_to_service('dynamodbstreams')
    description = ddbstreams.describe_stream(
        StreamArn=table.latest_stream_arn)['StreamDescription']
    self.assertIn('Shards', description)
    for shard in description['Shards']:
        self.assertRegex(shard['ShardId'], r'^shardId\-[0-9]{20}\-[a-zA-Z0-9]{1,36}$')

    # clean up
    delete_table(TEST_DDB_TABLE_NAME_4)
def test_return_values_in_put_item(self, dynamodb):
    """Verify ReturnValues='ALL_OLD' semantics of put_item."""
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
    table = dynamodb.Table(TEST_DDB_TABLE_NAME)

    # items which are being used to put in the table
    item1 = {PARTITION_KEY: "id1", "data": "foobar"}
    item2 = {PARTITION_KEY: "id2", "data": "foobar"}

    # there is no data present in the table already so even if return values
    # is set to 'ALL_OLD' as there is no data it will not return any data.
    response = table.put_item(Item=item1, ReturnValues="ALL_OLD")
    assert not response.get("Attributes")

    # now the same data is present so when we pass return values as 'ALL_OLD'
    # it should give us attributes
    response = table.put_item(Item=item1, ReturnValues="ALL_OLD")
    attributes = response.get("Attributes")
    assert attributes
    assert attributes.get("id") == item1.get("id")
    assert attributes.get("data") == item1.get("data")

    # we do not have any same item as item2 already so when we add this by default
    # return values is set to None so no Attribute values should be returned
    response = table.put_item(Item=item2)
    assert not response.get("Attributes")

    # in this case we already have item2 in the table so on this request
    # it should not return any data as return values is set to None so no
    # Attribute values should be returned
    response = table.put_item(Item=item2)
    assert not response.get("Attributes")
def test_multiple_update_expressions(self):
    """A single UpdateExpression setting multiple attributes must apply all of them."""
    dynamodb = aws_stack.connect_to_service('dynamodb')
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
    table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)

    item_id = short_uid()
    table.put_item(Item={PARTITION_KEY: item_id, 'data': 'foobar123 ✓'})

    response = dynamodb.update_item(
        TableName=TEST_DDB_TABLE_NAME,
        Key={PARTITION_KEY: {'S': item_id}},
        UpdateExpression='SET attr1 = :v1, attr2 = :v2',
        ExpressionAttributeValues={':v1': {'S': 'value1'}, ':v2': {'S': 'value2'}},
    )
    self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # both attributes must have been written by the single update
    updated = table.get_item(Key={PARTITION_KEY: item_id})['Item']
    self.assertEqual(updated['attr1'], 'value1')
    self.assertEqual(updated['attr2'], 'value2')
def test_global_tables(self):
    """Create, describe, and update a global table; assert error cases."""
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
    dynamodb = aws_stack.connect_to_service('dynamodb')

    # create global table
    regions = [
        {'RegionName': 'us-east-1'},
        {'RegionName': 'us-west-1'},
        {'RegionName': 'eu-central-1'},
    ]
    description = dynamodb.create_global_table(
        GlobalTableName=TEST_DDB_TABLE_NAME,
        ReplicationGroup=regions)['GlobalTableDescription']
    self.assertIn('ReplicationGroup', description)
    self.assertEqual(len(regions), len(description['ReplicationGroup']))

    # describe global table
    description = dynamodb.describe_global_table(
        GlobalTableName=TEST_DDB_TABLE_NAME)['GlobalTableDescription']
    self.assertIn('ReplicationGroup', description)
    self.assertEqual(len(regions), len(description['ReplicationGroup']))

    # update global table: two new replicas added, one removed -> net +1
    updates = [
        {'Create': {'RegionName': 'us-east-2'}},
        {'Create': {'RegionName': 'us-west-2'}},
        {'Delete': {'RegionName': 'us-west-1'}},
    ]
    description = dynamodb.update_global_table(
        GlobalTableName=TEST_DDB_TABLE_NAME,
        ReplicaUpdates=updates)['GlobalTableDescription']
    self.assertIn('ReplicationGroup', description)
    self.assertEqual(len(regions) + 1, len(description['ReplicationGroup']))

    # assert exceptions for invalid requests
    with self.assertRaises(Exception) as ctx:
        dynamodb.create_global_table(
            GlobalTableName=TEST_DDB_TABLE_NAME, ReplicationGroup=regions)
    self.assertIn('GlobalTableAlreadyExistsException', str(ctx.exception))

    with self.assertRaises(Exception) as ctx:
        dynamodb.describe_global_table(GlobalTableName='invalid-table-name')
    self.assertIn('GlobalTableNotFoundException', str(ctx.exception))
def test_query_on_deleted_resource(self):
    """Querying a deleted table must raise ResourceNotFoundException."""
    table_name = 'ddb-table-%s' % short_uid()
    partition_key = 'username'
    dynamodb = aws_stack.connect_to_service('dynamodb')
    aws_stack.create_dynamodb_table(table_name, partition_key)

    query_kwargs = dict(
        TableName=table_name,
        KeyConditionExpression='{} = :username'.format(partition_key),
        ExpressionAttributeValues={':username': {'S': 'test'}},
    )

    # querying the live table succeeds
    rs = dynamodb.query(**query_kwargs)
    self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)

    dynamodb.delete_table(TableName=table_name)

    # the same query must now fail
    with self.assertRaises(Exception) as ctx:
        dynamodb.query(**query_kwargs)
    self.assertIn('ResourceNotFoundException', str(ctx.exception))
def forward_to_fallback_url(func_arn, data):
    """If LAMBDA_FALLBACK_URL is configured, forward the invocation of
    this non-existing Lambda to the configured URL."""
    fallback_url = config.LAMBDA_FALLBACK_URL
    if not fallback_url:
        return None

    if fallback_url.startswith('dynamodb://'):
        # extract the table name from the netloc of the pseudo-URL
        table_name = urlparse(fallback_url.replace('dynamodb://', 'http://')).netloc
        dynamodb = aws_stack.connect_to_service('dynamodb')
        item = {
            'id': {'S': short_uid()},
            'timestamp': {'N': str(now_utc())},
            'payload': {'S': str(data)},
        }
        aws_stack.create_dynamodb_table(table_name, partition_key='id')
        dynamodb.put_item(TableName=table_name, Item=item)
        return ''

    if re.match(r'^https?://.+', fallback_url):
        response = safe_requests.post(fallback_url, data)
        return response.content

    raise ClientError('Unexpected value for LAMBDA_FALLBACK_URL: %s' % fallback_url)
def test_query_on_deleted_resource(self):
    """Querying a deleted table must raise ResourceNotFoundException."""
    table_name = "ddb-table-%s" % short_uid()
    partition_key = "username"
    dynamodb = aws_stack.connect_to_service("dynamodb")
    aws_stack.create_dynamodb_table(table_name, partition_key)

    query_kwargs = dict(
        TableName=table_name,
        KeyConditionExpression="{} = :username".format(partition_key),
        ExpressionAttributeValues={":username": {"S": "test"}},
    )

    # querying the live table succeeds
    rs = dynamodb.query(**query_kwargs)
    self.assertEqual(200, rs["ResponseMetadata"]["HTTPStatusCode"])

    dynamodb.delete_table(TableName=table_name)

    # the same query must now fail
    with self.assertRaises(Exception) as ctx:
        dynamodb.query(**query_kwargs)
    self.assertIn("ResourceNotFoundException", str(ctx.exception))
def test_stream_spec_and_region_replacement(self):
    """Streams and their backing Kinesis stream must be created with the table
    and removed again when the table is deleted."""
    ddbstreams = aws_stack.connect_to_service('dynamodbstreams')
    kinesis = aws_stack.connect_to_service('kinesis')
    table_name = 'ddb-%s' % short_uid()

    aws_stack.create_dynamodb_table(
        table_name, partition_key=PARTITION_KEY, stream_view_type='NEW_AND_OLD_IMAGES')
    table = self.dynamodb.Table(table_name)

    # assert ARN formats
    arn_prefix = 'arn:aws:dynamodb:' + aws_stack.get_local_region()
    self.assertTrue(table.table_arn.startswith(arn_prefix))
    self.assertTrue(table.latest_stream_arn.startswith(arn_prefix))

    # assert stream has been created
    stream_tables = [s['TableName'] for s in ddbstreams.list_streams()['Streams']]
    self.assertIn(table_name, stream_tables)
    stream_name = get_kinesis_stream_name(table_name)
    self.assertIn(stream_name, kinesis.list_streams()['StreamNames'])

    # assert shard ID formats
    description = ddbstreams.describe_stream(
        StreamArn=table.latest_stream_arn)['StreamDescription']
    self.assertIn('Shards', description)
    for shard in description['Shards']:
        self.assertRegex(shard['ShardId'], r'^shardId\-[0-9]{20}\-[a-zA-Z0-9]{1,36}$')

    # clean up
    delete_table(table_name)

    # assert stream has been deleted
    stream_tables = [s['TableName'] for s in ddbstreams.list_streams()['Streams']]
    self.assertNotIn(table_name, stream_tables)
    self.assertNotIn(stream_name, kinesis.list_streams()['StreamNames'])
def test_dynamodb_error_injection():
    """With DYNAMODB_ERROR_PROBABILITY=1, every request must raise a ClientError."""
    if not do_run():
        return

    dynamodb = aws_stack.connect_to_resource('dynamodb')

    # create table with stream forwarding config
    aws_stack.create_dynamodb_table(TEST_TABLE_NAME, partition_key=PARTITION_KEY)
    table = dynamodb.Table(TEST_TABLE_NAME)

    # by default, no errors
    response = table.put_item(Item={PARTITION_KEY: short_uid(), 'data': 'foobar123'})
    assert_equal(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # with a probability of 1, always throw errors
    config.DYNAMODB_ERROR_PROBABILITY = 1.0
    assert_raises(
        ClientError, table.put_item,
        Item={PARTITION_KEY: short_uid(), 'data': 'foobar123'})

    # reset probability to zero
    config.DYNAMODB_ERROR_PROBABILITY = 0.0
def test_non_ascii_chars(self):
    """Items containing non-ASCII characters must round-trip unchanged."""
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
    table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)

    # write some items containing non-ASCII characters
    items = {
        'id1': {PARTITION_KEY: 'id1', 'data': 'foobar123 ✓'},
        'id2': {PARTITION_KEY: 'id2', 'data': 'foobar123 £'},
        'id3': {PARTITION_KEY: 'id3', 'data': 'foobar123 ¢'},
    }
    for item in items.values():
        table.put_item(Item=item)

    for item_id, expected in items.items():
        stored = table.get_item(Key={PARTITION_KEY: item_id})['Item']
        # need to fix up the JSON and convert str to unicode for Python 2
        self.assertEqual(json_safe(stored), json_safe(expected))

    # clean up
    delete_table(TEST_DDB_TABLE_NAME)
def test_return_values_in_put_item(self):
    """Verify ReturnValues='ALL_OLD' semantics of put_item."""
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
    table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)

    # items which are being used to put in the table
    item1 = {PARTITION_KEY: 'id1', 'data': 'foobar'}
    item2 = {PARTITION_KEY: 'id2', 'data': 'foobar'}

    # there is no data present in the table already so even if return values
    # is set to 'ALL_OLD' as there is no data it will not return any data.
    response = table.put_item(Item=item1, ReturnValues='ALL_OLD')
    self.assertFalse(response.get('Attributes'))

    # now the same data is present so when we pass return values as 'ALL_OLD'
    # it should give us attributes
    response = table.put_item(Item=item1, ReturnValues='ALL_OLD')
    attributes = response.get('Attributes')
    self.assertTrue(attributes)
    self.assertEqual(attributes.get('id'), item1.get('id'))
    self.assertEqual(attributes.get('data'), item1.get('data'))

    # we do not have any same item as item2 already so when we add this by default
    # return values is set to None so no Attribute values should be returned
    response = table.put_item(Item=item2)
    self.assertFalse(response.get('Attributes'))

    # in this case we already have item2 in the table so on this request
    # it should not return any data as return values is set to None so no
    # Attribute values should be returned
    response = table.put_item(Item=item2)
    self.assertFalse(response.get('Attributes'))
def test_query_on_deleted_resource(self):
    """Querying a deleted table must raise ResourceNotFoundException."""
    table_name = "ddb-table-%s" % short_uid()
    partition_key = "username"
    dynamodb = aws_stack.create_external_boto_client("dynamodb")
    aws_stack.create_dynamodb_table(table_name, partition_key)

    query_kwargs = dict(
        TableName=table_name,
        KeyConditionExpression="{} = :username".format(partition_key),
        ExpressionAttributeValues={":username": {"S": "test"}},
    )

    # querying the live table succeeds
    rs = dynamodb.query(**query_kwargs)
    assert rs["ResponseMetadata"]["HTTPStatusCode"] == 200

    dynamodb.delete_table(TableName=table_name)

    # the same query must now fail
    with pytest.raises(Exception) as ctx:
        dynamodb.query(**query_kwargs)
    assert ctx.match("ResourceNotFoundException")
def test_multiple_update_expressions(self):
    """Multi-attribute UpdateExpression applies all attributes; querying a GSI
    that projects only some attributes with Select='ALL_ATTRIBUTES' must fail."""
    dynamodb = aws_stack.connect_to_service('dynamodb')
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
    table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)

    item_id = short_uid()
    table.put_item(Item={PARTITION_KEY: item_id, 'data': 'foobar123 ✓'})

    response = dynamodb.update_item(
        TableName=TEST_DDB_TABLE_NAME,
        Key={PARTITION_KEY: {'S': item_id}},
        UpdateExpression='SET attr1 = :v1, attr2 = :v2',
        ExpressionAttributeValues={':v1': {'S': 'value1'}, ':v2': {'S': 'value2'}},
    )
    self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # both attributes must have been written by the single update
    updated = table.get_item(Key={PARTITION_KEY: item_id})['Item']
    self.assertEqual(updated['attr1'], 'value1')
    self.assertEqual(updated['attr2'], 'value2')

    # add a GSI projecting only the 'data' attribute
    attributes = [{'AttributeName': 'id', 'AttributeType': STRING}]
    user_id_idx = [
        {
            'Create': {
                'IndexName': 'id-index',
                'KeySchema': [{'AttributeName': 'id', 'KeyType': 'HASH'}],
                'Projection': {
                    'ProjectionType': 'INCLUDE',
                    'NonKeyAttributes': ['data'],
                },
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': 5,
                    'WriteCapacityUnits': 5,
                },
            }
        },
    ]  # for each index
    table.update(AttributeDefinitions=attributes, GlobalSecondaryIndexUpdates=user_id_idx)

    # ALL_ATTRIBUTES is invalid on an INCLUDE-projection index
    with self.assertRaises(Exception) as ctx:
        table.query(
            TableName=TEST_DDB_TABLE_NAME,
            IndexName='id-index',
            KeyConditionExpression=Key(PARTITION_KEY).eq(item_id),
            Select='ALL_ATTRIBUTES')
    self.assertIn('ValidationException', str(ctx.exception))
def get_dynamodb_table(self):
    """Create a fresh table with a random name and return its Table resource."""
    # set max_attempts=1 to speed up the test execution
    resource = aws_stack.connect_to_resource("dynamodb", config=self.retry_config())
    name = f"table-{short_uid()}"
    aws_stack.create_dynamodb_table(name, partition_key=PARTITION_KEY)
    return resource.Table(name)
def test_dynamodb_stream_records_with_update_item(self):
    """update_item on a fresh key must emit an INSERT record, then a MODIFY record."""
    table_name = 'test-ddb-table-%s' % short_uid()
    dynamodb = aws_stack.connect_to_service('dynamodb')
    ddbstreams = aws_stack.connect_to_service('dynamodbstreams')

    aws_stack.create_dynamodb_table(
        table_name, partition_key=PARTITION_KEY, stream_view_type='NEW_AND_OLD_IMAGES'
    )
    table = self.dynamodb.Table(table_name)

    response = ddbstreams.describe_stream(StreamArn=table.latest_stream_arn)
    self.assertEqual(200, response['ResponseMetadata']['HTTPStatusCode'])
    shards = response['StreamDescription']['Shards']
    self.assertEqual(1, len(shards))
    shard_id = shards[0]['ShardId']
    starting_sequence_number = int(
        shards[0].get('SequenceNumberRange').get('StartingSequenceNumber'))

    response = ddbstreams.get_shard_iterator(
        StreamArn=table.latest_stream_arn, ShardId=shard_id, ShardIteratorType='LATEST'
    )
    self.assertEqual(200, response['ResponseMetadata']['HTTPStatusCode'])
    self.assertIn('ShardIterator', response)
    iterator_id = response['ShardIterator']

    item_id = short_uid()
    # first update creates the item (INSERT), second one modifies it (MODIFY)
    for _ in range(2):
        dynamodb.update_item(
            TableName=table_name,
            Key={PARTITION_KEY: {'S': item_id}},
            UpdateExpression='SET attr1 = :v1, attr2 = :v2',
            ExpressionAttributeValues={':v1': {'S': 'value1'}, ':v2': {'S': 'value2'}},
            ReturnValues='ALL_NEW',
            ReturnConsumedCapacity='INDEXES',
        )

    records = ddbstreams.get_records(ShardIterator=iterator_id)
    self.assertEqual(200, records['ResponseMetadata']['HTTPStatusCode'])
    self.assertEqual(2, len(records['Records']))
    insert_record, modify_record = records['Records']

    self.assertTrue(isinstance(insert_record['dynamodb']['ApproximateCreationDateTime'], datetime))
    self.assertEqual('1.1', insert_record['eventVersion'])
    self.assertEqual('INSERT', insert_record['eventName'])
    # INSERT records carry no OldImage
    self.assertNotIn('OldImage', insert_record['dynamodb'])
    self.assertGreater(int(insert_record['dynamodb']['SequenceNumber']), starting_sequence_number)

    self.assertTrue(isinstance(modify_record['dynamodb']['ApproximateCreationDateTime'], datetime))
    self.assertEqual('1.1', modify_record['eventVersion'])
    self.assertEqual('MODIFY', modify_record['eventName'])
    # MODIFY records include the previous item image
    self.assertIn('OldImage', modify_record['dynamodb'])
    self.assertGreater(int(modify_record['dynamodb']['SequenceNumber']), starting_sequence_number)

    dynamodb.delete_table(TableName=table_name)
def test_create_object_put_via_dynamodb(
    self, s3_client, lambda_client, dynamodb_client, s3_create_bucket
):
    """An S3 put must trigger the Lambda, whose event is persisted to DynamoDB."""
    # TODO: inline lambda function
    bucket_name = s3_create_bucket()
    function_name = "func-%s" % short_uid()
    table_name = "table-%s" % short_uid()

    testutil.create_lambda_function(
        handler_file=TEST_LAMBDA_PYTHON_TRIGGERED_S3,
        func_name=function_name,
        runtime=LAMBDA_RUNTIME_PYTHON36,
        client=lambda_client,
    )
    # this test uses dynamodb as an intermediary to get the notifications from the lambda back to the test
    aws_stack.create_dynamodb_table(
        table_name=table_name, partition_key="uuid", client=dynamodb_client
    )

    try:
        s3_client.put_bucket_notification_configuration(
            Bucket=bucket_name,
            NotificationConfiguration={
                "LambdaFunctionConfigurations": [
                    {
                        "LambdaFunctionArn": aws_stack.lambda_function_arn(function_name),
                        "Events": ["s3:ObjectCreated:*"],
                    }
                ]
            },
        )

        # put an object
        obj = s3_client.put_object(Bucket=bucket_name, Key=table_name, Body="something..")
        etag = obj["ETag"]
        time.sleep(2)

        table = aws_stack.connect_to_resource("dynamodb").Table(table_name)

        def check_table():
            rs = table.scan()
            assert len(rs["Items"]) == 1
            return rs

        rs = retry(check_table, retries=4, sleep=3)
        event = rs["Items"][0]["data"]

        assert event["eventSource"] == "aws:s3"
        assert event["eventName"] == "ObjectCreated:Put"
        assert event["s3"]["bucket"]["name"] == bucket_name
        assert event["s3"]["object"]["eTag"] == etag
    finally:
        # clean up
        lambda_client.delete_function(FunctionName=function_name)
        dynamodb_client.delete_table(TableName=table_name)
def test_time_to_live(self):
    """Toggle TTL on a table and verify the reported status after each change."""
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    dynamodb_client = aws_stack.connect_to_service('dynamodb')
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME_3, partition_key=PARTITION_KEY)
    table = dynamodb.Table(TEST_DDB_TABLE_NAME_3)

    # Insert some items to the table
    items = {
        'id1': {PARTITION_KEY: 'id1', 'data': 'IT IS'},
        'id2': {PARTITION_KEY: 'id2', 'data': 'TIME'},
        'id3': {PARTITION_KEY: 'id3', 'data': 'TO LIVE!'},
    }
    for item in items.values():
        table.put_item(Item=item)

    def check_ttl_status(expected_status):
        # Describe TTL and compare the reported status
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        assert response.status_code == 200
        body = json.loads(response._content)
        assert body['TimeToLiveDescription']['TimeToLiveStatus'] == expected_status

    def toggle_ttl(enabled):
        response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, enabled)
        assert response.status_code == 200
        assert json.loads(response._content)['TimeToLiveSpecification']['Enabled'] is enabled

    # Describe TTL when still unset.
    check_ttl_status('DISABLED')
    # Enable TTL for given table
    toggle_ttl(True)
    check_ttl_status('ENABLED')
    # Disable TTL for given table
    toggle_ttl(False)
    check_ttl_status('DISABLED')
    # Enable TTL for given table again
    toggle_ttl(True)
    check_ttl_status('ENABLED')

    # Clean up table
    dynamodb_client.delete_table(TableName=TEST_DDB_TABLE_NAME_3)
def test_region_replacement(self):
    """Table and stream ARNs must be prefixed with the local region."""
    dynamodb = aws_stack.connect_to_resource('dynamodb')
    aws_stack.create_dynamodb_table(
        TEST_DDB_TABLE_NAME_4,
        partition_key=PARTITION_KEY,
        stream_view_type='NEW_AND_OLD_IMAGES',
    )
    table = dynamodb.Table(TEST_DDB_TABLE_NAME_4)

    arn_prefix = 'arn:aws:dynamodb:' + aws_stack.get_local_region()
    assert table.table_arn.startswith(arn_prefix)
    assert table.latest_stream_arn.startswith(arn_prefix)
def test_empty_and_binary_values(self):
    """Empty strings and binary payloads must both be accepted by put_item."""
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
    table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)

    # items which are being used to put in the table
    empty_item = {PARTITION_KEY: 'id1', 'data': ''}
    binary_item = {PARTITION_KEY: 'id2', 'data': b'foobar'}

    for item in (empty_item, binary_item):
        response = table.put_item(Item=item)
        self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
def create_api_gateway_and_deploy(response_template, is_api_key_required=False):
    """Create a REST API with a PUT method proxying to DynamoDB and deploy it.

    Returns the ID of the created API.
    """
    apigw_client = aws_stack.connect_to_service('apigateway')
    api_id = apigw_client.create_rest_api(
        name='my_api', description='this is my api')['id']

    # look up the root ('/') resource of the new API
    resources = apigw_client.get_resources(restApiId=api_id)
    root_id = next(
        resource['id'] for resource in resources['items'] if resource['path'] == '/')

    apigw_client.put_method(
        restApiId=api_id,
        resourceId=root_id,
        httpMethod='PUT',
        authorizationType='NONE',
        apiKeyRequired=is_api_key_required)
    apigw_client.put_method_response(
        restApiId=api_id,
        resourceId=root_id,
        httpMethod='PUT',
        statusCode='200',
    )

    aws_stack.create_dynamodb_table('MusicCollection', partition_key='id')

    # Ensure that it works fine when providing the integrationHttpMethod-argument
    apigw_client.put_integration(
        restApiId=api_id,
        resourceId=root_id,
        httpMethod='PUT',
        type='AWS_PROXY',
        uri='arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection',
        integrationHttpMethod='PUT',
    )
    apigw_client.put_integration_response(
        restApiId=api_id,
        resourceId=root_id,
        httpMethod='PUT',
        statusCode='200',
        selectionPattern='',
        responseTemplates=response_template)

    apigw_client.create_deployment(restApiId=api_id, stageName='staging')
    return api_id
def test_empty_and_binary_values(self):
    """Empty strings and binary payloads must both be accepted by put_item."""
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
    table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)

    # items which are being used to put in the table
    empty_item = {PARTITION_KEY: "id1", "data": ""}
    binary_item = {PARTITION_KEY: "id2", "data": b"foobar"}

    for item in (empty_item, binary_item):
        response = table.put_item(Item=item)
        self.assertEqual(200, response["ResponseMetadata"]["HTTPStatusCode"])
def test_time_to_live(self):
    """Toggle TTL on a table and verify the reported status after each change."""
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME_3, partition_key=PARTITION_KEY)
    table = self.dynamodb.Table(TEST_DDB_TABLE_NAME_3)

    # Insert some items to the table
    items = {
        'id1': {PARTITION_KEY: 'id1', 'data': 'IT IS'},
        'id2': {PARTITION_KEY: 'id2', 'data': 'TIME'},
        'id3': {PARTITION_KEY: 'id3', 'data': 'TO LIVE!'},
    }
    for item in items.values():
        table.put_item(Item=item)

    def check_ttl_status(expected_status):
        # Describe TTL and compare the reported status
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        self.assertEqual(response.status_code, 200)
        body = json.loads(response._content)
        self.assertEqual(body['TimeToLiveDescription']['TimeToLiveStatus'], expected_status)

    def toggle_ttl(enabled):
        response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, enabled)
        self.assertEqual(response.status_code, 200)
        actual = json.loads(response._content)['TimeToLiveSpecification']['Enabled']
        self.assertEqual(bool(actual), enabled)

    # Describe TTL when still unset.
    check_ttl_status('DISABLED')
    # Enable TTL for given table
    toggle_ttl(True)
    check_ttl_status('ENABLED')
    # Disable TTL for given table
    toggle_ttl(False)
    check_ttl_status('DISABLED')
    # Enable TTL for given table again
    toggle_ttl(True)
    check_ttl_status('ENABLED')

    # clean up
    delete_table(TEST_DDB_TABLE_NAME_3)
def test_dynamodb_stream_to_lambda(self):
    """A put_item must be delivered to a Lambda via the table's stream mapping."""
    table_name = "ddb-table-%s" % short_uid()
    function_name = "func-%s" % short_uid()
    partition_key = "SK"

    aws_stack.create_dynamodb_table(
        table_name=table_name,
        partition_key=partition_key,
        stream_view_type="NEW_AND_OLD_IMAGES",
    )
    table = self.dynamodb.Table(table_name)
    latest_stream_arn = table.latest_stream_arn

    testutil.create_lambda_function(
        handler_file=TEST_LAMBDA_PYTHON_ECHO,
        func_name=function_name,
        runtime=LAMBDA_RUNTIME_PYTHON36,
    )
    lambda_client = aws_stack.create_external_boto_client("lambda")
    lambda_client.create_event_source_mapping(
        EventSourceArn=latest_stream_arn, FunctionName=function_name)

    item = {"SK": short_uid(), "Name": "name-{}".format(short_uid())}
    table.put_item(Item=item)

    # wait until the Lambda received exactly one invocation with Records
    events = retry(
        check_expected_lambda_log_events_length,
        retries=3,
        sleep=1,
        function_name=function_name,
        expected_length=1,
        regex_filter=r"Records",
    )

    self.assertEqual(1, len(events))
    self.assertEqual(1, len(events[0]["Records"]))

    dynamodb_event = events[0]["Records"][0]["dynamodb"]
    self.assertEqual("NEW_AND_OLD_IMAGES", dynamodb_event["StreamViewType"])
    self.assertEqual({"SK": {"S": item["SK"]}}, dynamodb_event["Keys"])
    self.assertEqual({"S": item["Name"]}, dynamodb_event["NewImage"]["Name"])
    self.assertIn("SequenceNumber", dynamodb_event)

    dynamodb = aws_stack.create_external_boto_client("dynamodb")
    dynamodb.delete_table(TableName=table_name)
def test_deletion_event_source_mapping_with_dynamodb(self):
    """Deleting a table must also remove event source mappings on its ARN."""
    function_name = "lambda_func-{}".format(short_uid())
    ddb_table = "ddb_table-{}".format(short_uid())

    testutil.create_lambda_function(
        handler_file=TEST_LAMBDA_PYTHON_ECHO,
        func_name=function_name,
        runtime=LAMBDA_RUNTIME_PYTHON36,
    )
    table_arn = aws_stack.create_dynamodb_table(
        ddb_table, partition_key="id")["TableDescription"]["TableArn"]

    lambda_client = aws_stack.create_external_boto_client("lambda")
    lambda_client.create_event_source_mapping(
        FunctionName=function_name, EventSourceArn=table_arn)

    dynamodb_client = aws_stack.create_external_boto_client("dynamodb")
    dynamodb_client.delete_table(TableName=ddb_table)

    # the mapping must be gone along with the table
    result = lambda_client.list_event_source_mappings(EventSourceArn=table_arn)
    self.assertEqual(0, len(result["EventSourceMappings"]))

    # clean up
    lambda_client.delete_function(FunctionName=function_name)
def test_large_data_download(self):
    """Scanning a table with large items must return all of them."""
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME_2, partition_key=PARTITION_KEY)
    table = self.dynamodb.Table(TEST_DDB_TABLE_NAME_2)

    # Create a large amount of items
    num_items = 20
    for i in range(num_items):
        table.put_item(Item={PARTITION_KEY: 'id%s' % i, 'data1': 'foobar123 ' * 1000})

    # Retrieve the items. The data will be transmitted to the client with chunked transfer encoding
    result = table.scan(TableName=TEST_DDB_TABLE_NAME_2)
    self.assertEqual(len(result['Items']), num_items)

    # clean up
    delete_table(TEST_DDB_TABLE_NAME_2)
def test_deletion_event_source_mapping_with_dynamodb(
    self, create_lambda_function, lambda_client
):
    """Deleting a table must also remove event source mappings on its ARN."""
    function_name = f"lambda_func-{short_uid()}"
    ddb_table = f"ddb_table-{short_uid()}"

    create_lambda_function(
        func_name=function_name,
        handler_file=TEST_LAMBDA_PYTHON_ECHO,
        runtime=LAMBDA_RUNTIME_PYTHON36,
    )
    creation = aws_stack.create_dynamodb_table(ddb_table, partition_key="id")
    table_arn = creation["TableDescription"]["TableArn"]

    lambda_client.create_event_source_mapping(
        FunctionName=function_name, EventSourceArn=table_arn
    )

    dynamodb_client = aws_stack.create_external_boto_client("dynamodb")
    dynamodb_client.delete_table(TableName=ddb_table)

    # the mapping must be gone along with the table
    result = lambda_client.list_event_source_mappings(EventSourceArn=table_arn)
    assert 0 == len(result["EventSourceMappings"])
def test_deletion_event_source_mapping_with_dynamodb(
    self, create_lambda_function, lambda_client, dynamodb_client, lambda_su_role):
    """Deleting a table must leave its stream event source mapping in place.

    Bug fix: `event_source_mapping_uuid` was assigned only inside the `try`
    block. If setup failed before `create_event_source_mapping` returned
    (e.g. lambda creation or table creation raised), the `finally` clause
    raised UnboundLocalError and masked the original failure. The variable
    is now initialized to None and the cleanup is guarded.
    """
    event_source_mapping_uuid = None
    try:
        function_name = f"lambda_func-{short_uid()}"
        ddb_table = f"ddb_table-{short_uid()}"

        create_lambda_function(
            func_name=function_name,
            handler_file=TEST_LAMBDA_PYTHON_ECHO,
            runtime=LAMBDA_RUNTIME_PYTHON36,
            role=lambda_su_role,
        )
        latest_stream_arn = aws_stack.create_dynamodb_table(
            table_name=ddb_table,
            partition_key="id",
            client=dynamodb_client,
            stream_view_type="NEW_IMAGE",
        )["TableDescription"]["LatestStreamArn"]

        result = lambda_client.create_event_source_mapping(
            FunctionName=function_name,
            EventSourceArn=latest_stream_arn,
            StartingPosition="TRIM_HORIZON",
        )
        event_source_mapping_uuid = result["UUID"]

        _await_dynamodb_table_active(dynamodb_client, ddb_table)
        dynamodb_client.delete_table(TableName=ddb_table)

        # the mapping is bound to the stream ARN, which survives table deletion
        result = lambda_client.list_event_source_mappings(
            EventSourceArn=latest_stream_arn)
        assert 1 == len(result["EventSourceMappings"])
    finally:
        # only clean up the mapping if it was actually created
        if event_source_mapping_uuid:
            lambda_client.delete_event_source_mapping(
                UUID=event_source_mapping_uuid)
def test_region_replacement(self):
    """Table and stream ARNs must be prefixed with the local region."""
    aws_stack.create_dynamodb_table(
        TEST_DDB_TABLE_NAME_4,
        partition_key=PARTITION_KEY,
        stream_view_type='NEW_AND_OLD_IMAGES',
    )
    table = self.dynamodb.Table(TEST_DDB_TABLE_NAME_4)

    arn_prefix = 'arn:aws:dynamodb:' + aws_stack.get_local_region()
    self.assertTrue(table.table_arn.startswith(arn_prefix))
    self.assertTrue(table.latest_stream_arn.startswith(arn_prefix))

    # clean up
    delete_table(TEST_DDB_TABLE_NAME_4)
def test_large_data_download(self, dynamodb):
    """Scanning a table with large items must return all of them."""
    aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME_2, partition_key=PARTITION_KEY)
    table = dynamodb.Table(TEST_DDB_TABLE_NAME_2)

    # Create a large amount of items
    num_items = 20
    for i in range(num_items):
        table.put_item(Item={PARTITION_KEY: "id%s" % i, "data1": "foobar123 " * 1000})

    # Retrieve the items. The data will be transmitted to the client with chunked transfer encoding
    result = table.scan(TableName=TEST_DDB_TABLE_NAME_2)
    assert len(result["Items"]) == num_items

    # clean up
    delete_table(TEST_DDB_TABLE_NAME_2)