def test_get_unexisting_keys(self):
    """BatchGetItem silently skips keys that do not exist in the table."""
    self.connection(_lv.BatchWriteItem().table("Aaa").put(
        {"h": "1", "a": "xxx"},
        {"h": "2", "a": "yyy"},
    ))
    response = self.connection(
        _lv.BatchGetItem().table("Aaa").keys({"h": "1"}, {"h": "2"}, {"h": "3"})
    )
    # Key "3" was never written: only the two existing items come back.
    self.assertEqual(
        sorted(response.responses["Aaa"], key=lambda item: item["h"]),
        [{"h": "1", "a": "xxx"}, {"h": "2", "a": "yyy"}],
    )
    self.assertEqual(response.unprocessed_keys, {})
def test(self):
    """iterate_query follows LastEvaluatedKey across successive Query calls."""
    key_conditions = {"h": {"ComparisonOperator": "EQ", "AttributeValueList": [{"N": "0"}]}}
    # First page: two items plus a LastEvaluatedKey signalling more data.
    self.connection.expect._call_.withArguments(
        self.ActionChecker(
            "Query",
            {"TableName": "Table", "KeyConditions": key_conditions},
        )
    ).andReturn(
        _lv.QueryResponse(
            Items=[{"h": {"N": "0"}, "r": {"S": "foo"}}, {"h": {"N": "0"}, "r": {"S": "bar"}}],
            LastEvaluatedKey={"h": {"N": "0"}, "r": {"S": "bar"}},
        )
    )
    # Second page: must resume from the key returned by the first page.
    self.connection.expect._call_.withArguments(
        self.ActionChecker(
            "Query",
            {
                "TableName": "Table",
                "KeyConditions": key_conditions,
                "ExclusiveStartKey": {"h": {"N": "0"}, "r": {"S": "bar"}},
            },
        )
    ).andReturn(
        _lv.QueryResponse(
            Items=[{"h": {"N": "0"}, "r": {"S": "baz"}}],
        )
    )
    items = list(iterate_query(self.connection.object, _lv.Query("Table").key_eq("h", 0)))
    self.assertEqual(items, [{"h": 0, "r": "foo"}, {"h": 0, "r": "bar"}, {"h": 0, "r": "baz"}])
def test_simple_batch_get(self):
    """BatchGetItem returns every requested item under its table name."""
    self.connection(_lv.BatchWriteItem().table("Aaa").put(
        {"h": "1", "a": "xxx"},
        {"h": "2", "a": "yyy"},
        {"h": "3", "a": "zzz"},
    ))
    response = self.connection(
        _lv.BatchGetItem().table("Aaa").keys({"h": "1"}, {"h": "2"}, {"h": "3"})
    )
    self.assertEqual(list(response.responses.keys()), ["Aaa"])
    # Items come back in unspecified order, so sort before comparing.
    self.assertEqual(
        sorted(response.responses["Aaa"], key=lambda item: item["h"]),
        [
            {"h": "1", "a": "xxx"},
            {"h": "2", "a": "yyy"},
            {"h": "3", "a": "zzz"},
        ],
    )
def batch_delete_item(connection, table, *keys):
    """
    Make as many :class:`.BatchWriteItem` actions as needed to delete all specified keys.
    Including processing :attr:`.BatchWriteItemResponse.unprocessed_items`.

    >>> batch_delete_item(
    ...   connection,
    ...   table,
    ...   {"h": 0},
    ...   {"h": 1},
    ...   {"h": 2}
    ... )
    """
    pending = list(keys)
    retries = []
    # BatchWriteItem accepts at most 25 requests per call, so delete in chunks.
    while pending:
        response = connection(_lv.BatchWriteItem().table(table).delete(pending[:25]))
        pending = pending[25:]
        unprocessed = response.unprocessed_items
        if isinstance(unprocessed, dict) and table in unprocessed:
            retries.extend(unprocessed[table])
    # Re-submit whatever the service reported as unprocessed until none remain.
    while retries:
        response = connection(
            _lv.BatchWriteItem().previous_unprocessed_items({table: retries[:25]})
        )
        retries = retries[25:]
        unprocessed = response.unprocessed_items
        if isinstance(unprocessed, dict) and table in unprocessed:
            retries.extend(unprocessed[table])
def test_simple_batch_put(self):
    """Items written through BatchWriteItem are readable with GetItem."""
    self.connection(_lv.BatchWriteItem().table("Aaa").put(
        {"h": "1", "a": "xxx"},
        {"h": "2", "a": "yyy"},
        {"h": "3", "a": "zzz"},
    ))
    # Read each item back individually and compare to what was written.
    for key, value in (("1", "xxx"), ("2", "yyy"), ("3", "zzz")):
        self.assertEqual(
            self.connection(_lv.GetItem("Aaa", {"h": key})).item,
            {"h": key, "a": value},
        )
def test_simple_delete(self):
    """DeleteItem removes an item; a subsequent GetItem returns None."""
    self.connection(_lv.PutItem("Aaa", {"h": "simple", "a": "yyy"}))
    self.connection(_lv.DeleteItem("Aaa", {"h": "simple"}))
    item = self.connection(_lv.GetItem("Aaa", {"h": "simple"})).item
    self.assertEqual(item, None)
def test_get_with_unprocessed_keys(self):
    """A BatchGetItem whose response exceeds the 16MB limit reports unprocessed keys.

    Fix: ``unicode()`` is Python 2 only (NameError on Python 3); the sibling
    tests in this file build the same keys with ``str()``, so use that here too.
    """
    _lv.batch_put_item(
        self.connection,
        "Aaa",
        [{"h": str(i), "xs": "x" * 300000} for i in range(100)],
    )  # 300kB items ensure a single BatchGetItem will return at most 55 items
    r1 = self.connection(
        _lv.BatchGetItem().table("Aaa").keys({"h": str(i)} for i in range(100))
    )
    self.assertEqual(len(r1.unprocessed_keys["Aaa"]["Keys"]), 45)
    self.assertEqual(len(r1.responses["Aaa"]), 55)
def setUp(self):
    """Create a fresh table for each test and wait until it is ACTIVE."""
    super(WaitForTableDeletionConnectedIntegTests, self).setUp()
    self.table = self.make_table_name()
    create = _lv.CreateTable(self.table).hash_key("h", _lv.STRING).provisioned_throughput(1, 1)
    self.connection(create)
    _lv.wait_for_table_activation(self.connection, self.table)
def test_network_error(self):
    """A connection to an unreachable endpoint raises NetworkError."""
    # Port 65555 is out of the valid range, so the request can never succeed.
    connection = _lv.Connection(
        "us-west-2",
        _lv.StaticCredentials("DummyKey", "DummySecret"),
        "http://localhost:65555/",
        _lv.ExponentialBackoffRetryPolicy(0, 1, 3),
    )
    with self.assertRaises(_lv.NetworkError):
        connection(self.TestAction("ListTables", {}))
def test_wait_after_third_failure(self):
    """After three server errors the policy still retries, waiting 9 units."""
    failures = [_lv.ServerError(), _lv.ServerError(), _lv.ServerError()]
    self.assertEqual(self.policy.retry(object(), failures), 9)
def test_complex_update(self):
    """SET, REMOVE, ADD and DELETE clauses combine in a single UpdateItem."""
    self.connection(
        _lv.PutItem(
            "Aaa",
            {
                "h": "complex",
                "a": "a",
                "b": "b",
                "c": "c",
                "d": {41, 43},
                "e": 42,
                "f": {41, 42, 43},
                "g": {39, 40},
            },
        )
    )
    update = (
        _lv.UpdateItem("Aaa", {"h": "complex"})
        .set("a", ":s")            # overwrite with a set value
        .set("b", ":i")            # overwrite with a number
        .remove("c")               # drop the attribute entirely
        .add("d", "s")             # set union
        .add("e", "i")             # numeric addition
        .delete("f", "s")          # set difference
        .delete("g", "s")          # deleting absent members is a no-op
        .expression_attribute_value("s", {42, 43})
        .expression_attribute_value("i", 52)
        .return_values_all_new()
    )
    response = self.connection(update)
    self.assertEqual(
        response.attributes,
        {
            "h": "complex",
            "a": {42, 43},
            "b": 52,
            "d": {41, 42, 43},
            "e": 94,
            "f": {41},
            "g": {39, 40},
        },
    )
def batch_put_item(connection, table, *items):
    """
    Make as many :class:`.BatchWriteItem` actions as needed to put all specified items.
    Including processing :attr:`.BatchWriteItemResponse.unprocessed_items`.

    >>> batch_put_item(
    ...   connection,
    ...   table,
    ...   {"h": 0, "a": 42},
    ...   {"h": 1, "a": 57},
    ...   {"h": 2, "a": 33, "b": 22},
    ... )
    """
    pending = list(items)
    retries = []
    # BatchWriteItem accepts at most 25 requests per call, so put in chunks.
    while pending:
        response = connection(_lv.BatchWriteItem().table(table).put(pending[:25]))
        pending = pending[25:]
        unprocessed = response.unprocessed_items
        if isinstance(unprocessed, dict) and table in unprocessed:
            retries.extend(unprocessed[table])
    # Re-submit whatever the service reported as unprocessed until none remain.
    while retries:
        response = connection(
            _lv.BatchWriteItem().previous_unprocessed_items({table: retries[:25]})
        )
        retries = retries[25:]
        unprocessed = response.unprocessed_items
        if isinstance(unprocessed, dict) and table in unprocessed:
            retries.extend(unprocessed[table])
def iterate_batch_get_item(connection, table, *keys):
    """
    Make as many :class:`.BatchGetItem` actions as needed to iterate over all specified items.
    Including processing :attr:`.BatchGetItemResponse.unprocessed_keys`.

    .. Warning, this is NOT doctest. Because doctests aren't stable because items order changes.

    ::

        >>> for item in iterate_batch_get_item(connection, table, {"h": 0}, {"h": 1}, {"h": 2}):
        ...   print item
        {u'h': 1, u'gr': 0, u'gh': 0}
        {u'h': 2, u'gr': 0, u'gh': 0}
        {u'h': 0, u'gr': 0, u'gh': 0}

    Note that items are returned in an unspecified order.
    """
    pending = list(keys)
    retries = []

    def _collect_unprocessed(response):
        # The service may return a partial response; remember what to re-request.
        unprocessed = response.unprocessed_keys
        if isinstance(unprocessed, dict) and table in unprocessed and "Keys" in unprocessed[table]:
            retries.extend(unprocessed[table]["Keys"])

    # BatchGetItem accepts at most 100 keys per call, so request in chunks.
    while pending:
        response = connection(_lv.BatchGetItem().table(table).keys(pending[:100]))
        pending = pending[100:]
        _collect_unprocessed(response)
        for item in response.responses.get(table, []):
            yield item
    # Re-request whatever the service could not process until none remain.
    while retries:
        response = connection(
            _lv.BatchGetItem().previous_unprocessed_keys({table: {"Keys": retries[:100]}})
        )
        retries = retries[100:]
        _collect_unprocessed(response)
        for item in response.responses.get(table, []):
            yield item
def test_delete_and_create_gsi(self):
    """A GSI can be deleted and a new one created on the same table."""
    response = self.connection(
        _lv.UpdateTable("Aaa").delete_global_secondary_index("the_gsi")
    )
    self.assertEqual(
        response.table_description.global_secondary_indexes[0].index_status,
        "DELETING",
    )
    _lv.wait_for_table_activation(self.connection, "Aaa")

    described = self.connection(_lv.DescribeTable("Aaa"))
    self.assertEqual(described.table.global_secondary_indexes, None)
    # The previous definition of attribute "hh" has disapeared.
    self.assertEqual(len(described.table.attribute_definitions), 1)

    response = self.connection(
        _lv.UpdateTable("Aaa")
        .create_global_secondary_index("new_gsi")
        .provisioned_throughput(1, 2)
        .hash_key("nh", _lv.NUMBER)
        .project_all()
    )
    new_gsi = response.table_description.global_secondary_indexes[0]
    self.assertEqual(new_gsi.provisioned_throughput.read_capacity_units, 1)
    self.assertEqual(new_gsi.provisioned_throughput.write_capacity_units, 2)
    self.assertEqual(new_gsi.key_schema[0].attribute_name, "nh")
    self.assertEqual(new_gsi.key_schema[0].key_type, "HASH")
    self.assertEqual(len(response.table_description.attribute_definitions), 2)
def test_paginated_segmented_scan(self):
    """Each scan segment paginates independently via its own exclusive start key."""
    # If this test fails randomly, change it to assert on the sum and union of the results
    r01 = self.connection(_lv.Scan("Aaa").segment(0, 2).limit(1))
    r02 = self.connection(_lv.Scan("Aaa").segment(0, 2).exclusive_start_key({"h": "1"}))
    r11 = self.connection(_lv.Scan("Aaa").segment(1, 2).limit(1))
    r12 = self.connection(_lv.Scan("Aaa").segment(1, 2).exclusive_start_key({"h": "0"}))

    # Segment 0, first page: one item with a continuation key.
    self.assertEqual(r01.count, 1)
    self.assertEqual(r01.items[0], {"h": "1", "v": 1})
    self.assertEqual(r01.last_evaluated_key, {"h": "1"})
    self.assertEqual(r01.scanned_count, 1)
    # Segment 0, second page: final item, no continuation key.
    self.assertEqual(r02.count, 1)
    self.assertEqual(r02.items[0], {"h": "3", "v": 3})
    self.assertEqual(r02.last_evaluated_key, None)
    self.assertEqual(r02.scanned_count, 1)
    # Segment 1, first page.
    self.assertEqual(r11.count, 1)
    self.assertEqual(r11.items[0], {"h": "0", "v": 0})
    self.assertEqual(r11.last_evaluated_key, {"h": "0"})
    self.assertEqual(r11.scanned_count, 1)
    # Segment 1, second page.
    self.assertEqual(r12.count, 1)
    self.assertEqual(r12.items[0], {"h": "2", "v": 2})
    self.assertEqual(r12.last_evaluated_key, None)
    self.assertEqual(r12.scanned_count, 1)
def test_gsi_creating(self):
    """wait_for_table_activation keeps polling while a GSI is still CREATING."""
    # First poll: table ACTIVE but its GSI still CREATING -> must sleep and retry.
    self.connection.expect._call_.withArguments(
        self.ActionChecker("DescribeTable", {"TableName": "Table"})
    ).andReturn(
        _lv.DescribeTableResponse(
            Table={
                "TableStatus": "ACTIVE",
                "GlobalSecondaryIndexes": [{"IndexStatus": "CREATING"}],
            }
        )
    )
    self.sleep.expect(3)
    # Second poll: everything ACTIVE -> done.
    self.connection.expect._call_.withArguments(
        self.ActionChecker("DescribeTable", {"TableName": "Table"})
    ).andReturn(
        _lv.DescribeTableResponse(
            Table={
                "TableStatus": "ACTIVE",
                "GlobalSecondaryIndexes": [{"IndexStatus": "ACTIVE"}],
            }
        )
    )
    self.sleep.expect(3)
    wait_for_table_activation(self.connection.object, "Table")
def test_get_without_unprocessed_keys(self):
    """100 small items fit in one BatchGetItem response with nothing left over.

    Fix: ``unicode()`` is Python 2 only (NameError on Python 3); the sibling
    tests in this file build the same keys with ``str()``, so use that here too.
    """
    _lv.batch_put_item(self.connection, "Aaa", [{"h": str(i)} for i in range(100)])
    r = self.connection(
        _lv.BatchGetItem().table("Aaa").keys({"h": str(i)} for i in range(100))
    )
    self.assertEqual(r.unprocessed_keys, {})
    self.assertEqual(len(r.responses["Aaa"]), 100)
def test(self):
    """batch_delete_item removes every key, leaving the table empty."""
    keys = [{"h": self.key(i)} for i in range(100)]
    _lv.batch_delete_item(self.connection, "Aaa", keys)
    remaining = list(_lv.iterate_scan(self.connection, _lv.Scan("Aaa")))
    self.assertEqual([], remaining)
def test_give_up_after_fourth_failure(self):
    """After four server errors the policy returns None (stop retrying)."""
    failures = [
        _lv.ServerError(),
        _lv.ServerError(),
        _lv.ServerError(),
        _lv.ServerError(),
    ]
    self.assertIsNone(self.policy.retry(object(), failures))
def test(self):
    """A newly created table reaches ACTIVE status after waiting."""
    self.connection(
        _lv.CreateTable("Aaa").hash_key("h", _lv.STRING).provisioned_throughput(1, 1)
    )
    _lv.wait_for_table_activation(self.connection, "Aaa")
    status = self.connection(_lv.DescribeTable("Aaa")).table.table_status
    self.assertEqual(status, "ACTIVE")
def test_return_old_values(self):
    """DeleteItem with return_values_all_old echoes the deleted item."""
    self.connection(_lv.PutItem("Aaa", {"h": "return", "a": "yyy"}))
    delete = _lv.DeleteItem("Aaa", {"h": "return"}).return_values_all_old()
    response = self.connection(delete)
    self.assertEqual(response.attributes, {"h": "return", "a": "yyy"})
def test(self):
    """A table with a GSI becomes ACTIVE, and so does its index."""
    create = (
        _lv.CreateTable(self.table)
        .hash_key("tab_h", _lv.STRING)
        .range_key("tab_r", _lv.NUMBER)
        .provisioned_throughput(1, 1)
        .global_secondary_index("gsi")
        .hash_key("gsi_h", _lv.STRING)
        .range_key("gsi_r", _lv.NUMBER)
        .project_all()
        .provisioned_throughput(1, 1)
    )
    self.connection(create)
    _lv.wait_for_table_activation(self.connection, self.table)
    described = self.connection(_lv.DescribeTable(self.table))
    self.assertEqual(described.table.table_status, "ACTIVE")
    self.assertEqual(described.table.global_secondary_indexes[0].index_status, "ACTIVE")
def test_get_without_unprocessed_keys(self):
    """100 small items fit in one BatchGetItem response with nothing left over."""
    items = [{"h": str(i)} for i in range(100)]
    _lv.batch_put_item(self.connection, "Aaa", items)
    response = self.connection(
        _lv.BatchGetItem().table("Aaa").keys({"h": str(i)} for i in range(100))
    )
    self.assertEqual(response.unprocessed_keys, {})
    self.assertEqual(len(response.responses["Aaa"]), 100)
def test_one_page(self):
    """Two keys fit in a single BatchGetItem call; its items are yielded as-is."""
    self.connection.expect._call_.withArguments(
        self.ActionChecker(
            "BatchGetItem",
            {"RequestItems": {"Aaa": {"Keys": [{"h": {"S": "a"}}, {"h": {"S": "b"}}]}}},
        )
    ).andReturn(
        _lv.BatchGetItemResponse(Responses={"Aaa": [{"h": {"S": "c"}}, {"h": {"S": "d"}}]})
    )
    items = list(
        _lv.iterate_batch_get_item(self.connection.object, "Aaa", {"h": "a"}, {"h": "b"})
    )
    self.assertEqual(items, [{"h": "c"}, {"h": "d"}])
def test_add_and_delete_from_same_set(self):
    """DynamoDB rejects an update that both ADDs to and DELETEs from one set."""
    self.connection(_lv.PutItem("Aaa", {"h": "expr", "a": {1, 2, 3}}))
    # A bit sad: you can't add to and delete from the same set
    update = (
        _lv.UpdateItem("Aaa", {"h": "expr"})
        .delete("a", "three")
        .add("a", "four")
        .expression_attribute_value("three", {3})
        .expression_attribute_value("four", {4})
        .return_values_all_new()
    )
    with self.assertRaises(_lv.ValidationException):
        self.connection(update)
def test_one_page_followed_by_empty_page(self):
    """iterate_list_tables follows LastEvaluatedTableName even into an empty page."""
    self.connection.expect._call_.withArguments(
        self.ActionChecker("ListTables", {})
    ).andReturn(
        _lv.ListTablesResponse(TableNames=["A", "B", "C"], LastEvaluatedTableName="D")
    )
    # The continuation page is empty; iteration must still terminate cleanly.
    self.connection.expect._call_.withArguments(
        self.ActionChecker("ListTables", {"ExclusiveStartTableName": "D"})
    ).andReturn(
        _lv.ListTablesResponse(TableNames=[])
    )
    self.assertEqual(list(iterate_list_tables(self.connection.object)), ["A", "B", "C"])
def test(self):
    """A table with a GSI becomes ACTIVE, and so does its index."""
    create = (
        _lv.CreateTable(self.table)
        .hash_key("tab_h", _lv.STRING)
        .range_key("tab_r", _lv.NUMBER)
        .provisioned_throughput(1, 1)
        .global_secondary_index("gsi")
        .hash_key("gsi_h", _lv.STRING)
        .range_key("gsi_r", _lv.NUMBER)
        .project_all()
        .provisioned_throughput(1, 1)
    )
    self.connection(create)
    _lv.wait_for_table_activation(self.connection, self.table)
    described = self.connection(_lv.DescribeTable(self.table))
    self.assertEqual(described.table.table_status, "ACTIVE")
    self.assertEqual(described.table.global_secondary_indexes[0].index_status, "ACTIVE")
def test_one_unprocessed_item(self):
    """batch_put_item re-submits items the service reports as unprocessed."""
    # First call writes "a" and "b" but the service claims "c" is unprocessed.
    self.connection.expect._call_.withArguments(
        self.ActionChecker(
            "BatchWriteItem",
            {"RequestItems": {"Aaa": [
                {"PutRequest": {"Item": {"h": {"S": "a"}}}},
                {"PutRequest": {"Item": {"h": {"S": "b"}}}},
            ]}},
        )
    ).andReturn(
        _lv.BatchWriteItemResponse(
            UnprocessedItems={"Aaa": [{"PutRequest": {"Item": {"h": {"S": "c"}}}}]}
        )
    )
    # Second call must retry exactly the unprocessed request.
    self.connection.expect._call_.withArguments(
        self.ActionChecker(
            "BatchWriteItem",
            {"RequestItems": {"Aaa": [{"PutRequest": {"Item": {"h": {"S": "c"}}}}]}},
        )
    ).andReturn(
        _lv.BatchWriteItemResponse()
    )
    batch_put_item(self.connection.object, "Aaa", {"h": "a"}, {"h": "b"})
def make(self, dependencies):
    """Create the shared fixture table (with one GSI and one LSI) and return its name."""
    connection = make_connection()
    table = table_name_prefix
    create = (
        _lv.CreateTable(table)
        .hash_key("tab_h", _lv.STRING)
        .range_key("tab_r", _lv.NUMBER)
        .provisioned_throughput(1, 1)
        .global_secondary_index("gsi")
        .hash_key("gsi_h", _lv.STRING)
        .range_key("gsi_r", _lv.NUMBER)
        .project_all()
        .provisioned_throughput(1, 1)
        .local_secondary_index("lsi")
        .hash_key("tab_h")
        .range_key("lsi_r", _lv.NUMBER)
        .project_all()
        .provisioned_throughput(1, 1)
    )
    connection(create)
    _lv.wait_for_table_activation(connection, table)
    return table
def test_get_with_unprocessed_keys(self):
    """A BatchGetItem whose response exceeds the size limit reports unprocessed keys."""
    # 300kB items ensure a single BatchGetItem will return at most 55 items
    big_items = [{"h": str(i), "xs": "x" * 300000} for i in range(100)]
    _lv.batch_put_item(self.connection, "Aaa", big_items)
    r1 = self.connection(
        _lv.BatchGetItem().table("Aaa").keys({"h": str(i)} for i in range(100))
    )
    self.assertEqual(len(r1.unprocessed_keys["Aaa"]["Keys"]), 45)
    self.assertEqual(len(r1.responses["Aaa"]), 55)
def test_set(self):
    """SET works with both expression attribute values and attribute names."""
    update = (
        _lv.UpdateItem("Aaa", {"h": "set"})
        .set("a", ":v")
        .set("#p", ":w")  # "#p" is an expression attribute name, resolved below
        .expression_attribute_value("v", "aaa")
        .expression_attribute_value("w", "bbb")
        .expression_attribute_name("p", "b")
    )
    self.connection(update)
    item = self.connection(_lv.GetItem("Aaa", {"h": "set"})).item
    self.assertEqual(item, {"h": "set", "a": "aaa", "b": "bbb"})
def setUp(self):
    """Create three tables so list-tables pagination has something to return."""
    super(ListTablesLocalIntegTests, self).setUp()
    for name in ("Aaa", "Bbb", "Ccc"):
        self.connection(
            _lv.CreateTable(name).hash_key("h", _lv.STRING).provisioned_throughput(1, 2)
        )
def test_condition_expression(self):
    """An UpdateItem with a satisfied condition expression is applied."""
    self.connection(_lv.PutItem("Aaa", {"h": "expr", "a": 42, "b": 42}))
    update = (
        _lv.UpdateItem("Aaa", {"h": "expr"})
        .set("checked", ":true")
        .expression_attribute_value("true", True)
        .condition_expression("a=b")  # holds: both attributes are 42
        .return_values_all_new()
    )
    response = self.connection(update)
    self.assertEqual(
        response.attributes,
        {"h": "expr", "a": 42, "b": 42, "checked": True},
    )
def test(self):
    """DescribeTable reports the full schema and throughput of a fresh table."""
    table = self.connection(_lv.DescribeTable("Aaa")).table
    self.assertDateTimeIsReasonable(table.creation_date_time)
    # Attribute definitions and key schema.
    self.assertEqual(table.attribute_definitions[0].attribute_name, "h")
    self.assertEqual(table.attribute_definitions[0].attribute_type, "S")
    self.assertEqual(table.key_schema[0].attribute_name, "h")
    self.assertEqual(table.key_schema[0].key_type, "HASH")
    # No secondary indexes were declared.
    self.assertEqual(table.global_secondary_indexes, None)
    self.assertEqual(table.local_secondary_indexes, None)
    # Fresh table: no items, no bytes.
    self.assertEqual(table.item_count, 0)
    self.assertEqual(table.table_size_bytes, 0)
    # Throughput: never changed, so the change dates are the epoch.
    throughput = table.provisioned_throughput
    self.assertEqual(throughput.last_decrease_date_time, datetime.datetime(1970, 1, 1))
    self.assertEqual(throughput.last_increase_date_time, datetime.datetime(1970, 1, 1))
    self.assertEqual(throughput.number_of_decreases_today, 0)
    self.assertEqual(throughput.read_capacity_units, 1)
    self.assertEqual(throughput.write_capacity_units, 2)
    self.assertEqual(table.table_name, "Aaa")
    self.assertEqual(table.table_status, "ACTIVE")
def test_one_page(self):
    """Unicode keys serialize identically to str keys in a single-page get."""
    self.connection.expect._call_.withArguments(
        self.ActionChecker(
            "BatchGetItem",
            {"RequestItems": {"Aaa": {"Keys": [{"h": {"S": "a"}}, {"h": {"S": "b"}}]}}},
        )
    ).andReturn(
        _lv.BatchGetItemResponse(Responses={"Aaa": [{"h": {"S": "c"}}, {"h": {"S": "d"}}]})
    )
    items = list(
        _lv.iterate_batch_get_item(self.connection.object, "Aaa", {"h": u"a"}, {"h": u"b"})
    )
    self.assertEqual(items, [{"h": "c"}, {"h": "d"}])
def test_delete_and_create_gsi(self):
    """A GSI can be deleted and a new one created on the same table."""
    response = self.connection(_lv.UpdateTable("Aaa").delete_global_secondary_index("the_gsi"))
    self.assertEqual(
        response.table_description.global_secondary_indexes[0].index_status,
        "DELETING",
    )
    _lv.wait_for_table_activation(self.connection, "Aaa")

    described = self.connection(_lv.DescribeTable("Aaa"))
    self.assertEqual(described.table.global_secondary_indexes, None)
    # The previous definition of attribute "hh" has disapeared.
    self.assertEqual(len(described.table.attribute_definitions), 1)

    response = self.connection(
        _lv.UpdateTable("Aaa")
        .create_global_secondary_index("new_gsi")
        .provisioned_throughput(1, 2)
        .hash_key("nh", _lv.NUMBER)
        .project_all()
    )
    new_gsi = response.table_description.global_secondary_indexes[0]
    self.assertEqual(new_gsi.provisioned_throughput.read_capacity_units, 1)
    self.assertEqual(new_gsi.provisioned_throughput.write_capacity_units, 2)
    self.assertEqual(new_gsi.key_schema[0].attribute_name, "nh")
    self.assertEqual(new_gsi.key_schema[0].key_type, "HASH")
    self.assertEqual(len(response.table_description.attribute_definitions), 2)
def test_several_pages(self):
    """250 keys are fetched in three BatchGetItem calls of 100, 100 and 50 keys."""
    # Expected request/response pairs: (requested key range, returned item range).
    pages = [
        ((0, 100), (1000, 1100)),
        ((100, 200), (1100, 1200)),
        ((200, 250), (1200, 1250)),
    ]
    for (req_lo, req_hi), (resp_lo, resp_hi) in pages:
        self.connection.expect._call_.withArguments(
            self.ActionChecker(
                "BatchGetItem",
                {"RequestItems": {"Aaa": {"Keys": [{"h": {"N": str(i)}} for i in range(req_lo, req_hi)]}}},
            )
        ).andReturn(
            _lv.BatchGetItemResponse(
                Responses={"Aaa": [{"h": {"N": str(i)}} for i in range(resp_lo, resp_hi)]}
            )
        )
    items = list(
        _lv.iterate_batch_get_item(
            self.connection.object, "Aaa", ({"h": i} for i in range(0, 250))
        )
    )
    self.assertEqual(items, [{"h": i} for i in range(1000, 1250)])
def setUp(self):
    """Create a fresh table for each test and wait until it is ACTIVE."""
    super(WaitForTableDeletionConnectedIntegTests, self).setUp()
    self.table = self.make_table_name()
    self.connection(
        _lv.CreateTable(self.table).hash_key("h", _lv.STRING).provisioned_throughput(1, 1)
    )
    _lv.wait_for_table_activation(self.connection, self.table)
def test_no_keys(self):
    """With no keys at all, iteration yields nothing and makes no calls."""
    items = list(_lv.iterate_batch_get_item(self.connection.object, "Aaa", []))
    self.assertEqual(items, [])
def setUp(self):
    """Populate the table with 100 items for the delete tests to remove."""
    super(BatchDeleteItemLocalIntegTests, self).setUp()
    items = [{"h": self.key(i)} for i in range(100)]
    _lv.batch_put_item(self.connection, "Aaa", items)
def test_parallel_scan(self):
    """Scanning all segments in parallel covers exactly the full key set."""
    seen = []
    for segment in _lv.parallelize_scan(_lv.Scan("Aaa"), 3):
        seen.extend(item["h"] for item in _lv.iterate_scan(self.connection, segment))
    # Segments partition the table, so together they yield every key once.
    self.assertEqual(sorted(seen), self.keys)
def test(self):
    """iterate_list_tables yields every table name known to the fixture."""
    names = list(_lv.iterate_list_tables(self.connection))
    self.assertEqual(names, self.table_names)
def test(self):
    """iterate_scan sees every one of the 100 items put by batch_put_item."""
    items = [{"h": self.key(i)} for i in range(100)]
    _lv.batch_put_item(self.connection, "Aaa", items)
    scanned = list(_lv.iterate_scan(self.connection, _lv.Scan("Aaa")))
    self.assertEqual(len(scanned), 100)
def test_simple_scan(self):
    """A full scan returns exactly the fixture's keys (order unspecified)."""
    scanned_keys = sorted(
        item["h"] for item in _lv.iterate_scan(self.connection, _lv.Scan("Aaa"))
    )
    self.assertEqual(scanned_keys, self.keys)
def test(self):
    """After wait_for_table_deletion, describing the table raises ResourceNotFound."""
    self.connection(_lv.DeleteTable(self.table))
    _lv.wait_for_table_deletion(self.connection, self.table)
    with self.assertRaises(_lv.ResourceNotFoundException):
        self.connection(_lv.DescribeTable(self.table))
def clean(self, table):
    """Drop the fixture table and block until the deletion completes."""
    connection = make_connection()
    connection(_lv.DeleteTable(table))
    _lv.wait_for_table_deletion(connection, table)
def global_setup():
    """Create (if needed) and populate the two tables used by the doc tests.

    Returns a ``(connection, table1, table2)`` tuple.

    Fix: the first ``CreateTable`` referenced the undefined name ``table``
    instead of ``table1``, raising ``NameError`` whenever table1 had to be
    created from scratch.
    """
    connection = _lv.Connection("us-west-2", _lv.EnvironmentCredentials())
    table1 = "LowVoltage.Tests.Doc.1"
    table2 = "LowVoltage.Tests.Doc.2"
    try:
        connection(_lv.DescribeTable(table1))
    except _lv.ResourceNotFoundException:
        connection(
            _lv.CreateTable(table1)
            .hash_key("h", _lv.NUMBER).provisioned_throughput(1, 1)
            .global_secondary_index("gsi").hash_key("gh", _lv.NUMBER).range_key("gr", _lv.NUMBER).provisioned_throughput(1, 1).project_all()
        )
    try:
        connection(_lv.DescribeTable(table2))
    except _lv.ResourceNotFoundException:
        connection(
            _lv.CreateTable(table2)
            .hash_key("h", _lv.NUMBER).range_key("r1", _lv.NUMBER).provisioned_throughput(1, 1)
            .local_secondary_index("lsi").hash_key("h", _lv.NUMBER).range_key("r2", _lv.NUMBER).project_all()
        )
    _lv.wait_for_table_activation(connection, table1)
    _lv.batch_put_item(
        connection,
        table1,
        [{"h": h, "gh": h * h, "gr": 10 - 2 * h} for h in range(7)],
    )
    _lv.wait_for_table_activation(connection, table1)
    _lv.batch_put_item(
        connection,
        table1,
        [{"h": h, "a": 0} for h in range(7, 10)],
    )
    _lv.wait_for_table_activation(connection, table2)
    _lv.batch_put_item(
        connection,
        table2,
        [{"h": h, "r1": 0, "r2": 0} for h in range(10)],
    )
    _lv.batch_put_item(
        connection,
        table2,
        [{"h": 42, "r1": r1, "r2": 10 - r1} for r1 in range(6)],
    )
    _lv.batch_put_item(
        connection,
        table2,
        [{"h": 42, "r1": r1} for r1 in range(6, 10)],
    )
    return connection, table1, table2
def test(self):
    """A newly created table reaches ACTIVE status after waiting."""
    create = _lv.CreateTable("Aaa").hash_key("h", _lv.STRING).provisioned_throughput(1, 1)
    self.connection(create)
    _lv.wait_for_table_activation(self.connection, "Aaa")
    status = self.connection(_lv.DescribeTable("Aaa")).table.table_status
    self.assertEqual(status, "ACTIVE")
def test(self):
    """Iterating 250 keys through iterate_batch_get_item yields them all."""
    fetched = [
        item["h"]
        for item in _lv.iterate_batch_get_item(
            self.connection, "Aaa", ({"h": self.key(i)} for i in range(250))
        )
    ]
    expected = [self.key(i) for i in range(250)]
    self.assertEqual(sorted(fetched), expected)
def setUp(self):
    """Populate the table with 250 large items to force multi-page gets."""
    super(IterateBatchGetItemLocalIntegTests, self).setUp()
    # 300kB items ensure a single BatchGetItem will return at most 55 items
    big_items = [{"h": self.key(i), "xs": "x" * 300000} for i in range(250)]
    _lv.batch_put_item(self.connection, "Aaa", big_items)
def test_simple_query(self):
    """iterate_query on one hash key returns every range key for that item."""
    returned = sorted(
        item["r"]
        for item in _lv.iterate_query(self.connection, _lv.Query("Aaa").key_eq("h", u"0"))
    )
    self.assertEqual(returned, self.keys)