from boto.dynamodb2.fields import (AllIndex, KeysOnlyIndex, IncludeIndex,
                                   GlobalAllIndex, GlobalKeysOnlyIndex,
                                   GlobalIncludeIndex)


def get_indexes(all_indexes):
    """Split a list of index definitions into local and global indexes."""
    indexes = []
    global_indexes = []

    for index in all_indexes:
        name = index['name']
        # get_schema_param is expected to be defined elsewhere in this
        # module; it builds the [HashKey, RangeKey] schema list.
        schema = get_schema_param(index.get('hash_key_name'),
                                  index.get('hash_key_type'),
                                  index.get('range_key_name'),
                                  index.get('range_key_type'))
        throughput = {
            'read': index.get('read_capacity', 1),
            'write': index.get('write_capacity', 1)
        }

        if index['type'] == 'all':
            indexes.append(AllIndex(name, parts=schema))
        elif index['type'] == 'global_all':
            global_indexes.append(
                GlobalAllIndex(name, parts=schema, throughput=throughput))
        elif index['type'] == 'global_include':
            global_indexes.append(
                GlobalIncludeIndex(name, parts=schema, throughput=throughput,
                                   includes=index['includes']))
        elif index['type'] == 'global_keys_only':
            global_indexes.append(
                GlobalKeysOnlyIndex(name, parts=schema, throughput=throughput))
        elif index['type'] == 'include':
            indexes.append(
                IncludeIndex(name, parts=schema, includes=index['includes']))
        elif index['type'] == 'keys_only':
            indexes.append(KeysOnlyIndex(name, parts=schema))

    return indexes, global_indexes
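
# Hedged usage sketch for get_indexes(). The dict fields ('type', 'name',
# the key name/type pairs, and the capacity keys) are inferred from the
# lookups in the function body, not from a documented schema, and
# get_schema_param must be defined elsewhere in the module, so treat this
# as illustrative only:
all_indexes = [
    {'type': 'keys_only', 'name': 'LastNameIndex',
     'hash_key_name': 'username', 'hash_key_type': 'STRING',
     'range_key_name': 'last_name', 'range_key_type': 'STRING'},
    {'type': 'global_all', 'name': 'FriendCountIndex',
     'hash_key_name': 'friend_count', 'hash_key_type': 'NUMBER',
     'read_capacity': 2, 'write_capacity': 2},
]
indexes, global_indexes = get_indexes(all_indexes)
# indexes        -> [KeysOnlyIndex('LastNameIndex', ...)]
# global_indexes -> [GlobalAllIndex('FriendCountIndex', ...)]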
import json
import time

from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import NUMBER


def pf_ddb_create_table(self, name):
    # Create a full table with all options specified.
    users = Table.create(name, schema=[
        HashKey('username'),
        RangeKey('friend_count', data_type=NUMBER)
    ], throughput={
        'read': 5,
        'write': 5,
    }, indexes=[
        KeysOnlyIndex('LastNameIndex', parts=[
            HashKey('username'),
            RangeKey('last_name')
        ]),
    ])

    # Wait for it. (A fixed sleep; see the polling sketch below for a
    # less wasteful alternative.)
    time.sleep(60)
    return json.dumps(name + " table created")
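
# The fixed time.sleep(60) above can both over-wait and under-wait. A
# minimal polling sketch instead, assuming a DynamoDBConnection is
# available (it mirrors the describe_table loop used in createTable
# below); the function name and timeout are illustrative, not from the
# original code:
def wait_for_active(connection, table_name, timeout=300):
    """Poll describe_table until the table reports ACTIVE."""
    waited = 0
    while waited < timeout:
        description = connection.describe_table(table_name)
        if description['Table']['TableStatus'] == 'ACTIVE':
            return True
        time.sleep(2)
        waited += 2
    return False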
from time import sleep

from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import NUMBER
from boto.provider import Provider


def createTable(self):
    provider = Provider('aws')
    connection = DynamoDBConnection(
        aws_access_key_id=provider.get_access_key(),
        aws_secret_access_key=provider.get_secret_key(),
        region=self.regionv2)

    self.blockTablev2 = Table.create(self.tableName + "Blocks", schema=[
        HashKey('blockId'),
        RangeKey('blockNum', data_type=NUMBER)
    ], throughput={'read': 30, 'write': 10},
        connection=connection)

    self.tablev2 = Table.create(self.tableName, schema=[
        HashKey('path'),
        RangeKey('name')
    ], throughput={'read': 30, 'write': 10},
        indexes=[
            KeysOnlyIndex("Links", parts=[
                HashKey('path'),
                RangeKey('link')
            ])
        ], connection=connection)

    # Poll until the main table is ACTIVE. (Only the main table is
    # polled here; the Blocks table is assumed to finish creating in
    # the meantime.)
    description = connection.describe_table(self.tableName)
    attempt = 0
    while description["Table"]["TableStatus"] != "ACTIVE":
        print "Waiting for %s to become ACTIVE (%d)..." % (self.tableName, attempt)
        attempt += 1
        sleep(1)
        description = connection.describe_table(self.tableName)

    # self.conn is the original (v1) DynamoDB connection, set up elsewhere.
    self.table = self.conn.get_table(self.tableName)
    self.blockTable = self.conn.get_table(self.tableName + "Blocks")
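
# Hedged usage sketch, not part of the original class: querying the
# "Links" local index created above, using the same field__operator
# filter convention the integration test below uses. The method name
# and path value are illustrative.
def linksForPath(self, path):
    return self.tablev2.query(path__eq=path, index='Links')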
def test_integration(self):
    # Test creating a full table with all options specified.
    users = Table.create('users', schema=[
        HashKey('username'),
        RangeKey('friend_count', data_type=NUMBER)
    ], throughput={
        'read': 5,
        'write': 5,
    }, indexes=[
        KeysOnlyIndex('LastNameIndex', parts=[
            HashKey('username'),
            RangeKey('last_name')
        ]),
    ])
    self.addCleanup(users.delete)
    self.assertEqual(len(users.schema), 2)
    self.assertEqual(users.throughput['read'], 5)

    # Wait for it.
    time.sleep(60)

    # Make sure things line up if we're introspecting the table.
    users_hit_api = Table('users')
    users_hit_api.describe()
    self.assertEqual(len(users.schema), len(users_hit_api.schema))
    self.assertEqual(users.throughput, users_hit_api.throughput)
    self.assertEqual(len(users.indexes), len(users_hit_api.indexes))

    # Test putting some items individually.
    users.put_item(data={
        'username': '******',
        'first_name': 'John',
        'last_name': 'Doe',
        'friend_count': 4
    })
    users.put_item(data={
        'username': '******',
        'first_name': 'Alice',
        'last_name': 'Expert',
        'friend_count': 2
    })

    time.sleep(5)

    # Test batch writing.
    with users.batch_write() as batch:
        batch.put_item({
            'username': '******',
            'first_name': 'Jane',
            'last_name': 'Doe',
            'friend_count': 3
        })
        batch.delete_item(username='******', friend_count=2)
        batch.put_item({
            'username': '******',
            'first_name': 'Bob',
            'last_name': 'Smith',
            'friend_count': 1
        })

    time.sleep(5)

    # Test getting an item & updating it.
    # This is the "safe" variant (only write if there have been no
    # changes).
    jane = users.get_item(username='******', friend_count=3)
    self.assertEqual(jane['first_name'], 'Jane')
    jane['last_name'] = 'Doh'
    self.assertTrue(jane.save())

    # Test strongly consistent getting of an item.
    # Additionally, test the overwrite behavior.
    client_1_jane = users.get_item(
        username='******',
        friend_count=3,
        consistent=True
    )
    self.assertEqual(jane['first_name'], 'Jane')
    client_2_jane = users.get_item(
        username='******',
        friend_count=3,
        consistent=True
    )
    self.assertEqual(jane['first_name'], 'Jane')

    # Write & assert the ``first_name`` is gone, then...
    del client_1_jane['first_name']
    self.assertTrue(client_1_jane.save())
    check_name = users.get_item(
        username='******',
        friend_count=3,
        consistent=True
    )
    self.assertEqual(check_name['first_name'], None)

    # ...overwrite the data with what's in memory.
    client_2_jane['first_name'] = 'Joan'
    # Now a write that fails due to default expectations...
    self.assertRaises(exceptions.JSONResponseError, client_2_jane.save)
    # ... so we force an overwrite.
    self.assertTrue(client_2_jane.save(overwrite=True))
    check_name_again = users.get_item(
        username='******',
        friend_count=3,
        consistent=True
    )
    self.assertEqual(check_name_again['first_name'], 'Joan')

    # Reset it.
    jane['username'] = '******'
    jane['first_name'] = 'Jane'
    jane['last_name'] = 'Doe'
    jane['friend_count'] = 3
    self.assertTrue(jane.save(overwrite=True))

    # Test the partial update behavior.
    client_3_jane = users.get_item(
        username='******',
        friend_count=3,
        consistent=True
    )
    client_4_jane = users.get_item(
        username='******',
        friend_count=3,
        consistent=True
    )
    client_3_jane['favorite_band'] = 'Feed Me'
    # No ``overwrite`` needed due to new data.
    self.assertTrue(client_3_jane.save())
    # Expectations are only checked on the ``first_name``, so what
    # wouldn't have succeeded by default does succeed here.
    client_4_jane['first_name'] = 'Jacqueline'
    self.assertTrue(client_4_jane.partial_save())
    partial_jane = users.get_item(
        username='******',
        friend_count=3,
        consistent=True
    )
    self.assertEqual(partial_jane['favorite_band'], 'Feed Me')
    self.assertEqual(partial_jane['first_name'], 'Jacqueline')

    # Reset it.
    jane['username'] = '******'
    jane['first_name'] = 'Jane'
    jane['last_name'] = 'Doe'
    jane['friend_count'] = 3
    self.assertTrue(jane.save(overwrite=True))

    # Ensure that partial saves of a brand-new object work.
    sadie = Item(users, data={
        'username': '******',
        'first_name': 'Sadie',
        'favorite_band': 'Zedd',
        'friend_count': 7
    })
    self.assertTrue(sadie.partial_save())
    serverside_sadie = users.get_item(
        username='******',
        friend_count=7,
        consistent=True
    )
    self.assertEqual(serverside_sadie['first_name'], 'Sadie')

    # Test the eventually consistent query.
    results = users.query(
        username__eq='johndoe',
        last_name__eq='Doe',
        index='LastNameIndex',
        attributes=('username',),
        reverse=True
    )

    for res in results:
        self.assertTrue(res['username'] in ['johndoe'])
        self.assertEqual(res.keys(), ['username'])

    # Test the strongly consistent query.
    c_results = users.query(
        username__eq='johndoe',
        last_name__eq='Doe',
        index='LastNameIndex',
        reverse=True,
        consistent=True
    )

    for res in c_results:
        self.assertTrue(res['username'] in ['johndoe'])

    # Test scans without filters.
    all_users = users.scan(limit=7)
    self.assertEqual(all_users.next()['username'], 'bob')
    self.assertEqual(all_users.next()['username'], 'jane')
    self.assertEqual(all_users.next()['username'], 'johndoe')

    # Test scans with a filter.
    filtered_users = users.scan(limit=2, username__beginswith='j')
    self.assertEqual(filtered_users.next()['username'], 'jane')
    self.assertEqual(filtered_users.next()['username'], 'johndoe')

    # Test deleting a single item.
    johndoe = users.get_item(username='******', friend_count=4)
    johndoe.delete()

    # Test the eventually consistent batch get.
    results = users.batch_get(keys=[
        {'username': '******', 'friend_count': 1},
        {'username': '******', 'friend_count': 3}
    ])
    batch_users = []

    for res in results:
        batch_users.append(res)
        self.assertTrue(res['first_name'] in ['Bob', 'Jane'])

    self.assertEqual(len(batch_users), 2)

    # Test the strongly consistent batch get.
    c_results = users.batch_get(keys=[
        {'username': '******', 'friend_count': 1},
        {'username': '******', 'friend_count': 3}
    ], consistent=True)
    c_batch_users = []

    for res in c_results:
        c_batch_users.append(res)
        self.assertTrue(res['first_name'] in ['Bob', 'Jane'])

    self.assertEqual(len(c_batch_users), 2)

    # Test count, but only weakly, because of propagation lag.
    self.assertTrue(users.count() > -1)

    # Test query count.
    count = users.query_count(
        username__eq='bob',
    )
    self.assertEqual(count, 1)

    # Test without LSIs (describe calls shouldn't fail).
    admins = Table.create('admins', schema=[
        HashKey('username')
    ])
    self.addCleanup(admins.delete)

    time.sleep(60)

    admins.describe()
    self.assertEqual(admins.throughput['read'], 5)
    self.assertEqual(admins.indexes, [])

    # A single query term should fail on a table with *ONLY* a HashKey.
    self.assertRaises(
        exceptions.QueryError,
        admins.query,
        username__eq='johndoe'
    )

    # But it shouldn't break on more complex tables.
    res = users.query(username__eq='johndoe')

    # Test putting with/without sets.
    mau5_created = users.put_item(data={
        'username': '******',
        'first_name': 'dead',
        'last_name': 'mau5',
        'friend_count': 2,
        'friends': set(['skrill', 'penny']),
    })
    self.assertTrue(mau5_created)

    penny_created = users.put_item(data={
        'username': '******',
        'first_name': 'Penny',
        'friend_count': 0,
        'friends': set([]),
    })
    self.assertTrue(penny_created)
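
    # Not part of the original test: a hedged read-back check that the
    # string set written above round-trips as a Python set (boto's
    # Dynamizer decodes the SS type to set; the key values mirror the
    # redacted ones used in the put_item call).
    mau5 = users.get_item(username='******', friend_count=2,
                          consistent=True)
    self.assertTrue(isinstance(mau5['friends'], set))
    self.assertTrue('skrill' in mau5['friends'])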
import sys

sys.path.append('/Users/pgregg/nosql-service')
sys.path.append('/Users/pgregg/nosql-service/tests')
sys.path.append('/Users/pgregg/nosql-service/tests/integration')
sys.path.append('/Users/pgregg/nosql-service/tests/integration/dynamodb2')

import unittest

from boto.dynamodb2 import exceptions
from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex
from boto.dynamodb2.items import Item
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import NUMBER

dynamodb = True

# Test creating a full table with all options specified.
users = Table.create(
    'users',
    schema=[
        HashKey('username'),
        RangeKey('friend_count', data_type=NUMBER)
    ],
    throughput={
        'read': 5,
        'write': 5,
    },
    indexes=[
        KeysOnlyIndex('LastNameIndex', parts=[
            HashKey('username'),
            RangeKey('last_name')
        ]),
    ])
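
# Table.create() returns as soon as the request is accepted, while the
# table is still CREATING. A hedged guard sketch before using the table
# (Table.describe() refreshes and returns the raw description; the
# 60-iteration cap is an assumption, not from the original script):
import time

for _ in range(60):
    if users.describe()['Table']['TableStatus'] == 'ACTIVE':
        break
    time.sleep(1)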