Example #1
def sendtodynamo_cnn(cnnjson):
  ''' Send json to DynamoDB
  Assumes that article timestamps have been deduped to avoid collisions
  '''

  conn = connect_to_region('us-west-2', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
  
  hashkey = "CNN" # primary key to be used for DynamoDB table

  try:
    table = Table('CNN', connection=conn)
    table.describe()
  except boto.exception.JSONResponseError:
    print "Creating table"
    table = Table.create('CNN', schema=[HashKey('source'), RangeKey('tstamp',data_type=NUMBER)], throughput={'read':25, 'write':25}, indexes=[GlobalAllIndex('showidx',parts=[HashKey('show')],throughput={'read':10,'write':5})])

  iteration = 0
  for article in cnnjson:
    # Iterate through list of articles and upload to table
    rangekey = float(article['timestamp'])
    rowdata = {'source':hashkey,'tstamp':rangekey, 'cnnShow':article['show']}
    for key in article.keys():
      rowdata[key]=article[key]
    item = table.put_item(data = rowdata)
    iteration += 1
    if iteration%100==0:
      print "Uploaded "+iteration+" articles"

  return None
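A minimal usage sketch for the function above, assuming hypothetical article dicts that carry the 'timestamp' and 'show' fields the loop expects:

# Hypothetical payload; real articles come from whatever scraper feeds this function.
sample_articles = [
    {'timestamp': '1414454400', 'show': 'The Lead', 'headline': 'First headline'},
    {'timestamp': '1414458000', 'show': 'AC360', 'headline': 'Second headline'},
]
sendtodynamo_cnn(sample_articles)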
Example #2
    def bind(self):
        """Bind to existing tables.
        """
        if self.data_points_name and self.index_name:
            data_points_table = Table(self.data_points_name,
                                      connection=self.connection)
            try:
                s1 = data_points_table.describe()['Table']['TableStatus']
            except:
                raise
            else:
                self.data_points_table = data_points_table
                self.dp_writer = TimedBatchTable(
                    self.data_points_table.batch_write())

            index_table = Table(self.index_name, connection=self.connection)
            try:
                s2 = index_table.describe()['Table']['TableStatus']
            except:
                raise
            else:
                self.index_table = index_table

            if s1 == s2:
                self.item['state'] = s1
            else:
                self.item['state'] = 'UNDEFINED'

        return self.state
Example #3
def table_exists(name):
    t = Table(name)
    try:
        t.describe()
        return True
    except:
        return False
Example #4
def table_exists(name):
    t = Table(name)
    try:
        t.describe()
        return True
    except:
        return False
Example #5
    def migrate(self, args):
        # let's get the origin table
        try:
            origin = Table(args['origin_table'], connection=self.connection)
            origin.describe()
        except Exception as e:
            return "table {0} could not be found in {1}".format(
                args['origin_table'], self.region)

        # now, create the destination_table (using create)
        destination = Table(args['destination_table'],
                            connection=self.connection)
        print "creating table {0}".format(destination.table_name)
        if self.create(args) != 'CREATING':
            print "    table {0} exists".format(destination.table_name)
        else:
            while destination.describe()['Table']['TableStatus'] != 'ACTIVE':
                print "        ..."
                time.sleep(5)
            print "    table {0} created".format(destination.table_name)

        print "copying items from {0} to {1}".format(origin.table_name,
                                                     destination.table_name)
        for item in origin.scan():
            # be sure to mold the fields into their proper shapes
            item = self._mold(item)
            destination.put_item(item, overwrite=True)
Example #6
def _table_exists(table_name):
    table = Table(table_name)
    try:
        table.describe()
    except JSONResponseError as exc:
        if exc.error_code == 'ResourceNotFoundException':
            return False
    return True
Example #7
def _table_exists(table_name):
    table = Table(table_name)
    try:
        table.describe()
    except JSONResponseError as exc:
        if exc.error_code == "ResourceNotFoundException":
            return False
    return True
Example #8
    def get_table(self, table_name):
        """
        Fetch a table from DynamoDB.

        NOTE: this is a somewhat expensive operation,
              which must query dynamo for the current state of the table
        """
        table = Table(table_name, connection=self.dynamo_cx)

        # must describe the table, or it doesn't have the correct throughput values
        table.describe()

        return table
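Since describe() refreshes the cached table metadata, the returned Table reports its real provisioned throughput. A hedged sketch of what that makes available; the manager object and 'users' table name are assumptions:

table = manager.get_table('users')  # 'manager' stands in for whatever holds dynamo_cx
# After describe(), boto has populated the live ProvisionedThroughput values,
# so these reflect the table's current state rather than the library defaults.
print table.throughput['read'], table.throughput['write']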
Example #9
def create_missing_indexes(m_region, r_region, instance):
    # Skip all DynamoDB metadata tables.
    master_tables = [ i for i in db_connection(region=m_region).list_tables()['TableNames'] if i.startswith('DynamoDB') is False ]

    # Main db diff loop, just global indexes.
    for m1 in master_tables:
        # Set the region before selecting the table to describe.
        DynamoDBConnection.DefaultRegionName = m_region
        t = Table(m1)
        t.describe()

        # Set replica region to get table information.
        DynamoDBConnection.DefaultRegionName = r_region

        try:
            s = Table(m1)
            s.describe()
        except boto.exception.JSONResponseError:
            print colored("!!! CREATE TABLE '{0}' table doesn't exist in '{1}' region. ".format(m1, r_region), "red")
            # raise SystemExit("Ensure table exists in replica region and re-run.", 1)

        if len(t.global_indexes) > 0:
            print colored('{0} has global indexes.'.format(m1), 'green')
            # Get destination table global index names.
            d_index_names = [ i.schema()['IndexName'] for i in s.global_indexes ]

            # Check if any global indexes exist in replica table prior to trying
            # to create them and skip them if they do exist.
            for index in t.global_indexes:
                if index.schema()['IndexName'] in d_index_names:
                    print "Index '{0}' exists in replica, skipping.".format(index.schema()['IndexName'])
                    continue
                else:
                    print "\tCreating '{0}' index in replica table.".format(index.schema()['IndexName'])
                    for retry_count in range(0, 6):
                        try:
                            if s.create_global_secondary_index(index) is True:
                                print colored("Successfully created index '{0}' on '{1}' table.".format(index.schema()['IndexName'], s.table_name), "green")
                                break
                            else:
                                print colored("Failed to create index '{0}' on '{1}' table.".format(index.schema()['IndexName'], s.table_name), "red")
                        except boto.exception.JSONResponseError:
                            print "Index creation is finishing up, sleeping 1 minute before proceeding."
                            time.sleep(60)
                            continue
                    else:
                        print colored("!!! Retry count exceeded!  Manually create '{0}' index.".format(index.schema()['IndexName']), "red")
        else:
            print colored('{0} has no global indexes.  Skipping table.'.format(m1), 'red')

    print "Index creation complete."
Example #10
 def copy(self, args):
     # let's get the origin table
     try:
         origin = Table(args['origin_table'], connection=self.connection)
         origin.describe()
     except Exception as e:
         return "table {0} could not be found in {1}".format(args['origin_table'], self.region)
     
     # now, get the destination_table
     destination = Table(args['destination_table'], connection=self.connection)
     
     print "copying items from {0} to {1}".format(origin.table_name, destination.table_name)
     for item in origin.scan():
         destination.put_item(dict(item))
Example #11
def main():
    if len(sys.argv) == 2 and sys.argv[1] == 'check':
        print "*** Checking the table in dynamoDB, create one if not exist..."
        try:
            ddbc = DynamoDBConnection()
            src = ddbc.describe_table(iperf_table_name)['Table']
            logs = Table(iperf_table_name, schema=[HashKey('path'),RangeKey('datetime'),])
            logs.describe()
            sys.exit(0)
        except JSONResponseError:
            logs = Table.create(iperf_table_name, schema=[HashKey('path'),RangeKey('datetime'),])
            while ddbc.describe_table(iperf_table_name)['Table']['TableStatus'] != 'ACTIVE':
                sleep(3)
            sys.exit(1)
    if len(sys.argv) != 4:
        print "usage: %s <iperf_client_name> <datetime> <iperf_server_name>" % sys.argv[0]
        sys.exit(2)

    # Store arg lists
    iperf_client_name = sys.argv[1]
    datetime = sys.argv[2]
    iperf_server_name = sys.argv[3]
    path = iperf_client_name + '-' + iperf_server_name

    # Retrieve dynamoDB object
    try:
        logs = Table(iperf_table_name, schema=[HashKey('path'),RangeKey('datetime'),])
        tmp = logs.describe()
    except JSONResponseError:
        print "The table %s doesn't exist!" % iperf_table_name
        sys.exit(1)

    # Parse iperf log
    iperf = {}
    iperf['path'] = path
    iperf['datetime'] = datetime
    line = open(os.path.dirname(os.path.abspath(__file__))+'/log/'+datetime+'.log','r').readlines()[6]
    m = re.search(r"sec\s+(\d+\s+\w+)\s+(\d+\s+[\w/]+)", line)
    transfer = m.group(1)
    bandwidth = m.group(2)
    iperf['transfer'] = transfer
    iperf['bandwidth'] = bandwidth

    # Put the log to the dynamoDB table
    try:
        logs.put_item(data=iperf, overwrite=True)
    except ValidationException:
        pprint(iperf)
    except JSONResponseError:
        pass
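The regex above assumes a standard iperf summary line. A self-contained check of the parsing step against a made-up log line (the sample values are hypothetical):

import re

line = "[  3]  0.0-10.0 sec  1128 MBytes   945 Mbits/sec"  # hypothetical iperf output
m = re.search(r"sec\s+(\d+\s+\w+)\s+(\d+\s+[\w/]+)", line)
print m.group(1)  # '1128 MBytes'   -> transfer
print m.group(2)  # '945 Mbits/sec' -> bandwidth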
Example #12
def sendtodynamo_cnn(cnnjson):
    ''' Send json to DynamoDB
  Assumes that article timestamps have been deduped to avoid collisions
  '''

    conn = connect_to_region('us-west-2',
                             aws_access_key_id=AWS_ACCESS_KEY_ID,
                             aws_secret_access_key=AWS_SECRET_ACCESS_KEY)

    hashkey = "CNN"  # primary key to be used for DynamoDB table

    try:
        table = Table('CNN', connection=conn)
        table.describe()
    except boto.exception.JSONResponseError:
        print "Creating table"
        table = Table.create(
            'CNN',
            schema=[HashKey('source'),
                    RangeKey('tstamp', data_type=NUMBER)],
            throughput={
                'read': 25,
                'write': 25
            },
            indexes=[
                GlobalAllIndex('showidx',
                               parts=[HashKey('show')],
                               throughput={
                                   'read': 10,
                                   'write': 5
                               })
            ])

    iteration = 0
    for article in cnnjson:
        # Iterate through list of articles and upload to table
        rangekey = float(article['timestamp'])
        rowdata = {
            'source': hashkey,
            'tstamp': rangekey,
            'cnnShow': article['show']
        }
        for key in article.keys():
            rowdata[key] = article[key]
        item = table.put_item(data=rowdata)
        iteration += 1
        if iteration % 100 == 0:
            print "Uploaded " + iteration + " articles"

    return None
Example #13
 def archive(self, args):
     # let's get the origin table
     try:
         origin = Table(args['origin_table'], connection=self.connection)
         origin.describe()
     except Exception as e:
         return "table {0} could not be found in {1}".format(args['origin_table'], self.region)
     
     for item in origin.scan():
         item = dict(item)
         for key in item.keys():
             if type(item[key]) is set:
                 item[key] = list(item[key])
         
         print json.dumps(item, use_decimal=True)
Example #14
class TestCustomThroughput(unittest.TestCase):
    def setUp(self):

        # Configuration options
        self.table_name = 'conf'
        self.store_name = 'test'
        self.read_units = 10
        self.write_units = 8

        # Instantiate the store
        self.store = DynamoDBConfigStore(connection,
                                         self.table_name,
                                         self.store_name,
                                         read_units=self.read_units,
                                         write_units=self.write_units)

        # Get a Table instance for validation
        self.table = Table(self.table_name, connection=connection)

    def test_custom_throughput(self):
        """ Test that we can set custom thoughput for new tables """
        throughput = self.table.describe()[u'Table'][u'ProvisionedThroughput']

        self.assertEqual(throughput[u'ReadCapacityUnits'], self.read_units)
        self.assertEqual(throughput[u'WriteCapacityUnits'], self.write_units)

    def tearDown(self):
        """ Tear down the test case """
        self.table.delete()
Example #15
def populate_tables(connection, table_data):

	try:
		table = Table(table_data['table_name'], connection=connection)
		table.describe()

	except JSONResponseError, e:

		if e.error_code == 'ResourceNotFoundException':
			table = Table.create(
				table_name=table_data['table_name'],
				schema=DynamoDB_Schema,
				connection=connection
			)
		else:
			raise e
Example #16
 def get_table(self, table_name, hash_key='id', range_key='timestamp',
               throughput={'read': 5, 'write': 15}):
     if table_name in self.conn.list_tables()['TableNames']:
         table = Table(table_name, connection=self.conn)
         table.describe()  # Bug: https://github.com/boto/boto/issues/2826
         return table
     schema = [HashKey(hash_key)]
     if range_key:
         schema.append(RangeKey(range_key))
     table = Table.create(
         table_name,
         schema=schema,
         throughput=throughput,
         connection=self.conn)
     self.wait_until_table_is_active(table)
     return table
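A hedged usage sketch for the helper above; the store object and table names are assumptions, not part of the original code:

events = store.get_table('events')  # default 'id'/'timestamp' hash/range keys
counters = store.get_table('counters', hash_key='name',
                           range_key=None)  # hash-only schema, no RangeKey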
Example #17
    def test_describe_table(self):

        self.storage_mocker.StubOutWithMock(storage, 'describe_table')

        storage.describe_table(IgnoreArg(), 'test_table').AndReturn(
            models.TableMeta(
                models.TableSchema(
                    {
                        'city1': models.ATTRIBUTE_TYPE_STRING,
                        'id': models.ATTRIBUTE_TYPE_STRING,
                        'name': models.ATTRIBUTE_TYPE_STRING
                    }, ['id', 'name'],
                    {'index_name': models.IndexDefinition('city1')}),
                models.TableMeta.TABLE_STATUS_ACTIVE))

        self.storage_mocker.ReplayAll()

        table = Table('test_table', connection=self.DYNAMODB_CON)

        table_description = table.describe()

        self.storage_mocker.VerifyAll()

        self.assertEquals('test_table',
                          table_description['Table']['TableName'])
        self.assertItemsEqual([{
            "AttributeName": "city1",
            "AttributeType": "S"
        }, {
            "AttributeName": "id",
            "AttributeType": "S"
        }, {
            "AttributeName": "name",
            "AttributeType": "S"
        }], table_description['Table']['AttributeDefinitions'])
Example #18
    def archive(self, args):
        # let's get the origin table
        try:
            origin = Table(args['origin_table'], connection=self.connection)
            origin.describe()
        except Exception as e:
            return "table {0} could not be found in {1}".format(
                args['origin_table'], self.region)

        for item in origin.scan():
            item = dict(item)
            for key in item.keys():
                if type(item[key]) is set:
                    item[key] = list(item[key])

            print json.dumps(item, use_decimal=True)
Example #19
class TestCustomThroughput(unittest.TestCase):

    def setUp(self):

        # Configuration options
        self.table_name = 'conf'
        self.store_name = 'test'
        self.read_units = 10
        self.write_units = 8

        # Instantiate the store
        self.store = DynamoDBConfigStore(
            connection,
            self.table_name,
            self.store_name,
            read_units=self.read_units,
            write_units=self.write_units)

        # Get a Table instance for validation
        self.table = Table(self.table_name, connection=connection)

    def test_custom_throughput(self):
        """ Test that we can set custom thoughput for new tables """
        throughput = self.table.describe()[u'Table'][u'ProvisionedThroughput']

        self.assertEqual(throughput[u'ReadCapacityUnits'], self.read_units)
        self.assertEqual(throughput[u'WriteCapacityUnits'], self.write_units)

    def tearDown(self):
        """ Tear down the test case """
        self.table.delete()
Example #20
  def setUp(self):
    logging.getLogger('boto').setLevel(logging.CRITICAL)

    err = 'Use a real DynamoDB %s. Add datastore/dynamo/test_settings.py.'
    assert aws_access_key != '<aws access key>', err % 'access key.'
    assert aws_secret_key != '<aws secret key>', err % 'secret key.'
    self.conn = boto.dynamodb2.connect_to_region(aws_region, aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
    
    # Create an indexed table 
    table = Table(self.INDEXED_TABLE, connection=self.conn)
    try:
      status = table.describe()
    except:
      table = Table.create(self.INDEXED_TABLE, schema=[
          HashKey('department', data_type=STRING),
          RangeKey('name', data_type=STRING)
      ], indexes=[
        AllIndex('ScoreIndex', parts=[
          HashKey('department'),
          RangeKey('score', data_type=NUMBER)
        ])
      ], global_indexes=[
        GlobalAllIndex('GroupIndex', parts=[
          HashKey('group'),
          RangeKey('age', data_type=NUMBER)
        ])
      ], connection=self.conn)

    # make sure we're clean :)
    self._delete_keys_from_table(self.SIMPLE_TABLE) 
    self._delete_keys_from_table(self.INDEXED_TABLE) 
    self._delete_keys_from_table(self.RANGEKEY_TABLE) 
Example #21
 def _table(self, name, schema, read, write):
     table = Table(name, connection=self._dynamo)
     try:
         table.describe()
         return table
     except Exception as e:
         if e.error_code != 'ResourceNotFoundException':
             raise e
         logger.debug('Creating table %s', name)
         table = Table.create(name,
                              schema=schema,
                              throughput={
                                  'read': read,
                                  'write': write
                              },
                              connection=self._dynamo)
         return table
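A hedged call sketch for the helper above, which returns the existing table or creates it on a ResourceNotFoundException; the owner object, name, and schema are assumptions:

locks = store._table('locks', [HashKey('name')], 5, 5)  # read=5, write=5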
Example #22
    def copy(self, args):
        # let's get the origin table
        try:
            origin = Table(args['origin_table'], connection=self.connection)
            origin.describe()
        except Exception as e:
            return "table {0} could not be found in {1}".format(
                args['origin_table'], self.region)

        # now, get the destination_table
        destination = Table(args['destination_table'],
                            connection=self.connection)

        print "copying items from {0} to {1}".format(origin.table_name,
                                                     destination.table_name)
        for item in origin.scan():
            destination.put_item(dict(item))
Example #23
def apply(message, dynamodb_connection, indexer_queue, statsd):
	"""
	Applies the specific change.

	New Table behaviour:
	If a DynamoDB Table doesn't already exist for the configured prefix, day and time resolution
	 combination, then a Table will be created.

	New Item behaviour:
	If a DynamoDB Item doesn't already exist for the metric and the configured time period, then
	 one will be created.

	Update Item behaviour:
	If a DynamoDB Item already exists for the metric and configured time period, then the new 
	data points will be merged with the existing data as per the aggregation type specified.

	Aggregation Type behaviour:
	- sum: the values for each measurement will be added together
	- average: the values for each measurement will be added together with the data point already
	 stored and divided by 2
	- minimum: the minimum value will be stored for each measurement out of the new value and
	 the existing value
	- maximum: the maximum value will be stored for each measurement out of the new value and
	 the existing value
	"""

	metric = message.metric
	start_time = message.start_time
	resolution = message.resolution
	datapoints = message.datapoints
	aggregation_type = message.aggregation_type

	table_name = get_table_name(resolution, start_time)

	try:
		table = Table(table_name, connection=dynamodb_connection)
		statsd.incr('apply.dynamodb.table.describe')
		table.describe()

	except JSONResponseError, e:

		if e.error_code == 'ResourceNotFoundException':
			statsd.incr('apply.dynamodb.table.create')
			table = Table.create(table_name, schema=DynamoDB_Schema, connection=dynamodb_connection)
		else:
			raise e
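The docstring spells out four merge strategies, but the snippet ends before showing them. A minimal sketch of how such a merge could look; this helper is hypothetical, not part of the original module:

def merge_datapoint(existing, new, aggregation_type):
    # Mirrors the docstring's rules for combining a stored value with a new one.
    if aggregation_type == 'sum':
        return existing + new
    elif aggregation_type == 'average':
        return (existing + new) / 2.0
    elif aggregation_type == 'minimum':
        return min(existing, new)
    elif aggregation_type == 'maximum':
        return max(existing, new)
    raise ValueError('unknown aggregation type: %s' % aggregation_type)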
Example #24
def desc_table(name):
    # inputs must be strings
    try:
        table = Table(name, connection=client_dynamo)
        out = table.describe()
        print json.dumps(out, indent=4, sort_keys=True)
        return out
    except KeyboardInterrupt:
        exit()
Example #25
    def get_table(self, table_name):
        """
        Fetch a Table from DynamoDB.

        NOTE: this is a somewhat expensive operation,
        which must query dynamo for the current state
        of the table.

        :type table_name: str
        :param table_name: Name of Table to load

        :rtype: boto.dynamodb2.table.Table:
        :returns: Requested Table
        """
        table = Table(table_name, connection=self.dynamo_cx)

        # must describe the table, or it doesn't have the correct throughput values
        table.describe()

        return table
Example #26
    def get_table(self, table_name):
        """
        Fetch a Table from DynamoDB.

        NOTE: this is a somewhat expensive operation,
        which must query dynamo for the current state
        of the table.

        :type table_name: str
        :param table_name: Name of Table to load

        :rtype: boto.dynamodb2.table.Table:
        :returns: Requested Table
        """
        table = Table(table_name, connection=self.dynamo_cx)

        # must describe the table, or it doesn't have the correct throughput values
        table.describe()

        return table
Example #27
def describe(table_name, region=None, key=None, keyid=None, profile=None):
    '''
    Describe a DynamoDB table.

    CLI example::

        salt myminion boto_dynamodb.describe table_name region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    table = Table(table_name, connection=conn)
    return table.describe()
Example #28
def describe(table_name, region=None, key=None, keyid=None, profile=None):
    '''
    Describe a DynamoDB table.

    CLI example::

        salt myminion boto_dynamodb.describe table_name region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    table = Table(table_name, connection=conn)
    return table.describe()
Example #29
def createTable( tname, primary_key, secondary_key=None ):
    #check that table does not already exist
    table = Table(tname)
    try:
        table.describe()
        print "Table", tname, "already exists."
        return table
    except JSONResponseError as jse:
        if jse.error_code == 'ResourceNotFoundException':
            sc = [HashKey(primary_key)]  # use the key name passed by the caller
            if secondary_key:
                sc.append(RangeKey(secondary_key))
            net_data = Table.create(tname, schema=sc,
                                    throughput={'read': 5, 'write': 15})
            return net_data
        else:
            # Something else happened (not a missing table); propagate it.
            raise
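A hedged usage sketch; the table and key names below are made up:

net_data = createTable('network_data', 'src_id', secondary_key='tstamp')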
Example #30
def create_or_update_dynamo_table(connection, module):
    table_name = module.params.get('name')
    hash_key_name = module.params.get('hash_key_name')
    hash_key_type = module.params.get('hash_key_type')
    range_key_name = module.params.get('range_key_name')
    range_key_type = module.params.get('range_key_type')
    read_capacity = module.params.get('read_capacity')
    write_capacity = module.params.get('write_capacity')
    all_indexes = module.params.get('indexes')

    for index in all_indexes:
        validate_index(index, module)

    schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type)

    throughput = {
        'read': read_capacity,
        'write': write_capacity
    }

    indexes, global_indexes = get_indexes(all_indexes)

    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
        hash_key_name=hash_key_name,
        hash_key_type=hash_key_type,
        range_key_name=range_key_name,
        range_key_type=range_key_type,
        read_capacity=read_capacity,
        write_capacity=write_capacity,
        indexes=all_indexes,
    )

    try:
        table = Table(table_name, connection=connection)

        if dynamo_table_exists(table):
            result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
        else:
            if not module.check_mode:
                Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes)
            result['changed'] = True

        if not module.check_mode:
            result['table_status'] = table.describe()['Table']['TableStatus']

    except BotoServerError:
        result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
Example #31
    def test_describe_unexisting_table(self):

        self.storage_mocker.StubOutWithMock(storage, 'describe_table')

        storage.describe_table(IgnoreArg(),
                               'test_table1').AndRaise(TableNotExistsException)

        self.storage_mocker.ReplayAll()

        table = Table('test_table1', connection=self.DYNAMODB_CON)

        try:
            table.describe()
        except JSONResponseError as e:
            self.assertEqual(
                e.body['__type'],
                'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException')

            self.assertEqual(
                e.body['message'],
                'The resource which is being requested does not exist.')
Example #32
def create_or_update_dynamo_table(connection, module):
    table_name = module.params.get('name')
    hash_key_name = module.params.get('hash_key_name')
    hash_key_type = module.params.get('hash_key_type')
    range_key_name = module.params.get('range_key_name')
    range_key_type = module.params.get('range_key_type')
    read_capacity = module.params.get('read_capacity')
    write_capacity = module.params.get('write_capacity')

    if range_key_name:
        schema = [
            HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type)),
            RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type))
        ]
    else:
        schema = [HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type))]
    throughput = {'read': read_capacity, 'write': write_capacity}

    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
        hash_key_name=hash_key_name,
        hash_key_type=hash_key_type,
        range_key_name=range_key_name,
        range_key_type=range_key_type,
        read_capacity=read_capacity,
        write_capacity=write_capacity,
    )

    try:
        table = Table(table_name, connection=connection)

        if dynamo_table_exists(table):
            result['changed'] = update_dynamo_table(
                table, throughput=throughput, check_mode=module.check_mode)
        else:
            if not module.check_mode:
                Table.create(table_name,
                             connection=connection,
                             schema=schema,
                             throughput=throughput)
            result['changed'] = True

        if not module.check_mode:
            result['table_status'] = table.describe()['Table']['TableStatus']

    except BotoServerError:
        result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
Example #33
def create_or_update_dynamo_table(connection, module):
    table_name = module.params.get('name')
    hash_key_name = module.params.get('hash_key_name')
    hash_key_type = module.params.get('hash_key_type')
    range_key_name = module.params.get('range_key_name')
    range_key_type = module.params.get('range_key_type')
    read_capacity = module.params.get('read_capacity')
    write_capacity = module.params.get('write_capacity')
    all_indexes = module.params.get('indexes')

    for index in all_indexes:
        validate_index(index, module)

    schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type)

    throughput = {
        'read': read_capacity,
        'write': write_capacity
    }

    indexes, global_indexes = get_indexes(all_indexes)

    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
        hash_key_name=hash_key_name,
        hash_key_type=hash_key_type,
        range_key_name=range_key_name,
        range_key_type=range_key_type,
        read_capacity=read_capacity,
        write_capacity=write_capacity,
        indexes=all_indexes,
    )

    try:
        table = Table(table_name, connection=connection)

        if dynamo_table_exists(table):
            result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
        else:
            if not module.check_mode:
                Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes)
            result['changed'] = True

        if not module.check_mode:
            result['table_status'] = table.describe()['Table']['TableStatus']

    except BotoServerError:
        result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
Example #34
def create_or_update_dynamo_table(connection, module):
    table_name = module.params.get('name')
    hash_key_name = module.params.get('hash_key_name')
    hash_key_type = module.params.get('hash_key_type')
    range_key_name = module.params.get('range_key_name')
    range_key_type = module.params.get('range_key_type')
    read_capacity = module.params.get('read_capacity')
    write_capacity = module.params.get('write_capacity')

    if range_key_name:
        schema = [
            HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type)),
            RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type))
        ]
    else:
        schema = [
            HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type))
        ]
    throughput = {
        'read': read_capacity,
        'write': write_capacity
    }

    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
        hash_key_name=hash_key_name,
        hash_key_type=hash_key_type,
        range_key_name=range_key_name,
        range_key_type=range_key_type,
        read_capacity=read_capacity,
        write_capacity=write_capacity,
    )

    try:
        table = Table(table_name, connection=connection)

        if dynamo_table_exists(table):
            result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode)
        else:
            if not module.check_mode:
                Table.create(table_name, connection=connection, schema=schema, throughput=throughput)
            result['changed'] = True

        if not module.check_mode:
            result['table_status'] = table.describe()['Table']['TableStatus']

    except BotoServerError:
        result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
Example #35
def describe(table_name, region=None, key=None, keyid=None, profile=None):
    """
    Describe a DynamoDB table.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_dynamodb.describe table_name region=us-east-1
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    table = Table(table_name, connection=conn)
    return table.describe()
Example #36
    def _create_table(self):
        ''' Create the underlying dynamodb table for writing
        locks to if it does not exist, otherwise uses the existing
        table. We use the `describe` method call to verify if the
        table exists or not.

        :returns: A handle to the underlying dynamodb table
        '''
        try:
            table = Table(self.schema.table_name)
            _logger.debug("current table description:\n%s", table.describe())
        except JSONResponseError, ex:
            _logger.exception("table %s does not exist, creating it",
                              self.schema.table_name)
            table = Table.create(
                self.schema.table_name,
                schema=[HashKey(self.schema.name, data_type=STRING)],
                throughput={
                    'read': self.schema.read_capacity,
                    'write': self.schema.write_capacity,
                })
            _logger.debug("current table description:\n%s", table.describe())
Example #37
    def test_describe_unexisting_table(self):

        self.storage_mocker.StubOutWithMock(storage, 'describe_table')

        storage.describe_table(
            IgnoreArg(), 'test_table1'
        ).AndRaise(TableNotExistsException)

        self.storage_mocker.ReplayAll()

        table = Table('test_table1', connection=self.DYNAMODB_CON)

        try:
            table.describe()
        except JSONResponseError as e:
            self.assertEqual(
                e.body['__type'],
                'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException')

            self.assertEqual(
                e.body['message'],
                'The resource which is being requested does not exist.')
Example #38
 def migrate(self, args):
     # let's get the origin table
     try:
         origin = Table(args['origin_table'], connection=self.connection)
         origin.describe()
     except Exception as e:
         return "table {0} could not be found in {1}".format(args['origin_table'], self.region)
     
     # now, create the destination_table (using create)
     destination = Table(args['destination_table'], connection=self.connection)
     print "creating table {0}".format(destination.table_name)
     if self.create(args) != 'CREATING':
         print "    table {0} exists".format(destination.table_name)
     else:
         while destination.describe()['Table']['TableStatus'] != 'ACTIVE':
             print "        ..."
             time.sleep(5)
         print "    table {0} created".format(destination.table_name)
     
     print "copying items from {0} to {1}".format(origin.table_name, destination.table_name)
     for item in origin.scan():
         # be sure to mold the fields into their proper shapes
         item = self._mold(item)
         destination.put_item(item, overwrite=True)
Example #39
def make_table(ddb, name, **kwargs):

    table = Table(table_name=name, connection=ddb, **kwargs)

    while True:
        try:
            if table.describe()["Table"]["TableStatus"] == "ACTIVE":
                return table
            else:
                time.sleep(1)
        except JSONResponseError as exc:
            if exc.error_code == "ResourceNotFoundException":
                table = Table.create(table_name=name, connection=ddb, **kwargs)
            else:
                raise
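A hedged usage sketch for the polling helper above; the region, table name, and schema are assumptions:

from boto.dynamodb2 import connect_to_region
from boto.dynamodb2.fields import HashKey

ddb = connect_to_region('us-west-2')  # hypothetical region
events = make_table(ddb, 'events', schema=[HashKey('event_id')])
# make_table returns only once the table reports ACTIVE, creating it on the
# first ResourceNotFoundException if it does not exist yet.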
Example #40
    def test_describe_table(self):

        self.storage_mocker.StubOutWithMock(storage, 'describe_table')

        storage.describe_table(IgnoreArg(), 'test_table').AndReturn(
            models.TableMeta(
                '00000000-0000-0000-0000-000000000000',
                models.TableSchema(
                    {
                        'city1': models.AttributeType('S'),
                        'id': models.AttributeType('S'),
                        'name': models.AttributeType('S')
                    },
                    ['id', 'name'],
                    {'index_name': models.IndexDefinition('id', 'city1')}
                ),
                models.TableMeta.TABLE_STATUS_ACTIVE,
                None
            )
        )

        self.storage_mocker.ReplayAll()

        table = Table('test_table', connection=self.DYNAMODB_CON)

        table_description = table.describe()

        self.storage_mocker.VerifyAll()

        self.assertEqual('test_table',
                         table_description['Table']['TableName'])
        self.assertItemsEqual(
            [
                {
                    "AttributeName": "city1",
                    "AttributeType": "S"
                },
                {
                    "AttributeName": "id",
                    "AttributeType": "S"
                },
                {
                    "AttributeName": "name",
                    "AttributeType": "S"
                }
            ], table_description['Table']['AttributeDefinitions'])
Example #41
 def restore(self, args):
     # create the destination_table (using create)
     destination = Table(args['destination_table'], connection=self.connection)
     print "creating table {0}".format(destination.table_name)
     if self.create(args) != 'CREATING':
         print "    table {0} exists".format(destination.table_name)
     else:
         while destination.describe()['Table']['TableStatus'] != 'ACTIVE':
             print "        ..."
             time.sleep(5)
         print "    table {0} created".format(destination.table_name)
     
     print "reading items from stdin to {0}".format(destination.table_name)
     for line in sys.stdin:
         # be sure to mold the fields into their proper shapes
         item = self._mold(json.loads(line))
         destination.put_item(item, overwrite=True)
Example #42
    def restore(self, args):
        # create the destination_table (using create)
        destination = Table(args['destination_table'],
                            connection=self.connection)
        print "creating table {0}".format(destination.table_name)
        if self.create(args) != 'CREATING':
            print "    table {0} exists".format(destination.table_name)
        else:
            while destination.describe()['Table']['TableStatus'] != 'ACTIVE':
                print "        ..."
                time.sleep(5)
            print "    table {0} created".format(destination.table_name)

        print "reading items from stdin to {0}".format(destination.table_name)
        for line in sys.stdin:
            # be sure to mold the fields into their proper shapes
            item = self._mold(json.loads(line))
            destination.put_item(item, overwrite=True)
Example #43
    def test_describe_table(self):

        self.storage_mocker.StubOutWithMock(storage, 'describe_table')

        storage.describe_table(IgnoreArg(), 'test_table').AndReturn(
            models.TableSchema(
                'test_table',
                {
                    'city1': models.ATTRIBUTE_TYPE_STRING,
                    'id': models.ATTRIBUTE_TYPE_STRING,
                    'name': models.ATTRIBUTE_TYPE_STRING
                },
                ['id', 'name'],
                {'index_name': models.IndexDefinition('city1')}
            )
        )

        self.storage_mocker.ReplayAll()

        table = Table('test_table', connection=self.DYNAMODB_CON)

        table_description = table.describe()

        self.storage_mocker.VerifyAll()

        self.assertEquals('test_table',
                          table_description['Table']['TableName'])
        self.assertItemsEqual(
            [
                {
                    "AttributeName": "city1",
                    "AttributeType": "S"
                },
                {
                    "AttributeName": "id",
                    "AttributeType": "S"
                },
                {
                    "AttributeName": "name",
                    "AttributeType": "S"
                }
            ], table_description['Table']['AttributeDefinitions'])
Example #44
def getMessageTable():
    conn = None
    if os.environ.get('DEVELOPER_MODE'):
        host, port = 'localhost', 8000
        confirmDynamoDbLocalIsRunning(host, port)
        conn = DynamoDBConnection(host=host,
                                  port=port,
                                  aws_access_key_id='unit_test',
                                  aws_secret_access_key='unit_test',
                                  is_secure=False)
    else:
        conn = DynamoDBConnection()

    try:
        msg_table_desc = conn.describe_table(_message_table)
        msg_table = Table(_message_table, connection=conn)
    except JSONResponseError as e:
        # Only handle the ResourceNotFoundException here
        if e.error_code != 'ResourceNotFoundException':
            raise e

        msg_table = Table.create(
            _message_table,
            schema=[HashKey('date_string'),
                    RangeKey('date')],
            throughput={
                'read': 5,
                'write': 5
            },
            connection=conn)

    while not msg_table.describe()['Table']['TableStatus'] == "ACTIVE":
        from time import sleep
        sleep(1)

    return msg_table
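A short, hedged sketch of writing to the table returned above; the item values are made up but match the HashKey('date_string')/RangeKey('date') schema:

msg_table = getMessageTable()
msg_table.put_item(data={
    'date_string': '2015-06-01',    # hypothetical hash key value
    'date': '2015-06-01T12:00:00',  # hypothetical range key value
    'body': 'example message',
})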
Example #45
def main():
    # TODO: Need argument parsing
    unit_flag = False

    instance_name = sys.argv[1]
    trial = sys.argv[2]
    try:
        logs = Table(instance_name, schema=[HashKey('trial'),RangeKey('parallel'),])
        tmp = logs.describe()
    except JSONResponseError:
        sys.exit()

    # Create units table
    if unit_flag:    
        try:
            units_t = Table('unixbench_unit', schema=[HashKey('test_name'),])
            tmp = units_t.describe()
        except JSONResponseError:
            units_t = Table.create('unixbench_unit', schema=[HashKey('test_name'),])
            sleep(15)  # Wait until the new table becomes available
        units = {}

    multi_flag = False
    result_flag = False
    test_index = 0
    b = {}
    single = {}
    multi = {}
    for line in open(os.path.dirname(os.path.abspath(__file__))+'/log/'+instance_name+'_'+trial+'.log','r'):
        if "Benchmark Run" in line:
            result_flag = True
        elif not result_flag:
            continue
        if Tests[test_index] in line:
            m = re.search(r"\s+(\d+\.\d)\s(\w+)", line)
            if test_index+1 != len(Tests):
                if unit_flag:
                    units[Tests[test_index]] = m.group(2)
                b[Tests[test_index]] = m.group(1)
                test_index += 1
            else:
                m = re.search(r"\s+(\d+\.\d)", line)
                result_flag = False
                if not multi_flag:
                    multi_flag = True
                    single = copy.deepcopy(b)
                    single[Tests[test_index]] = m.group(1)
                    single['parallel'] = 'single'
                    single['trial'] = trial
                    test_index = 0
                    b.clear()
                else:
                    multi = copy.deepcopy(b)
                    multi[Tests[test_index]] = m.group(1)
                    multi['trial'] = trial
                    multi['parallel'] = 'multi'

    logs.put_item(data=single, overwrite=True)
    if len(multi) != 0:
        logs.put_item(data=multi, overwrite=True)

    # Upload units for tests
    if unit_flag:
        for t, u in units.iteritems():
            print t, u
            units_t.put_item(data={
                'test_name': t,
                'unit': u
            })
Example #46
def create_or_update_dynamo_table(connection, module, boto3_dynamodb=None, boto3_sts=None, region=None):
    table_name = module.params.get('name')
    hash_key_name = module.params.get('hash_key_name')
    hash_key_type = module.params.get('hash_key_type')
    range_key_name = module.params.get('range_key_name')
    range_key_type = module.params.get('range_key_type')
    read_capacity = module.params.get('read_capacity')
    write_capacity = module.params.get('write_capacity')
    all_indexes = module.params.get('indexes')
    tags = module.params.get('tags')
    wait_for_active_timeout = module.params.get('wait_for_active_timeout')

    for index in all_indexes:
        validate_index(index, module)

    schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type)

    throughput = {
        'read': read_capacity,
        'write': write_capacity
    }

    indexes, global_indexes = get_indexes(all_indexes)

    result = dict(
        region=region,
        table_name=table_name,
        hash_key_name=hash_key_name,
        hash_key_type=hash_key_type,
        range_key_name=range_key_name,
        range_key_type=range_key_type,
        read_capacity=read_capacity,
        write_capacity=write_capacity,
        indexes=all_indexes,
    )

    try:
        table = Table(table_name, connection=connection)

        if dynamo_table_exists(table):
            result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
        else:
            if not module.check_mode:
                Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes)
            result['changed'] = True

        if not module.check_mode:
            result['table_status'] = table.describe()['Table']['TableStatus']

        if tags:
            # only tables which are active can be tagged
            wait_until_table_active(module, table, wait_for_active_timeout)
            account_id = get_account_id(boto3_sts)
            boto3_dynamodb.tag_resource(
                ResourceArn='arn:aws:dynamodb:%s:%s:table/%s' % (
                    region, account_id, table_name),
                Tags=ansible_dict_to_boto3_tag_list(tags))
            result['tags'] = tags

    except BotoServerError:
        result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
Example #47
def main():
    n_arg = len(sys.argv)
    if 1 < n_arg:
        if sys.argv[1] == 'unixbench':
            u_data_model = 'unixbench/unixbench_ec2_userscript_model.dat'
        elif sys.argv[1] == 'x264':
            u_data_model = 'x264/x264_userscript_model.dat'
    else:
        print "usage: %s [unixbench|x264]" % sys.argv[0]
        sys.exit(0)

    # Block device storage size
    if n_arg == 3:
        dev_sda1 = BlockDeviceType()
        dev_sda1.size = int(sys.argv[2])
        bdm = BlockDeviceMapping()
        bdm['/dev/sda1'] = dev_sda1
    else:
        bdm = None

    # Lists of instance types to be benchmarked and already completed
    instances = []
    completed = []
    
    try:
        instances_dict = json.load(open("web/data/instances.json", "r"))
    except IOError:
        print "*** web/data/instances.json not found! Try ./update_instances.py ***"
        sys.exit(1)

    for k, v in instances_dict.iteritems():
        if v['cloud'] == 'EC2':
            if u_data_model == 'unixbench/unixbench_userscript_model.dat':
                try:
                    instance_logs = Table(k)
                    instance_logs.describe()
                    completed.append(k)
                except JSONResponseError:
                    instances.append(k)
            elif u_data_model == 'x264/x264_userscript_model.dat':
                instances.append(k)
            else:
                print 'Nothing to do'
                sys.exit(0)

    # Starting all the benchmarks at once will most likely exceed the per-user quota limit.
    # Better to execute the benchmarks category by category.
    conn = boto.ec2.connect_to_region(region)
    
    #instances = []
    #completed = ['t1.micro_paravirtual']
    num_instances = len(instances)
    while 0 < len(instances):
        for i in instances:
            print '%s is waiting for launch' % i
        for i in completed:
            if i in instances:
                instances.remove(i)
        for i in instances:
            # Generate a user-script per instance
            userscript = ''
            if u_data_model == 'unixbench/unixbench_userscript_model.dat':
                userscript = "#!/bin/sh\nTRIAL=%d\nINSTANCE_NAME=%s\n"%(trial,i) + open(u_data_model,'r').read()
            elif u_data_model == 'x264/x264_userscript_model.dat':
                userscript = "#!/bin/sh\nTRIAL=%d\necho %s > /var/local/instance_name\n"%(trial,i) + open(u_data_model,'r').read()
            u_data = base64.b64encode(userscript)
            res = start_benchmark_instance(conn, i, u_data, bdm)
            if res is not None and not res in completed:
                completed.append(res)
            sleep(5)
        if len(completed) == num_instances:
            break
        else:
            print '*** Cooling down...'
            # 30 mins interval
            sleep(60*30) 
Example #48
    def test_query_after_describe_with_gsi(self):
        # Create a table using a GSI to reproduce the error mentioned in issue
        # https://github.com/boto/boto/issues/2828
        users = Table.create('more_gsi_query_users',
                             schema=[HashKey('user_id')],
                             throughput={
                                 'read': 5,
                                 'write': 5
                             },
                             global_indexes=[
                                 GlobalAllIndex('EmailGSIIndex',
                                                parts=[HashKey('email')],
                                                throughput={
                                                    'read': 1,
                                                    'write': 1
                                                })
                             ])

        # Add this function to be called after tearDown()
        self.addCleanup(users.delete)

        # Wait for it.
        time.sleep(60)

        # populate a couple of items in it
        users.put_item(
            data={
                'user_id': '7',
                'username': '******',
                'first_name': 'John',
                'last_name': 'Doe',
                'email': '*****@*****.**',
            })
        users.put_item(
            data={
                'user_id': '24',
                'username': '******',
                'first_name': 'Alice',
                'last_name': 'Expert',
                'email': '*****@*****.**',
            })
        users.put_item(
            data={
                'user_id': '35',
                'username': '******',
                'first_name': 'Jane',
                'last_name': 'Doe',
                'email': '*****@*****.**',
            })

        # Try the GSI. it should work.
        rs = users.query_2(email__eq='*****@*****.**',
                           index='EmailGSIIndex')

        for rs_item in rs:
            self.assertEqual(rs_item['username'], ['johndoe'])

        # The issue arises when we're introspecting the table and try to
        # query_2 after calling the describe method
        users_hit_api = Table('more_gsi_query_users')
        users_hit_api.describe()

        # Try the GSI. This is what is going wrong in issue #2828. It should
        # work fine now.
        rs = users_hit_api.query_2(email__eq='*****@*****.**',
                                   index='EmailGSIIndex')

        for rs_item in rs:
            self.assertEqual(rs_item['username'], ['johndoe'])
Example #49
#!/usr/bin/python
# -*- coding: utf-8 -*-
from time import sleep
import sys

try:
    from boto.dynamodb2.fields import HashKey, RangeKey
    from boto.dynamodb2.table import Table
    from boto.exception import JSONResponseError
except ImportError:
    sys.exit(0)

instance_name = open('/var/local/instance_name').read().strip()

# Check if the table exists or not, create one if not
print "*** Connecting to DynamoDB '%s' table..." % instance_name
table_struct = None
try:
    logs = Table(instance_name, schema=[HashKey('trial'),RangeKey('parallel'),])
    table_struct = logs.describe()
except JSONResponseError:
    logs = Table.create(instance_name, schema=[HashKey('trial'),RangeKey('parallel'),])
    sys.stdout.write("*** DynamoDB is creating a new table...")
while table_struct is None:
    try:
        logs = Table(instance_name, schema=[HashKey('trial'),RangeKey('parallel'),])
        table_struct = logs.describe()
        print "\tDone"
    except JSONResponseError:
        sleep(5)
Example #50
def main():
    n_arg = len(sys.argv)
    if 1 < n_arg:
        option = sys.argv[1]
        if option == 'unixbench':
            u_data_model = 'unixbench/unixbench_ec2_userscript_model.dat'
            trial = int(sys.argv[2])
        elif option == 'x264':
            u_data_model = 'x264/x264_userscript_model.dat'
            trial = int(sys.argv[2])
        elif option == 'iperf':
            u_data_model = 'iperf/iperf_userscript_model.dat'
            minute = int(sys.argv[2])
        else:
            print "unrecognized option: %s" % option
            print "usage: %s [unixbench|x264] [trial]" % sys.argv[0]
            print "usage: %s [iperf] [iperf-server] [minute]" % sys.argv[0]
            sys.exit(1)
    else:
        print "usage: %s [unixbench|x264] [trial]" % sys.argv[0]
        print "usage: %s [iperf] [iperf-server] [minute]" % sys.argv[0]
        sys.exit(1)

    '''
    # Block device storage size
    if n_arg == 3:
        dev_sda1 = BlockDeviceType()
        dev_sda1.size = int(sys.argv[2])
        bdm = BlockDeviceMapping()
        bdm['/dev/sda1'] = dev_sda1
    else:
        bdm = None
    '''
    bdm = None

    # Lists of instance types to be benchmarked and already completed
    instances = []
    completed = []
    
    try:
        instances_dict = json.load(open("web/data/instances.json", "r"))
    except IOError:
        print "*** web/data/instances.json not found! Try ./update_instances.py ***"
        sys.exit(1)

    # For EC2 unixbench, ignore instances whose results were already created
    for k, v in instances_dict.iteritems():
        if v['cloud'] == 'EC2':
            if option == 'unixbench':
                try:
                    instance_logs = Table(k)
                    instance_logs.describe()
                    completed.append(k)
                except JSONResponseError:
                    instances.append(k)
            elif option == 'x264' or option == 'iperf':
                instances.append(k)
            else:
                print 'Nothing to do'
                sys.exit(0)

    # Starting all the benchmarks at once will most likely exceed the per-user quota limit.
    # Better to execute the benchmarks category by category.
    conn = boto.ec2.connect_to_region(region)
    
    # For manual list of instances, modify here
    instances = ['m3.medium_hvm','m3.medium_paravirtual','m3.large_hvm','m3.large_paravirtual']
    #completed = ['t1.micro_paravirtual']
    num_instances = len(instances)
    if option == 'iperf':
        wait_until_next(0, minute)
        while True:
            runnings = []
            iperf_servers = 'declare -a arr=('
            # Start Iperf servers here
            for sid, sregion in iperfs.iteritems():
                co = boto.ec2.connect_to_region(sregion)
                co.start_instances(sid)
                print '+Starting instance %s...' % sid
                while co.get_only_instances(sid)[0].state_code != 16: # 16 (running)
                    sleep(3)
                iperf_servers += "\"" + co.get_only_instances(sid)[0].public_dns_name + "\" "
            iperf_servers += ")\n"

            for i in instances:
                userscript = "#!/bin/sh\nINSTANCE_NAME=%s\n"%(i) + iperf_servers + open(u_data_model,'r').read()
                u_data = base64.b64encode(userscript)
                res, i_id = start_benchmark_instance(conn, i, u_data, bdm)
                if res is not None:
                    runnings.append(i_id)
                # Sleep 6 mins to give interval between instances
                sleep(60*6)
            print '*** Waiting for the instances to complete iperf...'
            # Poll every launched instance, not just the last one, until all stop
            while any(inst.state_code != 80 for inst in conn.get_only_instances(runnings)): # 80 (stopped)
                sleep(30)
            conn.terminate_instances(runnings)
            for i in runnings:
                print "-Instances %s terminated" % i
            # Stop Iperf servers here
            for sid, sregion in iperfs.iteritems():
                co = boto.ec2.connect_to_region(sregion)
                co.stop_instances(sid)
                print '-Stopping instance %s...'%sid
                while co.get_only_instances(sid)[0].state_code != 80: # 80 (stopped)
                    sleep(3)
            # Wait until next hour
            wait_until_next(1, minute)
    else:
        while 0 < len(instances):
            for i in instances:
                print '%s is waiting for launch' % i
            for i in completed:
                if i in instances:
                    instances.remove(i)
            for i in instances:
                # Generate an user-script per instance
                userscript = ''
                if option == 'unixbench':
                    userscript = "#!/bin/sh\nTRIAL=%d\nINSTANCE_NAME=%s\n"%(trial,i) + open(u_data_model,'r').read()
                elif option == 'x264':
                    userscript = "#!/bin/sh\nTRIAL=%d\necho %s > /var/local/instance_name\n"%(trial,i) + open(u_data_model,'r').read()
                u_data = base64.b64encode(userscript)
                res, i_id = start_benchmark_instance(conn, i, u_data, bdm)
                if res is not None and res not in completed:
                    completed.append(res)
                sleep(5)
            if len(completed) == num_instances:
                break
            else:
                print '*** Cooling down...'
                # 30 mins interval
                sleep(60*30)
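
`wait_until_next` is not defined in this excerpt. Judging from the calls `wait_until_next(0, minute)` (align the first run) and `wait_until_next(1, minute)` ("wait until next hour"), a hypothetical implementation sleeps until minute `minute` of the current or next hour:

from datetime import datetime, timedelta
from time import sleep

def wait_until_next(hours_ahead, minute):
    # Hypothetical helper: sleep until ``minute`` past the hour,
    # hours_ahead hours from now (0 = this hour, 1 = the next hour).
    now = datetime.now()
    target = now.replace(minute=minute, second=0, microsecond=0) \
        + timedelta(hours=hours_ahead)
    if target <= now:
        target += timedelta(hours=1)
    print '*** Sleeping %d seconds until %s' % ((target - now).total_seconds(), target)
    sleep((target - now).total_seconds())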
Ejemplo n.º 51
0
    def test_integration(self):
        # Test creating a full table with all options specified.
        users = Table.create('users', schema=[
            HashKey('username'),
            RangeKey('friend_count', data_type=NUMBER)
        ], throughput={
            'read': 5,
            'write': 5,
        }, indexes=[
            KeysOnlyIndex('LastNameIndex', parts=[
                HashKey('username'),
                RangeKey('last_name')
            ]),
        ])
        self.addCleanup(users.delete)

        self.assertEqual(len(users.schema), 2)
        self.assertEqual(users.throughput['read'], 5)

        # Wait for it.
        time.sleep(60)

        # Make sure things line up if we're introspecting the table.
        users_hit_api = Table('users')
        users_hit_api.describe()
        self.assertEqual(len(users.schema), len(users_hit_api.schema))
        self.assertEqual(users.throughput, users_hit_api.throughput)
        self.assertEqual(len(users.indexes), len(users_hit_api.indexes))

        # Test putting some items individually.
        users.put_item(data={
            'username': '******',
            'first_name': 'John',
            'last_name': 'Doe',
            'friend_count': 4
        })

        users.put_item(data={
            'username': '******',
            'first_name': 'Alice',
            'last_name': 'Expert',
            'friend_count': 2
        })

        time.sleep(5)

        # Test batch writing.
        with users.batch_write() as batch:
            batch.put_item({
                'username': '******',
                'first_name': 'Jane',
                'last_name': 'Doe',
                'friend_count': 3
            })
            batch.delete_item(username='******', friend_count=2)
            batch.put_item({
                'username': '******',
                'first_name': 'Bob',
                'last_name': 'Smith',
                'friend_count': 1
            })

        time.sleep(5)

        # Test getting an item & updating it.
        # This is the "safe" variant (only write if there have been no
        # changes).
        jane = users.get_item(username='******', friend_count=3)
        self.assertEqual(jane['first_name'], 'Jane')
        jane['last_name'] = 'Doh'
        self.assertTrue(jane.save())

        # Test strongly consistent getting of an item.
        # Additionally, test the overwrite behavior.
        client_1_jane = users.get_item(
            username='******',
            friend_count=3,
            consistent=True
        )
        self.assertEqual(jane['first_name'], 'Jane')
        client_2_jane = users.get_item(
            username='******',
            friend_count=3,
            consistent=True
        )
        self.assertEqual(jane['first_name'], 'Jane')

        # Write & assert the ``first_name`` is gone, then...
        del client_1_jane['first_name']
        self.assertTrue(client_1_jane.save())
        check_name = users.get_item(
            username='******',
            friend_count=3,
            consistent=True
        )
        self.assertEqual(check_name['first_name'], None)

        # ...overwrite the data with what's in memory.
        client_2_jane['first_name'] = 'Joan'
        # Now a write that fails due to default expectations...
        self.assertRaises(exceptions.JSONResponseError, client_2_jane.save)
        # ... so we force an overwrite.
        self.assertTrue(client_2_jane.save(overwrite=True))
        check_name_again = users.get_item(
            username='******',
            friend_count=3,
            consistent=True
        )
        self.assertEqual(check_name_again['first_name'], 'Joan')

        # Reset it.
        jane['username'] = '******'
        jane['first_name'] = 'Jane'
        jane['last_name'] = 'Doe'
        jane['friend_count'] = 3
        self.assertTrue(jane.save(overwrite=True))

        # Test the partial update behavior.
        client_3_jane = users.get_item(
            username='******',
            friend_count=3,
            consistent=True
        )
        client_4_jane = users.get_item(
            username='******',
            friend_count=3,
            consistent=True
        )
        client_3_jane['favorite_band'] = 'Feed Me'
        # No ``overwrite`` needed due to new data.
        self.assertTrue(client_3_jane.save())
        # Expectations are only checked on the ``first_name``, so what wouldn't
        # have succeeded by default does succeed here.
        client_4_jane['first_name'] = 'Jacqueline'
        self.assertTrue(client_4_jane.partial_save())
        partial_jane = users.get_item(
            username='******',
            friend_count=3,
            consistent=True
        )
        self.assertEqual(partial_jane['favorite_band'], 'Feed Me')
        self.assertEqual(partial_jane['first_name'], 'Jacqueline')

        # Reset it.
        jane['username'] = '******'
        jane['first_name'] = 'Jane'
        jane['last_name'] = 'Doe'
        jane['friend_count'] = 3
        self.assertTrue(jane.save(overwrite=True))

        # Ensure that partial saves of a brand-new object work.
        sadie = Item(users, data={
            'username': '******',
            'first_name': 'Sadie',
            'favorite_band': 'Zedd',
            'friend_count': 7
        })
        self.assertTrue(sadie.partial_save())
        serverside_sadie = users.get_item(
            username='******',
            friend_count=7,
            consistent=True
        )
        self.assertEqual(serverside_sadie['first_name'], 'Sadie')

        # Test the eventually consistent query.
        results = users.query(
            username__eq='johndoe',
            last_name__eq='Doe',
            index='LastNameIndex',
            attributes=('username',),
            reverse=True
        )

        for res in results:
            self.assertTrue(res['username'] in ['johndoe',])
            self.assertEqual(res.keys(), ['username'])


        # Test the strongly consistent query.
        c_results = users.query(
            username__eq='johndoe',
            last_name__eq='Doe',
            index='LastNameIndex',
            reverse=True,
            consistent=True
        )

        for res in c_results:
            self.assertTrue(res['username'] in ['johndoe',])

        # Test scans without filters.
        all_users = users.scan(limit=7)
        self.assertEqual(all_users.next()['username'], 'bob')
        self.assertEqual(all_users.next()['username'], 'jane')
        self.assertEqual(all_users.next()['username'], 'johndoe')

        # Test scans with a filter.
        filtered_users = users.scan(limit=2, username__beginswith='j')
        self.assertEqual(filtered_users.next()['username'], 'jane')
        self.assertEqual(filtered_users.next()['username'], 'johndoe')

        # Test deleting a single item.
        johndoe = users.get_item(username='******', friend_count=4)
        johndoe.delete()

        # Test the eventually consistent batch get.
        results = users.batch_get(keys=[
            {'username': '******', 'friend_count': 1},
            {'username': '******', 'friend_count': 3}
        ])
        batch_users = []

        for res in results:
            batch_users.append(res)
            self.assertTrue(res['first_name'] in ['Bob', 'Jane'])

        self.assertEqual(len(batch_users), 2)

        # Test the strongly consistent batch get.
        c_results = users.batch_get(keys=[
            {'username': '******', 'friend_count': 1},
            {'username': '******', 'friend_count': 3}
        ], consistent=True)
        c_batch_users = []

        for res in c_results:
            c_batch_users.append(res)
            self.assertTrue(res['first_name'] in ['Bob', 'Jane'])

        self.assertEqual(len(c_batch_users), 2)

        # Test count, but in a weak fashion. Because lag time.
        self.assertTrue(users.count() > -1)

        # Test query count
        count = users.query_count(
            username__eq='bob',
        )

        self.assertEqual(count, 1)

        # Test without LSIs (describe calls shouldn't fail).
        admins = Table.create('admins', schema=[
            HashKey('username')
        ])
        self.addCleanup(admins.delete)
        time.sleep(60)
        admins.describe()
        self.assertEqual(admins.throughput['read'], 5)
        self.assertEqual(admins.indexes, [])

        # A single query term should fail on a table with *ONLY* a HashKey.
        self.assertRaises(
            exceptions.QueryError,
            admins.query,
            username__eq='johndoe'
        )
        # But it shouldn't break on more complex tables.
        res = users.query(username__eq='johndoe')

        # Test putting with/without sets.
        mau5_created = users.put_item(data={
            'username': '******',
            'first_name': 'dead',
            'last_name': 'mau5',
            'friend_count': 2,
            'friends': set(['skrill', 'penny']),
        })
        self.assertTrue(mau5_created)

        penny_created = users.put_item(data={
            'username': '******',
            'first_name': 'Penny',
            'friend_count': 0,
            'friends': set([]),
        })
        self.assertTrue(penny_created)
Ejemplo n.º 52
0
    return '%s_%s' % (TABLE_SPACE, table)


def table_names(tables):
    """Given a list of table base names, return a dictionary
       mapping from base name to full name.

    @param tables: a list of base table names.
    @return: a dictionary mapping from base name to full name.
    """
    return dict(map(lambda name: (name, table_name(name)), tables))

config_table = Table(table_name('config'), connection=connection)
desc = None
try:
    desc = config_table.describe()
except:
    config_table = Table.create(table_name('config'),
                                schema=[HashKey('name')],
                                throughput={'read': 1, 'write': 1}, connection=connection)

desc = config_table.describe()
while MAX_WAIT and desc['Table']['TableStatus'] != 'ACTIVE':
    MAX_WAIT -= 1
    time.sleep(1)
    desc = config_table.describe()

if desc['Table']['TableStatus'] != 'ACTIVE':
    print 'error accessing', table_name('config'), 'table in region', REGION
    sys.exit(1)
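
As a usage note, the `table_name`/`table_names` helpers above simply prefix base names with `TABLE_SPACE`; for example, assuming `TABLE_SPACE = 'prod'` (a hypothetical value):

# Assuming TABLE_SPACE = 'prod' (hypothetical):
table_name('config')             # -> 'prod_config'
table_names(['config', 'logs'])  # -> {'config': 'prod_config', 'logs': 'prod_logs'}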
Ejemplo n.º 53
0
if len(sys.argv) != 3:
    print("Usage: %s <SOURCE_TABLE> <DESTNATION_TABLE>" % sys.argv[0])
    sys.exit(1)

src_name = sys.argv[1]
dst_name = sys.argv[2]

region = os.getenv("AWS_DEFAULT_REGION", "ap-northeast-2")

DynamoDBConnection.DefaultRegionName = region
ddbc = DynamoDBConnection()

# source table
try:
    src_logs = Table(src_name, connection=ddbc)
    src_logs.describe()
except JSONResponseError:
    print("Table [%s] does not exist." % src_name)
    sys.exit(1)

print("# Read from [%s]." % src_name)
src_table = ddbc.describe_table(src_name)["Table"]

hash_key = ""
range_key = ""
for schema in src_table["KeySchema"]:
    attr_name = schema["AttributeName"]
    key_type = schema["KeyType"]
    if key_type == "HASH":
        hash_key = attr_name
    elif key_type == "RANGE":
Ejemplo n.º 54
0
src_table = sys.argv[1]
dst_table = sys.argv[2]
# Optional 3rd argument: pass 'False'/'false' to skip creating the destination table
create_dst_table = not (len(sys.argv) >= 4 and sys.argv[3] in ('False', 'false'))
region = os.getenv('AWS_DEFAULT_REGION', 'us-west-2')

# host = 'dynamodb.%s.amazonaws.com' % region
# ddbc = DynamoDBConnection(is_secure=False, region=region, host=host)
DynamoDBConnection.DefaultRegionName = region
ddbc = DynamoDBConnection()

# 1. Read and copy the target table to be copied
table_struct = None
try:
    logs = Table(src_table, connection=ddbc)
    table_struct = logs.describe()
except JSONResponseError:
    print "Table %s does not exist" % src_table
    sys.exit(1)

print '*** Reading key schema from %s table' % src_table
src = ddbc.describe_table(src_table)['Table']
hash_key = ''
range_key = ''
for schema in src['KeySchema']:
    attr_name = schema['AttributeName']
    key_type = schema['KeyType']
    if key_type == 'HASH':
        hash_key = attr_name
    elif key_type == 'RANGE':
        range_key = attr_name
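
The copy itself is not shown in this excerpt. A sketch of the next step, rebuilding the key schema for the destination table, might look like the following (assuming string key types for brevity; a faithful copy would read each attribute's real type from src['AttributeDefinitions']):

from boto.dynamodb2.fields import HashKey, RangeKey
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import STRING

schema = [HashKey(hash_key, data_type=STRING)]
if range_key:
    schema.append(RangeKey(range_key, data_type=STRING))

if create_dst_table:
    print '*** Creating destination table %s' % dst_table
    dst_logs = Table.create(dst_table, schema=schema, connection=ddbc)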
Ejemplo n.º 56
0
    def check(self, args):
        self._validate()

        try:
            table = Table(args['origin_table'], connection=self.connection)
            table.describe()
        except Exception as e:
            return "table {0} could not be found in {1}".format(
                args['origin_table'], self.region)

        # first check hash- and rangekey (optional)
        hk_type = self.table_spec['schema']['hashkey']['type']
        hk_type = eval(Meister.types.format(hk_type))
        hk_name = self.table_spec['schema']['hashkey']['name']
        if table.schema[0].name != hk_name or table.schema[
                0].data_type != hk_type:
            return "{0}'s hashkey should be {1}, and type should be {2}".format(
                table.table_name, self.table_spec['schema']['hashkey']['name'],
                self.table_spec['schema']['hashkey']['type'])
        # beware, rangekey is optional
        if 'rangekey' in self.table_spec['schema']:
            rk_type = self.table_spec['schema']['rangekey']['type']
            rk_type = eval(Meister.types.format(rk_type))
            rk_name = self.table_spec['schema']['rangekey']['name']

            if len(table.schema) < 2 or \
                table.schema[1].name != rk_name or \
                table.schema[1].data_type != rk_type:
                return "{0}'s rangekey should be {1}, of type {2}".format(
                    table.table_name,
                    self.table_spec['schema']['rangekey']['name'],
                    self.table_spec['schema']['rangekey']['type'])
        elif len(table.schema) >= 2:
            return "{0} has a rangekey, specification not".format(
                table.table_name)

        # indexes
        indexes = {}
        for index in table.indexes:
            indexes[index.name] = index

        if 'indexes' in self.table_spec:
            for index in self.table_spec['indexes']:
                if index['name'] in indexes:
                    i_type = eval(
                        Meister.types.format(index['attribute']['type']))
                    i_name = index['attribute']['name']
                    if i_name != indexes[index['name']].parts[1].name or \
                            i_type != indexes[index['name']].parts[1].data_type:
                        return "index {0} in table {1} has type or name mismatch".format(
                            index['name'], table.table_name)
                    del (indexes[index['name']])
                else:
                    return "index {0} in table {1} not found in specification".format(
                        index['name'], table.table_name)
            if len(indexes) > 0:
                return "table {0} has indexes not found in specification".format(
                    table.table_name)

        # and last, the global secondary indexes
        global_indexes = {}
        for index in table.global_indexes:
            global_indexes[index.name] = index

        if 'global_indexes' in self.table_spec:
            for index in self.table_spec['global_indexes']:
                if index['name'] in global_indexes:
                    i_type = eval(
                        Meister.types.format(index['attribute']['type']))
                    i_name = index['attribute']['name']
                    if i_name != global_indexes[index['name']].parts[1].name or \
                            i_type != global_indexes[index['name']].parts[1].data_type:
                        return "index {0} in table {1} has type or name mismatch".format(
                            index['name'], table.table_name)
                    del (global_indexes[index['name']])
                else:
                    return "index {0} in table {1} not found in specification".format(
                        index['name'], table.table_name)
            if len(global_indexes) > 0:
                return "table {0} has indexes not found in specification".format(
                    table.table_name)

        return "no mismatch or errors found"
Ejemplo n.º 57
0
def create_or_update_dynamo_table(connection,
                                  module,
                                  boto3_dynamodb=None,
                                  boto3_sts=None):
    table_name = module.params.get('name')
    hash_key_name = module.params.get('hash_key_name')
    hash_key_type = module.params.get('hash_key_type')
    range_key_name = module.params.get('range_key_name')
    range_key_type = module.params.get('range_key_type')
    read_capacity = module.params.get('read_capacity')
    write_capacity = module.params.get('write_capacity')
    all_indexes = module.params.get('indexes')
    region = module.params.get('region')
    tags = module.params.get('tags')
    wait_for_active_timeout = module.params.get('wait_for_active_timeout')

    for index in all_indexes:
        validate_index(index, module)

    schema = get_schema_param(hash_key_name, hash_key_type, range_key_name,
                              range_key_type)

    throughput = {'read': read_capacity, 'write': write_capacity}

    indexes, global_indexes = get_indexes(all_indexes)

    result = dict(
        region=region,
        table_name=table_name,
        hash_key_name=hash_key_name,
        hash_key_type=hash_key_type,
        range_key_name=range_key_name,
        range_key_type=range_key_type,
        read_capacity=read_capacity,
        write_capacity=write_capacity,
        indexes=all_indexes,
    )

    try:
        table = Table(table_name, connection=connection)

        if dynamo_table_exists(table):
            result['changed'] = update_dynamo_table(
                table,
                throughput=throughput,
                check_mode=module.check_mode,
                global_indexes=global_indexes)
        else:
            if not module.check_mode:
                Table.create(table_name,
                             connection=connection,
                             schema=schema,
                             throughput=throughput,
                             indexes=indexes,
                             global_indexes=global_indexes)
            result['changed'] = True

        if not module.check_mode:
            result['table_status'] = table.describe()['Table']['TableStatus']

        if tags:
            # only tables which are active can be tagged
            wait_until_table_active(module, table, wait_for_active_timeout)
            account_id = get_account_id(boto3_sts)
            boto3_dynamodb.tag_resource(
                ResourceArn='arn:aws:dynamodb:' + region + ':' + account_id +
                ':table/' + table_name,
                Tags=ansible_dict_to_boto3_tag_list(tags))
            result['tags'] = tags

    except BotoServerError:
        result['msg'] = ('Failed to create/update dynamo table due to error: '
                         + traceback.format_exc())
        module.fail_json(**result)
    else:
        module.exit_json(**result)
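
`dynamo_table_exists` is not included in this excerpt; a minimal sketch consistent with the `describe()` probes used throughout these examples:

from boto.exception import JSONResponseError

def dynamo_table_exists(table):
    # Probe a boto2 Table; describe() raises JSONResponseError
    # (ResourceNotFoundException) when the table is missing.
    try:
        table.describe()
        return True
    except JSONResponseError:
        return False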
Ejemplo n.º 58
0
class MyDatabase(object):

    def __init__(self, region):
        self.schema = [
            HashKey(args.hashkey,   data_type=STRING),
            RangeKey(args.indexkey, data_type=STRING),
        ]
        self.indexes = [
            AllIndex('FirstName', parts=[
                HashKey(args.hashkey, data_type=STRING),
                RangeKey('FirstName', data_type=STRING),
            ]),
            AllIndex('LastName', parts=[
                HashKey(args.hashkey, data_type=STRING),
                RangeKey('LastName', data_type=STRING),
            ])
        ]
        self.conn = boto.dynamodb2.connect_to_region(
            region,
            aws_access_key_id=os.environ['AWS_KEY'],
            aws_secret_access_key=os.environ['AWS_SECRET']
        )
        return

    def __del__(self):
        return

    def list(self):
        sys.stderr.write('Tables:\n')
        tables = self.conn.list_tables()
        for table in tables['TableNames']:
            sys.stdout.write('%s\n'%table)
        return

    def create(self, name):
        self.table = Table.create(
            name,
            schema=self.schema,
            indexes=self.indexes,
            connection=self.conn
        )
        sys.stdout.write('%s\n'%self.table)
        return

    def all(self):
        query = {
            '%s__eq'%args.hashkey : args.hashval
        }  
        if args.verbose:
            prettyPrint(query, output=sys.stderr,colour=True)
        records = []
        #query['document_id__eq'] = '3e272906-a5df-4a70-b19e-b4e2504525f2'
        for row in self.table.query_2(**query):
            record = {}
            for key in row.keys():
                record[key] = row[key]
            records.append(record)
        return records

    def connect(self, name):
        self.table = Table(
            name,
            connection=self.conn
        )
        sys.stderr.write('Connect: %s\n'%self.table)
        return

    def dbschema(self):
        sys.stderr.write('Schema:\n')
        schema = self.table.describe()
        return schema
        
    def insert(self, dictstring):
        guid = '%s'%uuid.uuid4()
        data = {
            args.hashkey : args.hashval,
            args.indexkey : guid
        }
        for key in dictstring.keys():
            data[key] = dictstring[key]
        if args.verbose:
            prettyPrint(data,colour=True,output=sys.stderr)
        self.table.put_item(data=data)
        return guid

    def query(self):
        return

    def get(self, id):
        sys.stderr.write('Get(%s):\n'%id)
        get = {
            args.hashkey : args.hashval,
            args.indexkey : id
        }
        if args.verbose:
            prettyPrint(get,output=sys.stderr,colour=True)
        doc = self.table.get_item(**get)
        got = {}
        for key in doc.keys():
            got[key] = '%s'%doc.get(key)
        return got

    def update(self):
        return

    def delete(self, name):
        return

    def drop(self, name):
        table = Table(name, connection=self.conn)
        sys.stderr.write('drop=%s\n'%table)
        table.delete()
        return
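
The class reads a module-level `args` namespace plus `AWS_KEY`/`AWS_SECRET` from the environment; a hypothetical driver (every name below is an assumption, not part of the original):

import argparse

# Hypothetical argparse namespace with the fields MyDatabase reads.
args = argparse.Namespace(hashkey='source', indexkey='document_id',
                          hashval='demo', verbose=False)

db = MyDatabase('us-west-2')   # requires AWS_KEY / AWS_SECRET in the environment
db.connect('documents')        # assumed existing table with a matching schema
doc_id = db.insert({'FirstName': 'Ada', 'LastName': 'Lovelace'})
print db.get(doc_id)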