def create_cache(self):
    """Create the DynamoDB table backing the cache (best-effort).

    Resolves a connection and table name from ``DYNAMODB_CACHE_SOURCE`` and
    issues a ``CreateTable`` call with a single string HASH key
    (``CACHE_DATA.KEY``) and fixed 10/10 provisioned throughput.

    Errors are logged and swallowed so that a pre-existing table (the common
    case: ``ResourceInUseException``) does not abort start-up.
    """
    try:
        dynamodb_conn = aws.get_connection(DYNAMODB_CACHE_SOURCE)
        dynamodb_table = aws.get_arn_from_arn_string(DYNAMODB_CACHE_SOURCE).slash_resource()
        dynamodb_conn.create_table(
            TableName=dynamodb_table,
            AttributeDefinitions=[
                {
                    AWS_DYNAMODB.AttributeName: CACHE_DATA.KEY,
                    AWS_DYNAMODB.AttributeType: AWS_DYNAMODB.STRING
                }
            ],
            KeySchema=[
                {
                    AWS_DYNAMODB.AttributeName: CACHE_DATA.KEY,
                    AWS_DYNAMODB.KeyType: AWS_DYNAMODB.HASH
                }
            ],
            ProvisionedThroughput={
                AWS_DYNAMODB.ReadCapacityUnits: 10,
                # NOTE(review): "WriteCapacityUnites" looks like a typo for
                # "WriteCapacityUnits" (the key boto expects) — but it is an
                # attribute of the project constant class AWS_DYNAMODB, so
                # verify the constant's spelling before renaming it here.
                AWS_DYNAMODB.WriteCapacityUnites: 10
            }
        )
    except Exception:
        # Best-effort: keep start-up alive if the table already exists, but
        # record the failure instead of the original bare `except: pass`,
        # which silently hid every error (including programming mistakes).
        logging.exception('unable to create cache table for %s', DYNAMODB_CACHE_SOURCE)
    def execute(self, context, obj):
        """Demo FSM action: count invocations, fail randomly ~50% of the
        time, and emit a summary DynamoDB record once the count passes 100.

        Returns 'event1' while still counting, 'done' when finished.
        """
        logging.info('context: %s', context)

        # randomly raise an exception (simulated flakiness)
        if random.uniform(0, 1.0) < 0.5:
            raise Exception()

        logging.info('action.name=%s', self.name)

        # increment the counter
        count = context.get('count', 0) + 1
        context['count'] = count

        # record the started_at (user space) variable on the first pass
        if count == 1:
            context['started_at'] = int(time.time())

        # still counting: dispatch the next event
        if count <= 100:
            return 'event1'

        # finished: emit a dynamodb record if a results table is configured
        if 'results_arn' in context:
            table_arn = context['results_arn']
            conn = get_connection(table_arn)
            conn.put_item(
                TableName=get_arn_from_arn_string(table_arn).slash_resource(),
                Item={
                    'correlation_id': {AWS_DYNAMODB.STRING: context.correlation_id},
                    'count': {AWS_DYNAMODB.NUMBER: str(count)},
                    'started_at': {AWS_DYNAMODB.NUMBER: str(context['started_at'])},
                    'finished_at': {AWS_DYNAMODB.NUMBER: str(int(time.time()))},
                    'flag': {AWS_DYNAMODB.STRING: context.get('flag', 'Unknown')}
                }
            )
        return 'done'
# library imports

# application imports
from aws_lambda_fsm.aws import get_arn_from_arn_string
from aws_lambda_fsm.constants import AWS
from aws_lambda_fsm.aws import validate_config
import settings

# NOTE(review): `logging` and `subprocess` are used below but not imported in
# this fragment — presumably imported earlier in the file; confirm.
logging.basicConfig(level=logging.INFO)
validate_config()

# Walk every PRIMARY_*/SECONDARY_*/RESULTS_* ARN setting and shell out to the
# matching helper script to create the underlying AWS resource. The helper
# scripts receive the settings attribute NAME (not the ARN value) and resolve
# it themselves via getattr(settings, ...) — see the scripts further below.
for attr in dir(settings):
    if attr.startswith('PRIMARY_') or attr.startswith(
            'SECONDARY_') or attr.startswith('RESULTS_'):
        arn_string = getattr(settings, attr)
        arn = get_arn_from_arn_string(arn_string)
        # skip settings whose value is not a parseable service ARN
        if arn.service:
            logging.info('*' * 80)
            logging.info('CREATING %s', arn_string)
            logging.info('*' * 80)
            if arn.service == AWS.KINESIS:
                subprocess.call([
                    'create_kinesis_stream.py', '--kinesis_stream_arn=' + attr
                ])
            elif arn.service == AWS.DYNAMODB:
                subprocess.call([
                    'create_dynamodb_table.py', '--dynamodb_table_arn=' + attr
                ])
            elif arn.service == AWS.SNS:
                subprocess.call(
                    ['create_sns_topic.py', '--sns_topic_arn=' + attr])
# configure logging: accepts either a numeric level or a level name in
# --log_level
logging.basicConfig(
    format='[%(levelname)s] %(asctime)-15s %(message)s',
    level=int(args.log_level) if args.log_level.isdigit() else args.log_level,
    datefmt='%Y-%m-%d %H:%M:%S')

# quieten boto's own loggers independently of the script's level
logging.getLogger('boto3').setLevel(args.boto_log_level)
logging.getLogger('botocore').setLevel(args.boto_log_level)

validate_config()

# setup connections to AWS
# --dynamodb_table_arn names a settings attribute; resolve it to the ARN value
dynamodb_table_arn = getattr(settings, args.dynamodb_table_arn)
logging.info('DynamoDB table ARN: %s', dynamodb_table_arn)
logging.info('DynamoDB endpoint: %s', settings.ENDPOINTS.get(AWS.DYNAMODB))
if get_arn_from_arn_string(dynamodb_table_arn).service != AWS.DYNAMODB:
    logging.fatal("%s is not a DynamoDB ARN", dynamodb_table_arn)
    sys.exit(1)
# disable_chaos: admin scripts must not be subject to injected failures
dynamodb_conn = get_connection(dynamodb_table_arn, disable_chaos=True)
dynamodb_table = get_arn_from_arn_string(dynamodb_table_arn).slash_resource()
logging.info('DynamoDB table: %s', dynamodb_table)

if 'RESULTS' in args.dynamodb_table_arn:
    # create a dynamodb table for examples/tracer
    # NOTE(review): this create_table call is truncated in this chunk — the
    # KeySchema/ProvisionedThroughput arguments and closing parenthesis are
    # not visible here.
    response = dynamodb_conn.create_table(
        TableName=dynamodb_table,
        AttributeDefinitions=[
            {
                AWS_DYNAMODB.AttributeName: 'correlation_id',
                AWS_DYNAMODB.AttributeType: AWS_DYNAMODB.STRING
            },
    # start things off
    # parse the optional initial context (JSON string, defaults to empty) and
    # launch args.num_machines state machines from the pseudo-init state/event
    context = json.loads(args.initial_context or "{}")
    current_state = current_event = STATE.PSEUDO_INIT
    start_state_machines(args.machine_name, [context] * args.num_machines,
                         current_state=current_state,
                         current_event=current_event)
    exit(0)

# checkpoint specified, so start with a context saved to the kinesis stream
if args.checkpoint_shard_id and args.checkpoint_sequence_number:

    # setup connections to AWS
    # --kinesis_stream_arn names a settings attribute; resolve it to the ARN
    kinesis_stream_arn = getattr(settings, args.kinesis_stream_arn)
    logging.info('Kinesis stream ARN: %s', kinesis_stream_arn)
    logging.info('Kinesis endpoint: %s', settings.ENDPOINTS.get(AWS.KINESIS))
    if get_arn_from_arn_string(kinesis_stream_arn).service != AWS.KINESIS:
        logging.fatal("%s is not a Kinesis ARN", kinesis_stream_arn)
        sys.exit(1)
    kinesis_conn = get_connection(kinesis_stream_arn)
    kinesis_stream = get_arn_from_arn_string(
        kinesis_stream_arn).slash_resource()
    logging.info('Kinesis stream: %s', kinesis_stream)

    # create a shard iterator for the specified shard and sequence number
    # (AT_SEQUENCE_NUMBER: resume reading exactly at the checkpointed record)
    shard_iterator = kinesis_conn.get_shard_iterator(
        StreamName=kinesis_stream,
        ShardId=args.checkpoint_shard_id,
        ShardIteratorType=AWS_KINESIS.AT_SEQUENCE_NUMBER,
        StartingSequenceNumber=args.checkpoint_sequence_number)[
            AWS_KINESIS.ShardIterator]
# imports placed after runtime setup elsewhere in this script, hence the
# noqa: E402 suppressions
from aws_lambda_fsm.constants import STREAM_DATA  # noqa: E402
from aws_lambda_fsm.constants import AWS  # noqa: E402

import settings  # noqa: E402

# seed the PRNG for reproducible runs; record the script start time as a
# string (used later, presumably for record timestamps — confirm)
random.seed(args.random_seed)
STARTED_AT = str(int(time.time()))

validate_config()

# setup connections to AWS
if args.run_kinesis_lambda:
    # resolve the Kinesis stream ARN from the named settings attribute
    kinesis_stream_arn = getattr(settings, args.kinesis_stream_arn)
    logging.info('Kinesis stream ARN: %s', kinesis_stream_arn)
    logging.info('Kinesis endpoint: %s', settings.ENDPOINTS.get(AWS.KINESIS))
    if get_arn_from_arn_string(kinesis_stream_arn).service != AWS.KINESIS:
        logging.fatal("%s is not a Kinesis ARN", kinesis_stream_arn)
        sys.exit(1)
    # disable_chaos: this connection must not be subject to injected failures
    kinesis_conn = get_connection(kinesis_stream_arn, disable_chaos=True)
    kinesis_stream = get_arn_from_arn_string(
        kinesis_stream_arn).slash_resource()
    logging.info('Kinesis stream: %s', kinesis_stream)

if args.run_sqs_lambda:
    # resolve the SQS queue ARN from the named settings attribute
    sqs_queue_arn = getattr(settings, args.sqs_queue_arn)
    logging.info('SQS queue ARN: %s', sqs_queue_arn)
    logging.info('SQS endpoint: %s', settings.ENDPOINTS.get(AWS.SQS))
    if get_arn_from_arn_string(sqs_queue_arn).service != AWS.SQS:
        logging.fatal("%s is not a SQS ARN", sqs_queue_arn)
        sys.exit(1)
    sqs_conn = get_connection(sqs_queue_arn, disable_chaos=True)
# Esempio n. 7 ("Example no. 7" — pagination marker left over from the
# snippet-site scrape; vote count "0" preserved below)
# 0
# command-line interface: which settings attribute holds the SNS topic ARN,
# plus log levels for this script and for boto
parser = argparse.ArgumentParser(description='Creates AWS SNS topics.')
parser.add_argument('--sns_topic_arn', default='PRIMARY_STREAM_SOURCE')
parser.add_argument('--log_level', default='INFO')
parser.add_argument('--boto_log_level', default='INFO')
args = parser.parse_args()

# configure logging: accepts either a numeric level or a level name
logging.basicConfig(
    format='[%(levelname)s] %(asctime)-15s %(message)s',
    level=int(args.log_level) if args.log_level.isdigit() else args.log_level,
    datefmt='%Y-%m-%d %H:%M:%S')

# quieten boto's own loggers independently of the script's level
logging.getLogger('boto3').setLevel(args.boto_log_level)
logging.getLogger('botocore').setLevel(args.boto_log_level)

validate_config()

# setup connections to AWS
# --sns_topic_arn names a settings attribute; resolve it to the ARN value
sns_topic_arn = getattr(settings, args.sns_topic_arn)
logging.info('SNS topic ARN: %s', sns_topic_arn)
logging.info('SNS endpoint: %s', settings.ENDPOINTS.get(AWS.SNS))
if get_arn_from_arn_string(sns_topic_arn).service != AWS.SNS:
    logging.fatal("%s is not an SNS ARN", sns_topic_arn)
    sys.exit(1)
# disable_chaos: admin scripts must not be subject to injected failures
sns_conn = get_connection(sns_topic_arn, disable_chaos=True)
sns_topic = get_arn_from_arn_string(sns_topic_arn).resource
logging.info('SNS topic: %s', sns_topic)

# configure the topic (create_topic is idempotent for an existing topic name)
response = sns_conn.create_topic(Name=sns_topic)
logging.info(response)
parser.add_argument('--sleep_time', type=float, default=0.2)
args = parser.parse_args()

# configure logging: accepts either a numeric level or a level name
logging.basicConfig(
    format='[%(levelname)s] %(asctime)-15s %(message)s',
    level=int(args.log_level) if args.log_level.isdigit() else args.log_level,
    datefmt='%Y-%m-%d %H:%M:%S')

# quieten boto's own loggers independently of the script's level
logging.getLogger('boto3').setLevel(args.boto_log_level)
logging.getLogger('botocore').setLevel(args.boto_log_level)

validate_config()

# setup connections to AWS
# --sqs_queue_arn names a settings attribute; resolve it to the ARN value
sqs_arn_string = getattr(settings, args.sqs_queue_arn)
sqs_arn = get_arn_from_arn_string(sqs_arn_string)
if sqs_arn.service != AWS.SQS:
    logging.fatal("%s is not an SQS ARN", sqs_arn_string)
    sys.exit(1)
# disable_chaos: admin scripts must not be subject to injected failures;
# SQS operations need the queue URL, looked up from the queue name
sqs_conn = get_connection(sqs_arn_string, disable_chaos=True)
response = sqs_conn.get_queue_url(QueueName=sqs_arn.colon_resource())
sqs_queue_url = response[AWS_SQS.QueueUrl]

logging.info('SQS ARN: %s', sqs_arn_string)
logging.info('SQS endpoint: %s', settings.ENDPOINTS.get(AWS.SQS))
logging.info('SQS queue: %s', sqs_arn.resource)
logging.info('SQS queue url: %s', sqs_queue_url)

# validate the destination service for dispatched messages
# NOTE(review): the body of this `if` is truncated in this chunk — the
# statement below the colon is not visible here.
dest_arn_string = getattr(settings, args.dest_arn)
dest_arn = get_arn_from_arn_string(dest_arn_string)
if dest_arn.service not in ALLOWED_DEST_SERVICES:
# Esempio n. 9 ("Example no. 9" — pagination marker left over from the
# snippet-site scrape; vote count "0" preserved below)
# 0
parser.add_argument('--boto_log_level', default='INFO')
args = parser.parse_args()

# configure logging: accepts either a numeric level or a level name
logging.basicConfig(
    format='[%(levelname)s] %(asctime)-15s %(message)s',
    level=int(args.log_level) if args.log_level.isdigit() else args.log_level,
    datefmt='%Y-%m-%d %H:%M:%S'
)

# quieten boto's own loggers independently of the script's level
logging.getLogger('boto3').setLevel(args.boto_log_level)
logging.getLogger('botocore').setLevel(args.boto_log_level)

validate_config()

# setup connections to AWS
# --sqs_queue_arn names a settings attribute; resolve it to the ARN value
sqs_queue_arn = getattr(settings, args.sqs_queue_arn)
logging.info('SQS queue ARN: %s', sqs_queue_arn)
logging.info('SQS endpoint: %s', settings.ENDPOINTS.get(AWS.SQS))
if get_arn_from_arn_string(sqs_queue_arn).service != AWS.SQS:
    logging.fatal("%s is not an SQS ARN", sqs_queue_arn)
    sys.exit(1)
# disable_chaos: admin scripts must not be subject to injected failures
sqs_conn = get_connection(sqs_queue_arn, disable_chaos=True)
sqs_queue = get_arn_from_arn_string(sqs_queue_arn).resource
logging.info('SQS queue: %s', sqs_queue)

# configure the queue (create_queue is idempotent if the queue already exists
# with identical attributes)
response = sqs_conn.create_queue(
    QueueName=sqs_queue
)
logging.info(response)