def _execute(self, aws, machine_name, context,
             primary_stream_chaos=0.0, secondary_stream_chaos=0.0,
             primary_retry_chaos=0.0, secondary_retry_chaos=0.0,
             primary_cache_chaos=0.0, secondary_cache_chaos=0.0,
             empty_primary_cache=False, empty_secondary_cache=False):

    # kick off the machine, then configure the requested chaos on the AWS stub
    start_state_machine(machine_name, context, correlation_id='correlation_id')

    if primary_stream_chaos:
        aws.primary_stream_chaos = primary_stream_chaos
    if secondary_stream_chaos:
        aws.secondary_stream_chaos = secondary_stream_chaos
    if primary_retry_chaos:
        aws.primary_retry_chaos = primary_retry_chaos
    if secondary_retry_chaos:
        aws.secondary_retry_chaos = secondary_retry_chaos
    if primary_cache_chaos:
        aws.primary_cache_chaos = primary_cache_chaos
    if secondary_cache_chaos:
        aws.secondary_cache_chaos = secondary_cache_chaos
    if empty_primary_cache:
        aws.empty_primary_cache = empty_primary_cache
    if empty_secondary_cache:
        aws.empty_secondary_cache = empty_secondary_cache

    # drain the stub, dispatching each message through the Lambda handler
    # that matches this test's message transport
    message = aws.get_message()
    while message:
        if AWS_CONSTANTS.KINESIS == self.MESSAGE_TYPE:
            handler.lambda_kinesis_handler(to_kinesis_message(message))
        elif AWS_CONSTANTS.SQS == self.MESSAGE_TYPE:
            handler.lambda_sqs_handler(to_sqs_message(message))
        elif AWS_CONSTANTS.SNS == self.MESSAGE_TYPE:
            handler.lambda_sns_handler(to_sns_message(message))
        message = aws.get_message()
def _execute(self, aws, machine_name, context,
             primary_stream_chaos=0.0, secondary_stream_chaos=0.0,
             primary_retry_chaos=0.0, secondary_retry_chaos=0.0,
             primary_cache_chaos=0.0, secondary_cache_chaos=0.0,
             empty_primary_cache=False, empty_secondary_cache=False):

    # kick off the machine, then configure the requested chaos on the AWS stub
    start_state_machine(machine_name, context, correlation_id='correlation_id')

    if primary_stream_chaos:
        aws.primary_stream_chaos = primary_stream_chaos
    if secondary_stream_chaos:
        aws.secondary_stream_chaos = secondary_stream_chaos
    if primary_retry_chaos:
        aws.primary_retry_chaos = primary_retry_chaos
    if secondary_retry_chaos:
        aws.secondary_retry_chaos = secondary_retry_chaos
    if primary_cache_chaos:
        aws.primary_cache_chaos = primary_cache_chaos
    if secondary_cache_chaos:
        aws.secondary_cache_chaos = secondary_cache_chaos
    if empty_primary_cache:
        aws.empty_primary_cache = empty_primary_cache
    if empty_secondary_cache:
        aws.empty_secondary_cache = empty_secondary_cache

    # drain the stub, replaying every message through the Kinesis handler
    message = aws.get_message()
    while message:
        handler.lambda_kinesis_handler(to_kinesis_message(message))
        message = aws.get_message()
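# For illustration only: a chaos test built on the _execute helper above might
# look roughly like the following sketch. The AWS stub fixture, machine name,
# context, chaos ratio, and final assertion are all hypothetical and are not
# part of the framework shown here.
def test_primary_stream_chaos(self):
    aws = self._get_aws_stub()                     # hypothetical test fixture
    self._execute(
        aws, 'tracer', {'key': 'value'},
        primary_stream_chaos=0.25,                 # fail ~25% of primary stream calls
        empty_primary_cache=True                   # start with a cold primary cache
    )
    self.assertEqual(0, aws.errors)                # hypothetical assertion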
def test_start_state_machine(self, mock_time, mock_send_next_event_for_dispatch):
    mock_time.time.return_value = 12345.
    start_state_machine('name', {'aaa': 'bbb'}, correlation_id='correlation_id')
    mock_send_next_event_for_dispatch.assert_called_with(
        None,
        '{"system_context": {"correlation_id": "correlation_id", "current_event": '
        '"pseudo_init", "current_state": "pseudo_init", "machine_name": "name", "retries": 0, '
        '"started_at": 12345, "steps": 0}, "user_context": {"aaa": "bbb"}, "version": "0.1"}',
        'correlation_id')
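# The exact-string assertion above only works if the serialized payload is
# deterministic. A minimal sketch, assuming the dispatch path serializes the
# context with json.dumps(..., sort_keys=True) (an assumption, not confirmed
# here), reproduces the asserted message body:
import json

payload = {
    'system_context': {
        'correlation_id': 'correlation_id',
        'current_event': 'pseudo_init',
        'current_state': 'pseudo_init',
        'machine_name': 'name',
        'retries': 0,
        'started_at': 12345,
        'steps': 0,
    },
    'user_context': {'aaa': 'bbb'},
    'version': '0.1',
}
print(json.dumps(payload, sort_keys=True))  # matches the asserted string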
    logging.info('Kinesis stream: %s', kinesis_stream)

    # create a shard iterator for the specified shard and sequence number
    shard_iterator = kinesis_conn.get_shard_iterator(
        StreamName=kinesis_stream,
        ShardId=args.checkpoint_shard_id,
        ShardIteratorType=AWS_KINESIS.AT_SEQUENCE_NUMBER,
        StartingSequenceNumber=args.checkpoint_sequence_number)[AWS_KINESIS.ShardIterator]

    # get the record that holds the last successful state
    records = kinesis_conn.get_records(ShardIterator=shard_iterator, Limit=1)
    if records[AWS_KINESIS.Records]:
        context = json.loads(records[AWS_KINESIS.Records][0][AWS_KINESIS.DATA])
        current_state = context.get(SYSTEM_CONTEXT.CURRENT_STATE)
        current_event = context.get(SYSTEM_CONTEXT.CURRENT_EVENT)
    else:
        # no record found at the checkpoint, so fall back to an empty context
        context = {}
        current_state = current_event = STATE.PSEUDO_INIT

# no checkpoint specified, so start with an empty context
else:
    context = json.loads(args.initial_context or "{}")
    current_state = current_event = STATE.PSEUDO_INIT

# start things off
start_state_machine(args.machine_name, context,
                    correlation_id=args.correlation_id,
                    current_state=current_state,
                    current_event=current_event)
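# A hypothetical argparse wiring, inferred from the args.* attributes used
# above; the real script may name, type, or default these flags differently.
import argparse

parser = argparse.ArgumentParser(description='Start (or restart) a state machine.')
parser.add_argument('--machine_name', required=True)
parser.add_argument('--initial_context', default='{}')
parser.add_argument('--correlation_id', default=None)
parser.add_argument('--checkpoint_shard_id', default=None)
parser.add_argument('--checkpoint_sequence_number', default=None)
args = parser.parse_args()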