def group_wait_until_exists(self, sleep_for=0.5, max_attempts=60):     # poll until the log group exists
    for i in range(0, max_attempts):
        if self.group_exists():                                         # group is there
            return True
        wait(sleep_for)                                                  # not yet, wait and try again
    return False                                                         # group never showed up within max_attempts
def test_logs(self):
    for i in range(0, 30):
        exists = self.fargate_task.log_stream_exists()
        wait(1)                                             # give it a bit more time to capture the logs
        if exists:                                          # when the log stream exists
            break                                           # stop waiting
    logs = self.fargate_task.logs()                         # get logs
    assert "Hello from Docker!" in logs                     # confirm we got the correct logs
def test___enter__exit__(self, account_id, region):
    with Temp_Event_Rule_To_SQS_Queue(event_source=self.event_source) as _:
        wait(0.6)                                           # wait for rules to activate
        with Temp_EC2_Instance() as ec2_instance:
            assert ec2_instance.exists()
        wait(2.0)                                           # todo: add method to wait for events to be captured by the rule and sent to the queue

        message_1   = _.sqs_queue.pop()
        message_2   = _.sqs_queue.pop()
        instance_id = ec2_instance.instance_id

        del message_1['time']
        del message_1['id']
        del message_2['time']
        del message_2['id']

        expected_message_1_state = 'pending'
        expected_message_2_state = 'shutting-down'

        assert _.event_rule.event_source == 'aws.ec2'
        assert message_1 == { 'account'     : account_id,
                              'detail'      : {'instance-id': instance_id, 'state': expected_message_1_state},
                              'detail-type' : 'EC2 Instance State-change Notification',
                              'region'      : region,
                              'resources'   : [f'arn:aws:ec2:{region}:{account_id}:instance/{instance_id}'],
                              'source'      : 'aws.ec2',
                              'version'     : '0'}
        assert message_2 == { 'account'     : account_id,
                              'detail'      : {'instance-id': instance_id, 'state': expected_message_2_state},
                              'detail-type' : 'EC2 Instance State-change Notification',
                              'region'      : region,
                              'resources'   : [f'arn:aws:ec2:{region}:{account_id}:instance/{instance_id}'],
                              'source'      : 'aws.ec2',
                              'version'     : '0'}
def test___enter__exit__(self):
    event_data = {"id": "event_1"}
    with Temp_SQS_Queue() as sqs_queue:
        with Temp_Event_Rule() as event_rule:
            target_id = event_rule.add_target_sqs_queue(sqs_queue)
            event_rule.send_event(event_data)
            wait(0.5)
            assert sqs_queue.size() == 1
            assert sqs_queue.pop().get('detail') == event_data
            event_rule.delete_target(target_id=target_id)
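# A self-contained sketch (an assumption, not taken from the source) of the Temp_* context-manager
# pattern the two __enter__/__exit__ tests above exercise: create the resource on enter, always
# remove it on exit, so each test starts clean and leaves nothing behind. The class name
# Temp_Resource_Example is a stand-in used only for this illustration.
class Temp_Resource_Example:
    def __init__(self):
        self.created = False
    def __enter__(self):
        self.created = True                                 # a real Temp_* class would create the AWS resource here
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.created = False                                # a real Temp_* class would delete the AWS resource here
        return False                                        # don't suppress exceptions raised inside the with block

# usage:
#   with Temp_Resource_Example() as resource:
#       assert resource.created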
def group_events_wait_for_pattern(self, log_stream_prefix=None, filter_pattern=None, sleep_for=0.5, max_attempts=20):
    if self.group_wait_until_exists():                                                              # only poll for events once the log group exists
        for i in range(0, max_attempts):
            events = self.group_events(log_stream_prefix=log_stream_prefix, filter_pattern=filter_pattern)
            if len(events) > 0:                                                                     # events matching the pattern were found
                return events
            wait(sleep_for)                                                                         # not yet, wait and try again
    return []                                                                                       # no matching events within max_attempts
def wait_for_state(self, state, max_wait_count=40, wait_interval=1):
    for i in range(0, max_wait_count):
        info          = self.info()
        current_state = info.get('Configuration').get('State')             # current reported state
        if current_state == state:                                          # check against the requested state
            return { "status" : "ok",
                     "message": f"Status '{state}' was found after {i} * {wait_interval} seconds" }
        wait(wait_interval)
    return { "status" : "error",
             "message": f"Status '{state}' did not occur in {max_wait_count} * {wait_interval} seconds" }
def test_event_put(self):
    with Temp_SQS_Queue(fifo=True) as queue:
        target_id         = queue.name()
        target_arn        = queue.arn()
        rule_name         = self.rule_name
        source_arn        = self.events.rule_arn(self.rule_name)
        service           = 'events.amazonaws.com'
        resource          = queue.arn()
        target_attributes = {'SqsParameters': {'MessageGroupId': 'string'}}

        queue.permission_add_for_service(source_arn=source_arn, service=service, resource=resource)
        self.events.target_create(rule_name=rule_name, target_id=target_id, target_arn=target_arn, target_attributes=target_attributes)
        #assert queue.url() == SQS().queue_url_from_queue_arn(queue_arn=queue.arn())

        event_data = f'{{"id":"event_1", "value***": "{random_string()}"}}'
        event      = { 'Time'      : date_time_now(),
                       'Source'    : self.rule_event_source,
                       'DetailType': 'myTestType',
                       'Detail'    : event_data }
        self.events.events_put([event])
        self.events.events_put([event])
        self.events.events_put([event])

        wait(0.5)                                           # todo: see if there is a better way to wait for messages to be available

        assert queue.size() > 0
        message = queue.pop()
        assert message.get('detail') == json_parse(event_data)

        self.events.target_delete(rule_name=rule_name, target_id=target_id)
def test_role_create_assume_role(self):
    sts              = STS()
    current_user_arn = sts.caller_identity_arn()
    original_policy  = {'Statement': [{ 'Action'   : 'sts:AssumeRole',
                                        'Effect'   : 'Allow',
                                        'Principal': {'Service': 'codebuild.amazonaws.com'}}]}
    new_policy       = {'Statement': [{ 'Action'   : 'sts:AssumeRole',
                                        'Effect'   : 'Allow',
                                        'Principal': {'AWS': current_user_arn}}]}
    test_role = IAM(role_name="temp_role_to_test_assume_role")
    test_role.role_create(original_policy)
    role_arn              = test_role.role_arn()
    current_assume_policy = test_role.role_assume_policy()
    test_role.role_assume_policy_update(new_policy)

    for i in range(0, 15):                                  # the policy change is eventually consistent, so retry for up to 15 seconds
        with Catch(log_exception=False):
            sts.assume_role(role_arn=role_arn)              # raises until the updated assume-role policy has propagated
            pprint('got credentials')
            break
        print(f'after {i} seconds')
        wait(1)

    assert sts.assume_role(role_arn=role_arn).get('Credentials') is not None

    test_role.role_assume_policy_update(current_assume_policy)
    assert test_role.role_assume_policy() == current_assume_policy
    test_role.role_delete()
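# A self-contained sketch (an assumption, not taken from the source) of the retry pattern used in the
# test above: IAM policy updates are eventually consistent, so the call is repeated, with failures
# swallowed, until it succeeds or the attempts run out. Plain try/except stands in here for the
# library's Catch helper, which the test appears to use to suppress the exception.
import time

def retry_until_no_exception(call, max_attempts=15, sleep_for=1):
    for attempt in range(max_attempts):
        try:
            return call()                                   # e.g. lambda: sts.assume_role(role_arn=role_arn)
        except Exception:                                   # assumed behaviour: swallow the failure and retry
            time.sleep(sleep_for)                           # give the change time to propagate
    return call()                                           # final attempt; let any exception surface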
def wait(self, seconds):                                    # to help with fluent code
    wait(seconds)
    return self
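# A minimal, self-contained sketch (an assumption, not taken from the source) of why a fluent wait()
# that returns self is convenient: it lets a pause be dropped into a method chain. The class name
# Fluent_Example and its ping() method are stand-ins used only for this illustration.
import time

class Fluent_Example:
    def wait(self, seconds):                                # same pattern as the helper above
        time.sleep(seconds)
        return self                                         # returning self keeps the chain going

    def ping(self):
        print('ping')
        return self

Fluent_Example().ping().wait(0.1).ping()                    # usage: chain calls with a pause in between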
def test_wait(self):
    delay = 0.001                                           # time to wait (in seconds)
    start = time.time()
    wait(delay)
    end   = time.time()
    assert end - start > delay
def logs_wait_for_data(self, task_arn, task_definition_arn, image_name, cluster_name='default', max_attempts=60, sleep_for=1):
    for i in range(0, max_attempts):                                                # for max_attempts
        logs = self.logs(task_arn            = task_arn            ,
                         task_definition_arn = task_definition_arn ,
                         image_name          = image_name          ,
                         cluster_name        = cluster_name        )               # get logs (returns '' if the log stream doesn't exist or has no data)
        if logs:                                                                    # if there is data
            return logs                                                             # return it
        wait(sleep_for)                                                             # if not, wait for sleep_for seconds
    return ''                                                                       # no log data arrived within max_attempts
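# A self-contained sketch (an assumption, not taken from the source) of the generic polling pattern
# shared by group_wait_until_exists, group_events_wait_for_pattern and logs_wait_for_data above:
# call a probe function up to max_attempts times, sleeping sleep_for seconds between calls, and
# return the first truthy result (or a default when every attempt comes back empty).
import time

def poll_until(probe, sleep_for=0.5, max_attempts=60, default=None):
    for _ in range(max_attempts):
        result = probe()                                    # e.g. lambda: cloud_watch.group_exists()   (cloud_watch is a hypothetical caller)
        if result:                                          # first truthy value wins
            return result
        time.sleep(sleep_for)                               # wait before the next attempt
    return default                                          # nothing showed up within max_attempts

# usage:
#   poll_until(lambda: cloud_watch.group_exists(), sleep_for=0.5, max_attempts=60, default=False)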