def test_wait_jobs(self, create_waiter_with_client_mock, exception_mock):
    create_waiter_with_client_mock.return_value.wait.side_effect = WaiterError('', '', '')
    self.assertRaises(SchedulerException, self.instance.wait_jobs, self.jobs)
    exception_mock.assert_called_once()
def _wait_for_instances_inservice(config, asg_name):
    """
    LifecycleState: 'Pending'|'Pending:Wait'|'Pending:Proceed'|'Quarantined'|'InService'|'Terminating'|
                    'Terminating:Wait'|'Terminating:Proceed'|'Terminated'|'Detaching'|'Detached'|'EnteringStandby'|'Standby'
    HealthStatus: 'Healthy'|'Unhealthy'
    """
    d = {'env': config['env']}
    new_instance_ids = _get_instance_ids(config, asg_name)
    _asg = config['session'].client('autoscaling')
    num_attempts = 0
    while num_attempts < 40:
        new_ins = _asg.describe_auto_scaling_instances(
            InstanceIds=new_instance_ids
        )['AutoScalingInstances']
        healthy_count = 0
        for ins in new_ins:
            if (ins['HealthStatus'] != 'HEALTHY') or (ins['LifecycleState'] != 'InService'):
                _LOG.debug('instance %s not healthy: (%s, %s).',
                           ins['InstanceId'], ins['HealthStatus'], ins['LifecycleState'], extra=d)
                break
            else:
                healthy_count += 1
        if healthy_count == len(new_ins):
            break
        else:
            num_attempts += 1
            time.sleep(15)
    else:
        wait_too_long = 'Waited too long for %s asg %s to have healthy instances %s' % (
            config['env'], asg_name, new_instance_ids)
        raise WaiterError(name='CustomASGInstancesHealthy', reason=wait_too_long, last_response=None)
    _LOG.debug('auto scaling group %s instances %s Healthy and InService',
               asg_name, new_instance_ids, extra=d)
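The while/else polling pattern above generalizes; here is a minimal, self-contained sketch of it, assuming a hypothetical check_ready callable (none of these names come from the original module):

import time

from botocore.exceptions import WaiterError


def poll_until_ready(check_ready, max_attempts=40, delay=15):
    # Retry the readiness check; raise WaiterError once attempts are exhausted.
    for _ in range(max_attempts):
        if check_ready():
            return
        time.sleep(delay)
    raise WaiterError(name='CustomReadinessWaiter',
                      reason='resource never became ready',
                      last_response=None)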
def test_async_predict_call_pass_through_wait_result(capsys):
    sagemaker_session = empty_sagemaker_session()
    predictor_async = AsyncPredictor(Predictor(ENDPOINT, sagemaker_session))
    s3_waiter = Mock(name="object_waiter")
    waiter_error = WaiterError(
        name="async-predictor-unit-test-waiter-error",
        reason="test-waiter-error",
        last_response="some response",
    )
    s3_waiter.wait = Mock(name="wait", side_effect=[waiter_error, None])
    sagemaker_session.s3_client.get_waiter = Mock(
        name="object_exists",
        return_value=s3_waiter,
    )

    input_location = "s3://some-input-path"
    with pytest.raises(PollingTimeoutError, match="Inference could still be running"):
        predictor_async.predict(input_path=input_location)

    result_async = predictor_async.predict(input_path=input_location)
    assert sagemaker_session.sagemaker_runtime_client.invoke_endpoint_async.called
    sagemaker_session.sagemaker_client.describe_endpoint.assert_not_called()
    sagemaker_session.sagemaker_client.describe_endpoint_config.assert_not_called()

    expected_request_args = {
        "Accept": DEFAULT_ACCEPT,
        "InputLocation": input_location,
        "EndpointName": ENDPOINT,
    }
    call_args, kwargs = sagemaker_session.sagemaker_runtime_client.invoke_endpoint_async.call_args
    assert kwargs == expected_request_args
    assert result_async == RETURN_VALUE
def test_throttled_call_waitererror_timeout(self, mock_sleep):
    """Test throttled_call with a WaiterError timeout"""
    mock_func = MagicMock()
    last_response = {"Error": {"Code": "Throttling"}}
    waiter_error = WaiterError('Timeout', 'test', last_response)
    mock_func.side_effect = waiter_error
    self.assertRaises(WaiterError, throttled_call, mock_func)
def test__wait_for_snapshot_status_failed(input, expected):
    spec = {"get_waiter.side_effect": [WaiterError(None, None, None)]}
    client = MagicMock(**spec)
    module = MagicMock()
    rds.wait_for_snapshot_status(client, module, "test", input)
    module.fail_json_aws.assert_called_once()
    assert module.fail_json_aws.call_args[1]["msg"] == expected
def test_prints_error_message_and_does_not_fail_without_StatusReason(capsys, logger, client):
    change_set = ChangeSet(STACK)
    error = WaiterError('name', 'reason', {})
    client.get_waiter.return_value.wait.side_effect = error
    with pytest.raises(SystemExit) as pytest_wrapped_e:
        change_set.create(template=TEMPLATE, change_set_type=CHANGE_SET_TYPE)
    logger.info.assert_called()
    assert pytest_wrapped_e.value.code == 1
def test_poll_for_code_generation_completion_with_failed_status(self):
    schemas_api_caller = SchemasApiCaller(self.client)
    self.client.get_waiter.return_value.wait.return_value = None
    schemas_api_caller.poll_for_code_binding_status(
        "Java8", "aws.events", "aws.batch.BatchJobStateChange", "1")
    schemas_api_caller = SchemasApiCaller(self.client)
    self.client.get_waiter.return_value.wait.side_effect = WaiterError(
        name="failed",
        reason="Waiter encountered a terminal failure state",
        last_response="failed",
    )
    with self.assertRaises(WaiterError):
        schemas_api_caller.poll_for_code_binding_status(
            "Java8", "aws.events", "aws.batch.BatchJobStateChange", "1")
def test_create_AMI(self):
    """Exercises the code path for creating an AMI"""
    self.boto_instance.create_image.return_value = Mock(image_id="123")
    self.instance.create_AMI()
    self.assertEqual(self.instance.snapshot.image_id, "123")
    self.instance.ec2_client.get_waiter().wait.side_effect = WaiterError(
        "bla", "blah", "blah")
    self.assertRaises(WaiterError, self.instance.create_AMI)
def test_prints_error_message_but_exits_successfully_for_no_changes(capsys, logger, mocker, client):
    change_set = ChangeSet(STACK)
    status_reason = "The submitted information didn't contain changes. " \
                    "Submit different information to create a change set."
    error = WaiterError('name', 'reason', {'StatusReason': status_reason})
    client.get_waiter.return_value.wait.side_effect = error
    change_set.create(template=TEMPLATE, change_set_type=CHANGE_SET_TYPE)
    logger.info.assert_called_with(status_reason)
def test_prints_error_message_for_failed_submit_and_exits(capsys, logger):
    cf_client_mock = Mock()
    change_set = ChangeSet(STACK, cf_client_mock)
    error = WaiterError('name', 'reason', {'StatusReason': 'StatusReason'})
    cf_client_mock.get_waiter.return_value.wait.side_effect = error
    with pytest.raises(SystemExit) as pytest_wrapped_e:
        change_set.create(template=TEMPLATE, change_set_type=CHANGE_SET_TYPE)
    logger.info.assert_called_with('StatusReason')
    assert pytest_wrapped_e.value.code == 1
def test_wait_for_changeset_exception_ChangeEmpty(self):
    self.deployer._client.get_waiter = MagicMock(
        return_value=MockChangesetWaiter(ex=WaiterError(
            name="wait_for_changeset",
            reason="unit-test",
            last_response={
                "Status": "Failed",
                "StatusReason": "It's a unit test"
            },
        )))
    with self.assertRaises(ChangeSetError):
        self.deployer.wait_for_changeset("test-id", "test-stack")
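MockChangesetWaiter is not shown in this section; a plausible sketch, assuming it is simply a stand-in waiter whose wait() raises whatever exception it was constructed with:

class MockChangesetWaiter:
    # Stand-in for a botocore waiter used by the tests above (assumed shape).
    def __init__(self, ex=None):
        self.ex = ex

    def wait(self, **kwargs):
        if self.ex:
            raise self.ex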
def test_wait_until_ready(self):
    """Exercises the code path for waiting until the instance is ready to be used"""
    self.boto_instance.wait_until_running.side_effect = [
        "",
        WaiterError("blah", "blah", "blah"),
    ]
    self.boto_instance.state = -1
    self.instance.wait_until_ready()
    self.boto_instance.wait_until_running.assert_called()
    self.assertRaises(WaiterError, self.instance.wait_until_ready)
def _wait_for_elb(config, elb_name, asg_name, desired_state):
    """
    Wait until the desired_state ("InService"|"OutOfService") of the instances
    of the asg in the elb is reached.

    Args:
        elb_name (str): the elastic load balancer to check against
        asg_name (str): the auto scaling group to check against
        desired_state (str): the desired state of the instances to be in the elb

    Raises:
        Exception: when the given desired_state is neither "InService" nor "OutOfService"
        WaiterError: when the wait takes too long
    """
    d = {'env': config['env']}
    if desired_state != "InService" and desired_state != "OutOfService":
        raise Exception("desired_state can only be InService or OutOfService")
    instance_ids_list = _get_instance_ids(config, asg_name)
    _elb_c = config['session'].client('elb')
    num_attempts = 0
    while num_attempts < 40:
        # Note: We may get a "ClientError" with an "InvalidInstance" error,
        # especially right before attaching for "InService". For now just catch
        # and log it, then retry. It is not considered an error and is not
        # raised back to the caller.
        try:
            states = _elb_c.describe_instance_health(
                LoadBalancerName=elb_name,
                Instances=[{'InstanceId': id} for id in instance_ids_list]
            )
            desired_state_count = 0
            for state in states['InstanceStates']:
                if state['State'] != desired_state:
                    _LOG.debug('instance %s NOT %s. Current state: %s',
                               state['InstanceId'], desired_state, state['State'], extra=d)
                    break
                else:
                    _LOG.debug('instance %s is %s', state['InstanceId'], state['State'], extra=d)
                    desired_state_count += 1
            if desired_state_count == len(instance_ids_list):
                break
            else:
                _LOG.debug('instance %s from auto scaling group %s registered with elb %s NOT in %s yet. Tried %i time(s).',
                           state['InstanceId'], asg_name, elb_name, desired_state, num_attempts + 1, extra=d)
                # Increment the real counter here rather than inside the log call,
                # while logging num_attempts + 1 so we never display "Tried 0 time(s)".
                num_attempts += 1
                time.sleep(15)
        except ClientError as err:
            _LOG.debug('checking instances %s with elb %s for %s failed on edge case. %s',
                       instance_ids_list, elb_name, desired_state, err, extra=d)
            time.sleep(15)
    else:
        wait_too_long = 'Waited too long for %s instances %s to be in %s in %s' % (
            config['env'], instance_ids_list, desired_state, elb_name)
        raise WaiterError(name='CustomASGInstancesHealthy', reason=wait_too_long, last_response=None)
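An illustrative call of the two helpers above, assuming config carries a boto3 Session and env label as they expect; the names and values here are placeholders:

import boto3
from botocore.exceptions import WaiterError

config = {'session': boto3.Session(), 'env': 'staging'}
try:
    _wait_for_instances_inservice(config, asg_name='my-asg')
    _wait_for_elb(config, 'my-elb', 'my-asg', desired_state='InService')
except WaiterError as err:
    print(f'deployment wait failed: {err}')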
def test_az_deploy_no_changes(client, config, capsys):
    # Configure waiter to throw WaiterError for FAILURE due to no changes
    waiter_error = WaiterError('change_set_create_complete', 'No Changes', {
        'Status': 'FAILED',
        'StatusReason': 'No updates are to be performed'
    })
    waiter_mock = MagicMock(**{'wait.side_effect': waiter_error})
    client.configure_mock(**{'get_waiter.return_value': waiter_mock})

    runner = AzureDevOpsRunner(client, config)
    runner.deploy()

    captured = capsys.readouterr()
    assert '##vso[task.logissue type=warning]TestStack (test/us-east-1) - No Changes found in ChangeSet' in captured.out
    assert '##vso[task.complete result=SucceededWithIssues]DONE' in captured.out
def test__wait_for_stack_failure(uploader):
    e = WaiterError(
        name="StackCreateComplete",
        reason="Waiter encountered a terminal failure state",
        last_response=None,
    )
    mock_waiter = uploader.cfn_client.get_waiter.return_value
    mock_waiter.wait.side_effect = e
    with pytest.raises(UploadError) as excinfo:
        uploader._wait_for_stack("stack-foo", "StackCreateComplete", "success-msg")
    mock_waiter.wait.assert_called_once_with(StackName="stack-foo", WaiterConfig=ANY)
    assert excinfo.value.__cause__ is e
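The final assertion checks exception chaining; a minimal sketch of the wrapping it implies, assuming UploadError is the project's own exception type (the placeholder class and function here are illustrative, not the uploader's real code):

from botocore.exceptions import WaiterError


class UploadError(Exception):
    """Placeholder for the project's UploadError; illustrative only."""


def wait_for_stack(waiter, stack_name):
    try:
        waiter.wait(StackName=stack_name)
    except WaiterError as e:
        # `raise ... from e` sets __cause__, which the test asserts on.
        raise UploadError(f"waiting for {stack_name} failed") from e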
def test_cleanup_instance(self):
    """Exercises the code path for cleaning up the instance after use"""
    self.boto_instance.wait_until_terminated.side_effect = [
        "",
        WaiterError("blah", "blah", "blah"),
        "",
    ]
    open(f"./{self.instance.key_pair_name}.pem", "w").close()
    self.instance.key_path = f"./{self.instance.key_pair_name}.pem"
    self.instance.cleanup_instance()
    self.boto_instance.wait_until_terminated.assert_called()
    open(f"./{self.instance.key_pair_name}.pem", "w").close()
    self.assertRaises(WaiterError, self.instance.cleanup_instance)
    os.remove(f"./{self.instance.key_pair_name}.pem")
    self.assertRaises(FileNotFoundError, self.instance.cleanup_instance)
def test_deploy_waiter_failed(client, config):
    # Configure waiter to throw WaiterError for some unknown other reason
    waiter_error = WaiterError('change_set_create_complete', 'Other reason', {
        'Status': 'FAILED',
        'StatusReason': 'Some other reason'
    })
    waiter_mock = MagicMock(**{'wait.side_effect': waiter_error})
    client.configure_mock(**{'get_waiter.return_value': waiter_mock})

    runner = Runner(client, config)
    with pytest.raises(
            StackError,
            match='ChangeSet creation failed - Status: FAILED, Reason: Some other reason'):
        runner.deploy()
def test_az_apply_change_set_waiter_error(client, config, capsys):
    # Configure waiter to throw WaiterError
    waiter_error = WaiterError('stack_update_complete', 'Update Failed', {
        'Status': 'FAILED',
        'StatusReason': 'Some reason'
    })
    waiter_mock = MagicMock(**{'wait.side_effect': waiter_error})
    client.configure_mock(**{'get_waiter.return_value': waiter_mock})

    runner = AzureDevOpsRunner(client, config)
    with pytest.raises(
            StackError,
            match='Waiter stack_update_complete failed: Update Failed'):
        runner.apply_change_set()

    captured = capsys.readouterr()
    assert '##vso[task.logissue type=error]TestStack (test/us-east-1) - ChangeSet TestChangeSet failed' in captured.out
def test__success(self, mocker):
    """success"""
    tgw = TGWPeering()
    peer = TGWPeer(
        aws_region="",
        transit_gateway="",
        attachment_id="",
    )
    mocker.patch(
        "tgw_peering.lib.transit_gateway.TGWPeering.tgw_attachment_waiter",
        side_effect=WaiterError(
            name="TestWaiter",
            reason="error message",
            last_response="waiter_failed",
        ),
    )
    with pytest.raises(WaiterError) as err:
        asyncio.run(tgw.accept_tgw_peering_attachment(peer))
    assert str(err.value.last_response) == "waiter_failed"
def test_wait_for_execute(self, patched_time):
    self.deployer.describe_stack_events = MagicMock()
    self.deployer._client.get_waiter = MagicMock(return_value=MockCreateUpdateWaiter())
    self.deployer.wait_for_execute("test", "CREATE")
    self.deployer.wait_for_execute("test", "UPDATE")
    with self.assertRaises(RuntimeError):
        self.deployer.wait_for_execute("test", "DESTRUCT")

    self.deployer._client.get_waiter = MagicMock(
        return_value=MockCreateUpdateWaiter(
            ex=WaiterError(
                name="create_changeset",
                reason="unit-test",
                last_response={"Status": "Failed", "StatusReason": "It's a unit test"},
            )
        )
    )
    with self.assertRaises(DeployFailedError):
        self.deployer.wait_for_execute("test", "CREATE")
def test_deploy_no_changes(client, config, capsys):
    # Configure waiter to throw WaiterError for FAILURE due to no changes
    waiter_error = WaiterError('change_set_create_complete', 'No Changes', {
        'Status': 'FAILED',
        'StatusReason': 'No updates are to be performed'
    })
    waiter_mock = MagicMock(**{'wait.side_effect': waiter_error})
    client.configure_mock(**{'get_waiter.return_value': waiter_mock})

    runner = Runner(client, config)
    runner.deploy()

    client.list_change_sets.assert_called_once_with(StackName='TestStack')
    client.create_change_set.assert_called_once()
    client.get_waiter.assert_called_once_with('change_set_create_complete')
    client.describe_change_set.assert_not_called()
    client.update_termination_protection.assert_not_called()

    captured = capsys.readouterr()
    assert 'No changes to Stack TestStack' in captured.out
def test_get_object_failure():
    """Mock scenario where the get fails."""
    from botocore.exceptions import WaiterError
    from dtool_s3.storagebroker import _object_exists

    mock_s3resource = MagicMock()
    obj = MagicMock()
    obj.wait_until_exists = MagicMock(side_effect=WaiterError(
        'ObjectExists', 'Max attempts exceeded', {}))
    mock_s3resource.Object = MagicMock(return_value=obj)
    value = _object_exists(
        mock_s3resource,
        "dummy_bucket",
        "dummy_dest_path"
    )
    assert value is False
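A hedged sketch of roughly what _object_exists must do for this test to pass: treat a WaiterError from the S3 Object resource's wait_until_exists as "object missing". The real implementation in dtool_s3 may differ; this is only the shape the mock implies:

from botocore.exceptions import WaiterError


def object_exists_sketch(s3resource, bucket, dest_path):
    # wait_until_exists polls HeadObject; a WaiterError means it never appeared.
    obj = s3resource.Object(bucket, dest_path)
    try:
        obj.wait_until_exists()
    except WaiterError:
        return False
    return True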
def test__wait_for_registration_waiter_fails_describe_fails(project):
    mock_cfn_client = MagicMock(
        spec=["describe_type_registration", "set_type_default_version"])
    mock_cfn_client.describe_type_registration.side_effect = ClientError(
        BLANK_CLIENT_ERROR, "DescribeTypeRegistration")
    mock_waiter = MagicMock(spec=["wait"])
    mock_waiter.wait.side_effect = WaiterError(
        "TypeRegistrationComplete",
        "Waiter encountered a terminal failure state",
        DESCRIBE_TYPE_FAILED_RETURN,
    )

    patch_create_waiter = patch("rpdk.core.project.create_waiter_with_client",
                                return_value=mock_waiter)
    with patch_create_waiter, pytest.raises(DownstreamError):
        project._wait_for_registration(mock_cfn_client, REGISTRATION_TOKEN, False)

    mock_cfn_client.describe_type_registration.assert_called_once_with(
        RegistrationToken=REGISTRATION_TOKEN)
    mock_cfn_client.set_type_default_version.assert_not_called()
    mock_waiter.wait.assert_called_once_with(
        RegistrationToken=REGISTRATION_TOKEN)
def test__wait_for_registration_waiter_fails(project):
    mock_cfn_client = MagicMock(spec=[
        "describe_type_registration", "set_type_default_version", "get_waiter"
    ])
    mock_cfn_client.describe_type_registration.return_value = (
        DESCRIBE_TYPE_FAILED_RETURN)
    mock_waiter = MagicMock(spec=["wait"])
    mock_waiter.wait.side_effect = WaiterError(
        "TypeRegistrationComplete",
        "Waiter encountered a terminal failure state",
        DESCRIBE_TYPE_FAILED_RETURN,
    )
    mock_cfn_client.get_waiter.return_value = mock_waiter

    with pytest.raises(DownstreamError):
        project._wait_for_registration(mock_cfn_client, REGISTRATION_TOKEN, True)

    mock_cfn_client.describe_type_registration.assert_called_once_with(
        RegistrationToken=REGISTRATION_TOKEN)
    mock_cfn_client.set_type_default_version.assert_not_called()
    mock_waiter.wait.assert_called_once_with(
        RegistrationToken=REGISTRATION_TOKEN)
def test_deploy_no_changes_enable_termination_protection(client, config, capsys):
    describe_stacks = {
        'Stacks': [{
            'StackName': 'TestStack',
            'StackStatus': StackStatus.CREATE_COMPLETE.name,
            'CreationTime': '2019-12-31T18:30:11.12345+0000',
            'EnableTerminationProtection': False
        }]
    }
    # Configure waiter to throw WaiterError for FAILURE due to no changes
    waiter_error = WaiterError('change_set_create_complete', 'No Changes', {
        'Status': 'FAILED',
        'StatusReason': 'No updates are to be performed'
    })
    waiter_mock = MagicMock(**{'wait.side_effect': waiter_error})
    client.configure_mock(**{
        'get_waiter.return_value': waiter_mock,
        'describe_stacks.return_value': describe_stacks
    })

    runner = Runner(client, config)
    runner.deploy()

    client.list_change_sets.assert_called_once_with(StackName='TestStack')
    client.create_change_set.assert_called_once()
    client.get_waiter.assert_called_once_with('change_set_create_complete')
    client.describe_change_set.assert_not_called()
    client.update_termination_protection.assert_called_once_with(
        StackName='TestStack', EnableTerminationProtection=True)

    captured = capsys.readouterr()
    assert 'No changes to Stack TestStack' in captured.out
    assert 'Enabled Termination Protection' in captured.out
def wait(self, **kwargs):
    """Mocked wait function that always raises WaiterError"""
    # from https://github.com/amplify-education/amplify_aws_utils/blob/master/test/unit/test_resource_helper.py
    from botocore.exceptions import WaiterError
    last_response = {"Error": {"Code": "Throttling"}}
    raise WaiterError('Timeout', 'test', last_response)
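The last_response payload above matters because retry helpers typically decide whether to re-raise by inspecting the error code it carries; a minimal, self-contained illustration of that inspection (not code from the original project):

from botocore.exceptions import WaiterError

err = WaiterError('Timeout', 'test', {"Error": {"Code": "Throttling"}})
# WaiterError exposes the final polled response on .last_response.
assert err.last_response["Error"]["Code"] == "Throttling"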
def test_delete_waiter_error(client, config, capsys, monkeypatch):
    # Prevent differences in format depending upon where this runs
    monkeypatch.setenv('STACKMANAGER_TIMEZONE', 'UTC')

    # Configure waiter
    waiter_error = WaiterError('stack_delete_complete', 'Delete Failed', {
        'Status': 'FAILED',
        'StatusReason': 'Delete failed'
    })
    waiter_mock = MagicMock(**{'wait.side_effect': waiter_error})

    # Override Stack events
    stack_events = {
        'StackEvents': [{
            'Timestamp': datetime(2020, 1, 1, 13, 35, 11, 0, tzinfo=timezone.utc),
            'LogicalResourceId': 'Topic',
            'ResourceType': 'AWS::SNS::Topic',
            'ResourceStatus': 'DELETE_FAILED',
            'ResourceStatusReason': 'Something went wrong'
        }, {
            'Timestamp': datetime(2020, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc),
            'LogicalResourceId': 'TestStack',
            'ResourceType': 'AWS::CloudFormation::Stack',
            'ResourceStatus': 'CREATE_COMPLETE'
        }, {
            'Timestamp': datetime(2020, 1, 1, 11, 58, 20, 12436, tzinfo=timezone.utc),
            'LogicalResourceId': 'Topic',
            'ResourceType': 'AWS::SNS::Topic',
            'ResourceStatus': 'CREATE_COMPLETE'
        }]
    }
    paginator_mock = MagicMock(**{'paginate.return_value': [stack_events]})
    client.configure_mock(**{
        'get_waiter.return_value': waiter_mock,
        'get_paginator.return_value': paginator_mock
    })

    runner = Runner(client, config)
    with pytest.raises(
            StackError,
            match='Waiter stack_delete_complete failed: Delete Failed'):
        runner.delete()

    captured = capsys.readouterr()
    assert 'Deletion of Stack TestStack failed:' in captured.err
    assert '2020-01-01 13:35:11 Topic AWS::SNS::Topic DELETE_FAILED Something went wrong' \
        in captured.out
def raise_waiter_error(*a, **k):
    raise WaiterError('', {}, {})
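A typical way to wire the helper above into a mock so that any wait() call raises; the MagicMock wiring here is illustrative, since the original call sites are not shown:

from unittest.mock import MagicMock

# side_effect accepts a callable: every wait() invocation runs raise_waiter_error.
client = MagicMock()
client.get_waiter.return_value.wait.side_effect = raise_waiter_error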