def test_publish_on_event_bridge():
    """Stub EventBridge put_events and verify the stubbed response fields.

    The client is stubbed, so this exercises the request shape (entries built
    from environment config) and the canned response, not the real service.
    """
    event_bridge_client = boto3.client('events', os.environ['AWS_REGION'])
    stubber = Stubber(event_bridge_client)
    stubber.add_response('put_events', {
        'FailedEntryCount': 0,
        'Entries': [{'EventId': '12456663423'}]
    })
    # `with stubber` activates/deactivates the stubber itself; the original
    # extra stubber.activate() call before the block was redundant.
    with stubber:
        response = event_bridge_client.put_events(Entries=[{
            'EventBusName': os.environ['EVENT_BUS_NAME'],
            'Detail': json.dumps({
                '000': [{
                    'topic': '000',
                    'term': 'hill',
                    'weight': '0.17230435'
                }, {
                    'topic': '000',
                    'term': 'make',
                    'weight': '0.005689891'
                }, {
                    'topic': '000',
                    'term': 'favorite',
                    'weight': '0.0046757753'
                }]
            }),
            'Source': os.environ['TOPICS_EVENT_NAMESPACE'],
            'DetailType': 'topics'
        }])
        # Bug fix: use == for value equality. `is` compares object identity and
        # only "worked" because of CPython small-int/string interning.
        assert response['FailedEntryCount'] == 0
        assert response['Entries'][0]['EventId'] == '12456663423'
def test_can_deregister(boto3_clients):
    """can_deregister flips from False to True once the cluster has no tasks left."""
    session = rot.Session(clients=boto3_clients)
    session.ami = AMI
    session.asgs = ASGS
    session.cluster = CLUSTER

    instances = copy.deepcopy(list_cont_result)
    instances["containerInstanceArns"] = [
        "arn:aws:ecs:ap-southeast-2:111111111111:container-instance/some-cluster/a99b9853b6114c87af46c7501a3a6ba8"
    ]
    no_tasks = copy.deepcopy(list_task_result)
    no_tasks["taskArns"] = []

    list_params = {
        "cluster": CLUSTER,
        "filter": f"attribute:ecs.ami-id != {AMI}",
    }
    with Stubber(boto3_clients["ecs"]) as stubber:
        # Round one: tasks still running -> deregistration not allowed yet.
        stubber.add_response(
            method="list_container_instances",
            service_response=instances,
            expected_params=list_params,
        )
        stubber.add_response(method="list_tasks", service_response=list_task_result)
        # Round two: task list is empty -> deregistration allowed.
        stubber.add_response(
            method="list_container_instances",
            service_response=instances,
            expected_params=list_params,
        )
        stubber.add_response(method="list_tasks", service_response=no_tasks)

        assert rot.can_deregister(session) is False
        assert rot.can_deregister(session) is True
        stubber.assert_no_pending_responses()
def test_can_clean_volumes(self, mock_sleep):
    """clean_old_volumes deletes 'available' volumes tagged with the UUID, keeps the in-use one."""
    client = boto3.client('ec2')
    stubber = Stubber(client)
    uuid = "01c6b711-a7d4-4bdf-bb2b-10b4b60594bc"

    def _volume(volume_id, state):
        # One entry in the describe_volumes response for this UUID.
        return {
            "VolumeId": volume_id,
            "State": state,
            "Tags": [{"Key": "UUID", "Value": uuid}],
        }

    stubber.add_response('describe_volumes', {"Volumes": [
        _volume("1", "in-use"),
        _volume("2", "available"),
        _volume("3", "available"),
    ]})
    # Only the two available volumes (not the current one, "1") get deleted.
    stubber.add_response('delete_volume', [], {"VolumeId": "2"})
    stubber.add_response('delete_volume', [], {"VolumeId": "3"})
    stubber.activate()

    ebspin_ec2 = ec2.Ec2(client)
    ebspin_ec2.clean_old_volumes(uuid, "1")
def test_get_copy_args_with_version(self):
    """get_copy_args merges object metadata and ACL grants into the copy arguments."""
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, "../data/s3_obj.json"), "r") as file:
        response1 = json.load(file)
    with open(os.path.join(here, "../data/s3_acl.json"), "r") as file:
        response2 = json.load(file)

    # with version
    s3_client = boto3.client("s3")
    stubber = Stubber(s3_client)
    stubber.add_response("get_object", response1)
    stubber.add_response("get_object_acl", response2)
    stubber.activate()

    s3 = S3()
    s3._client = s3_client
    s3.bucket_name = "hello"
    s3_args = S3Args(s3)
    result = get_copy_args(s3, "hello.json", s3_args, False)
    self.assertEqual(
        result,
        {
            "Bucket": "hello",
            "Key": "hello.json",
            "CopySource": {"Bucket": "hello", "Key": "hello.json"},
            "StorageClass": "REDUCED_REDUNDANCY",
            "ServerSideEncryption": "aws:kms",
            "SSEKMSKeyId": "arn:aws:kms:ap-southeast-2:11111111:key/11111111-f48d-48b8-90d4-d5bd03a603d4",
            "GrantRead": "uri=http://acs.amazonaws.com/groups/global/AllUsers",
        },
    )
def test_get_workspaces_for_directory_use_next_token():
    """Pagination via NextToken should aggregate workspaces from both pages.

    Two describe_workspaces responses are stubbed: the first returns a
    NextToken, the second (expected to echo that token) is the final page.
    """
    settings = {
        'region': 'us-east-1',
        'hourlyLimits': 10,
        'testEndOfMonth': 'yes',
        'isDryRun': True,
        'startTime': 1,
        'endTime': 2,
        'TerminateUnusedWorkspaces': 'Dry Run'
    }
    directory_id = "123qwe123qwe"
    workspace_helper = WorkspacesHelper(settings)
    client_stubber = Stubber(workspace_helper.workspaces_client)
    expected_params_1 = {'DirectoryId': directory_id}
    response_1 = {
        'Workspaces': [{'WorkspaceId': 'id_1'}],
        'NextToken': 's223123jj32'
    }
    expected_params_2 = {
        'DirectoryId': directory_id,
        'NextToken': 's223123jj32'
    }
    response_2 = {'Workspaces': [{'WorkspaceId': 'id_2'}]}
    client_stubber.add_response('describe_workspaces', response_1, expected_params_1)
    client_stubber.add_response('describe_workspaces', response_2, expected_params_2)
    client_stubber.activate()
    response = workspace_helper.get_workspaces_for_directory(directory_id)
    # Bug fix: the original called activate() a second time here (a no-op at
    # best); verify both stubbed pages were actually consumed instead.
    client_stubber.assert_no_pending_responses()
    assert response == [{'WorkspaceId': 'id_1'}, {'WorkspaceId': 'id_2'}]
def test_get_default_jobs_role_not_found(aws_session, job_role_arn, job_role_name):
    """RuntimeError is raised when no page of list_roles contains the jobs role."""
    iam_client = boto3.client("iam")

    def _role_entry(name):
        # Minimal role record matching the list_roles response shape.
        return {
            "Arn": f"arn:aws:iam::0000000000:role/{name}",
            "RoleName": name,
            "Path": "/",
            "RoleId": f"{name}-213453451345-431513",
            "CreateDate": time.time(),
        }

    with Stubber(iam_client) as stub:
        # Page 1: 100 non-matching roles, truncated with a pagination marker.
        stub.add_response(
            "list_roles",
            {
                "Roles": [_role_entry("nonJobsRole")] * 100,
                "IsTruncated": True,
                "Marker": "resp-marker",
            },
        )
        # Page 2: one more non-matching role; the request must carry the marker.
        stub.add_response(
            "list_roles",
            {
                "Roles": [_role_entry("nonJobsRole2")],
                "IsTruncated": False,
            },
            {"Marker": "resp-marker"},
        )
        aws_session._iam = iam_client
        with pytest.raises(RuntimeError):
            aws_session.get_default_jobs_role()
def __generate_lambda_stubber_for_create_function(self, lambda_client_to_stub, expected_handler='Custom_AccessControl.handler', expected_memory_value=None, expected_timeout_value=None, expected_tags=None):
    """Wrap a real lambda client in a boto3 Stubber primed for create_function.

    A stubber (rather than a MagicMock) validates that the parameters we send
    are legal for the API and lets the test inspect results. It cannot be
    shared across test blocks because it mutates the client, all responses
    must be registered before activation, and it is comparatively slow.

    :param expected_handler: The expected name of the handler function to invoke
    :param expected_memory_value: The memory size expected for the lambda. May be None
    :param expected_timeout_value: The expected timeout value specified for the Lambda. May be None
    :param expected_tags: A dictionary of string, string pairs expected to be specified for the lambda. May be None
    """
    lambda_stubber = Stubber(lambda_client_to_stub)
    expected_params = self.__generate_create_function_lambda_expected_params(
        expected_handler=expected_handler)

    # Optional expectations are only added when the caller supplied a value.
    optional_expectations = {
        'MemorySize': expected_memory_value,
        'Timeout': expected_timeout_value,
        'Tags': expected_tags,
    }
    for param_name, param_value in optional_expectations.items():
        if param_value is not None:
            expected_params[param_name] = param_value

    lambda_stubber.add_response(
        'create_function',
        {
            'FunctionArn': 'arn:aws:lambda:region:account:function:lambda',
            'Version': '1'
        },
        expected_params)
    return lambda_stubber
def test_set_av_tags(self):
    """set_av_tags reads the object's tags and writes back the AV status tag set."""
    scan_result = "CLEAN"
    scan_signature = AV_SIGNATURE_OK
    timestamp = get_timestamp()
    tag_set = {
        "TagSet": [
            {"Key": AV_SIGNATURE_METADATA, "Value": scan_signature},
            {"Key": AV_STATUS_METADATA, "Value": scan_result},
            {"Key": AV_TIMESTAMP_METADATA, "Value": timestamp},
        ]
    }
    object_location = {"Bucket": self.s3_bucket_name, "Key": self.s3_key_name}

    s3_stubber = Stubber(self.s3_client)
    # get_object_tagging returns the current tags for the object.
    s3_stubber.add_response("get_object_tagging", tag_set, dict(object_location))
    # put_object_tagging writes the full tag set back to the same object.
    s3_stubber.add_response(
        "put_object_tagging",
        {},
        dict(object_location, Tagging=tag_set),
    )

    with s3_stubber:
        s3_obj = self.s3.Object(self.s3_bucket_name, self.s3_key_name)
        set_av_tags(self.s3_client, s3_obj, scan_result, scan_signature, timestamp)
def __get_table_resource(self):
    """Return the DynamoDB Table resource for the current CIS environment.

    In the "local" environment this points at a dynalite endpoint built from
    the configured host/port; otherwise it uses the regional DynamoDB service.
    """
    region = self.config("region_name", namespace="cis", default="us-west-2")
    if self._get_cis_environment() == "local":
        # NOTE(review): Stubber(...).client returns whatever object was passed
        # to Stubber, so this is effectively the plain Session — presumably a
        # leftover from test scaffolding; confirm before relying on stubbing.
        self.boto_session = Stubber(
            boto3.session.Session(region_name=region)).client
        dynalite_port = self.config("dynalite_port", namespace="cis", default="4567")
        dynalite_host = self.config("dynalite_host", namespace="cis", default="localhost")
        dynamodb_resource = self.boto_session.resource(
            "dynamodb", endpoint_url="http://{}:{}".format(dynalite_host, dynalite_port))
        table = dynamodb_resource.Table(self._generate_table_name())
    else:
        dynamodb_resource = boto3.resource("dynamodb", region_name=region)
        table = dynamodb_resource.Table(self._generate_table_name())
    return table
def test_change_password(self, _):
    """change_password sends previous/proposed passwords with the access token.

    Also verifies that a None proposed password fails client-side parameter
    validation before any request is made.
    """
    # u = cognito_user(self.cognito_user_pool_id, self.app_id,
    #                  username=self.username)
    self.user.authenticate(self.password)
    stub = Stubber(self.user.client)
    stub.add_response(
        method="change_password",
        service_response={"ResponseMetadata": {"HTTPStatusCode": 200}},
        expected_params={
            "PreviousPassword": self.password,
            # Bug fix: must match the proposed password actually passed below;
            # the placeholder "******" would make the stubber raise a
            # parameter-mismatch error.
            "ProposedPassword": "crazypassword$45DOG",
            "AccessToken": self.user.access_token,
        },
    )
    with stub:
        self.user.change_password(self.password, "crazypassword$45DOG")
        stub.assert_no_pending_responses()
    with self.assertRaises(ParamValidationError):
        self.user.change_password(self.password, None)
def test_emit_process_completed():
    """emit_process_completed publishes a ProcessCompleted event and returns the raw response."""
    stubbed_response = {
        'FailedEntryCount': 0,
        'Entries': [{'EventId': '00000000-0000-0000-0000-000000000000'}]
    }
    # The event entry the EventBus wrapper is expected to build from the updates.
    expected_entries = {
        'Entries': [{
            'Source': 'scheduled-event-adjuster',
            'DetailType': 'ProcessCompleted',
            'Detail': '{"Updates": [{"foo": "bar"}]}',
            'EventBusName': 'default'
        }]
    }
    eventbridge_client = boto3.client('events')
    stubber = Stubber(eventbridge_client)
    stubber.add_response('put_events', stubbed_response, expected_entries)
    bus = EventBus(eventbridge_client)
    with stubber:
        assert bus.emit_process_completed([{'foo': 'bar'}]) == stubbed_response
def test_read_sqs_message_returns_falsy_on_no_incoming_message(self):
    """_read_sqs_message returns a falsy value when the queue has no messages."""
    sqs = boto3.resource("sqs", **config.BOTO_RESOURCE_KWARGS)
    queue_url = "my_very_valid_and_existing_queue_url"

    stubber = Stubber(sqs.meta.client)
    stubber.add_response(
        "get_queue_url",
        {"QueueUrl": queue_url},
        {"QueueName": config.QUEUE_NAME},
    )
    # Empty Messages list simulates a poll that returned nothing.
    stubber.add_response(
        "receive_message",
        {"Messages": []},
        {
            "QueueUrl": queue_url,
            "WaitTimeSeconds": ANY,
            "AttributeNames": ["AWSTraceHeader"],
        },
    )
    with mock.patch("boto3.resource") as resource_mock, stubber:
        resource_mock.return_value = sqs
        assert not _read_sqs_message()
def test_table_query_can_be_stubbed_with_expressions(self):
    """Condition-expression objects can be used directly as stubbed expected params."""
    table = self.resource.Table('mytable')
    key_expr = Key('mykey').eq('testkey')
    filter_expr = Attr('myattr').eq('foo') & (
        Attr('myattr2').lte('buzz') | Attr('myattr2').gte('fizz'))

    stubber = Stubber(table.meta.client)
    stubber.add_response(
        'query',
        {'Items': []},
        expected_params={
            'TableName': 'mytable',
            'KeyConditionExpression': key_expr,
            'FilterExpression': filter_expr,
        },
    )
    with stubber:
        response = table.query(
            KeyConditionExpression=key_expr, FilterExpression=filter_expr)
        assert response['Items'] == []
        stubber.assert_no_pending_responses()
def test_get_configs():
    """get_configs fetches each config object from S3 and augments it with regex matches."""
    client = boto3.client('s3')
    stub = Stubber(client)
    infos = [('test-bucket', 'some-prefix/config1.json')]
    expected_json = {
        'GitHubRepo': 'repo',
        'GitHubBranch': 'branch',
        'ChangeMatchExpressions': '.*',
        'CodePipelineName': 'pipeline'
    }
    # Serialize the S3 body before 'Matches' is added locally by get_configs.
    body = json.dumps(expected_json).encode()
    expected_json['Matches'] = filter.build_regex_matches(
        expected_json['ChangeMatchExpressions'])

    stub.add_response(
        'get_object',
        service_response={
            'Body': StreamingBody(io.BytesIO(body), len(body)),
        },
        expected_params={'Bucket': infos[0][0], 'Key': infos[0][1]})

    with stub:
        actual = filter.get_configs(client, infos)
        stub.assert_no_pending_responses()
    assert actual == [expected_json]
def test_ensure_cfn_bucket_doesnt_exist_us_west(self):
    """ensure_cfn_bucket creates the bucket with a LocationConstraint outside us-east-1."""
    session = get_session("us-west-1")
    provider = Provider(session)
    action = BaseAction(
        context=mock_context("mynamespace"),
        provider_builder=MockProviderBuilder(provider, region="us-west-1"))

    stubber = Stubber(action.s3_conn)
    # head_bucket 404s, so the action must fall through to create_bucket.
    stubber.add_client_error(
        "head_bucket",
        service_error_code="NoSuchBucket",
        service_message="Not Found",
        http_status_code=404,
    )
    # Outside us-east-1 a CreateBucketConfiguration is mandatory.
    stubber.add_response(
        "create_bucket",
        service_response={},
        expected_params={
            "Bucket": ANY,
            "CreateBucketConfiguration": {
                "LocationConstraint": "us-west-1",
            },
        })
    with stubber:
        action.ensure_cfn_bucket()
def test_ddb_table_name():
    """X-Ray records table name, request id, region, operation — and marks a 403 as error."""
    ddb = session.create_client('dynamodb', region_name='us-west-2')
    stubbed = {
        'ResponseMetadata': {
            'RequestId': REQUEST_ID,
            'HTTPStatusCode': 403,
        }
    }
    with Stubber(ddb) as stubber:
        stubber.add_response('describe_table', stubbed, {'TableName': 'mytable'})
        ddb.describe_table(TableName='mytable')

    subsegment = xray_recorder.current_segment().subsegments[0]
    # 4xx statuses are surfaced as "error" on the subsegment.
    assert subsegment.error
    assert subsegment.http['response']['status'] == 403

    aws_meta = subsegment.aws
    assert aws_meta['table_name'] == 'mytable'
    assert aws_meta['request_id'] == REQUEST_ID
    assert aws_meta['region'] == 'us-west-2'
    assert aws_meta['operation'] == 'DescribeTable'
def test_s3_bucket_name_capture():
    """X-Ray captures the S3 bucket name alongside request metadata."""
    s3 = session.create_client('s3', region_name='us-west-2')
    bucket_name = 'mybucket'
    stubbed = {
        'ResponseMetadata': {
            'RequestId': REQUEST_ID,
            'HTTPStatusCode': 200,
        }
    }
    with Stubber(s3) as stubber:
        stubber.add_response('list_objects_v2', stubbed, {'Bucket': bucket_name})
        s3.list_objects_v2(Bucket=bucket_name)

    subsegment = xray_recorder.current_segment().subsegments[0]
    aws_meta = subsegment.aws
    assert aws_meta['bucket_name'] == bucket_name
    assert aws_meta['request_id'] == REQUEST_ID
    assert aws_meta['region'] == 'us-west-2'
    assert aws_meta['operation'] == 'ListObjectsV2'
def test_pass_through_on_context_missing():
    """
    The built-in patcher or subsegment capture logic should not throw any
    error when a `None` subsegment created from `LOG_ERROR` missing context.
    """
    xray_recorder.configure(context_missing='LOG_ERROR')
    xray_recorder.clear_trace_entities()

    ddb = session.create_client('dynamodb', region_name='us-west-2')
    stubbed = {
        'ResponseMetadata': {
            'RequestId': REQUEST_ID,
            'HTTPStatusCode': 200,
        }
    }
    with Stubber(ddb) as stubber:
        stubber.add_response('describe_table', stubbed, {'TableName': 'mytable'})
        # The call must succeed (and return a value) despite the missing context.
        assert ddb.describe_table(TableName='mytable') is not None

    # Restore the stricter default for subsequent tests.
    xray_recorder.configure(context_missing='RUNTIME_ERROR')
def test_sns_event_should_publish_message(self):
    """The decorated function's return value is published to SNS and passed through."""
    sns_client = botocore.session.get_session().create_client('sns')
    topic_arn = 'arn:aws:sns:us-east-1:topic/some-topic'
    subject = 'test'
    response = {'test': True}
    with Stubber(sns_client) as stubber:
        stubber.add_response('publish', {}, {
            'TopicArn': topic_arn,
            'Subject': subject,
            'Message': encoder(response)
        })

        @sns_event_handler(sns_client, topic_arn, subject)
        def event_it():
            return response

        # The decorator publishes and returns the wrapped function's result.
        self.assertEqual(response, event_it())
        stubber.assert_no_pending_responses()
def test_check_harvested_video_to_process_scheduled_not_expired(self):
    """Command should do nothing when there is no video expired."""
    now = timezone.now()
    VideoFactory(
        id="9847c1c9-88a1-4afa-bce6-8a9d3ddd4e5b",
        live_state=HARVESTED,
        live_type=JITSI,
        upload_state=PENDING,
        starting_at=now + timedelta(days=2),
        uploaded_on=now - timedelta(days=3),
    )
    out = StringIO()
    expired_date_patch = mock.patch(
        "marsha.core.management.commands.check_harvested.generate_expired_date"
    )
    with Stubber(check_harvested.s3_client) as s3_client_stubber, \
            expired_date_patch as generate_expired_date_mock:
        generate_expired_date_mock.return_value = now + timedelta(days=1)
        call_command("check_harvested", stdout=out)
        # No S3 interaction should have been needed for a non-expired video.
        s3_client_stubber.assert_no_pending_responses()
    self.assertEqual("", out.getvalue())
    out.close()
def setUp(self):
    """Create the stubber and a canned DynamoDB item response used by the tests."""
    self.stubber = Stubber(self.client)
    # Attribute-value encoded map: a string, a list of strings, and a number.
    test_map = {
        'String1': {'S': 'StringVal1'},
        'List1': {'L': [{'S': 'ListVal1'}, {'S': 'ListVal2'}]},
        'Number1': {'N': '12345'},
    }
    self.get_parameters_response = {
        'Item': {
            'TestMap': {'M': test_map},
        }
    }
def test_delete_bot_called(intent_builder, put_bot_response, bot_properties, mocker):
    """ delete bot called test """
    lex, intents = setup()
    with Stubber(lex) as stubber:
        context = mocker.Mock()
        builder_instance = intent_builder.return_value
        builder_instance.delete_intents.return_value = {'test': 'response'}

        stub_get_request(stubber)
        stubber.add_response('delete_bot', {}, {'name': BOT_NAME})

        bot_builder = LexBotBuilder(
            Mock(), context, lex_sdk=lex, intent_builder=builder_instance)
        bot = Bot.create_bot(BOT_NAME, intents, MESSAGES, **bot_properties)
        bot_builder.delete(bot)
        # Both the get and delete_bot stubs must have been consumed.
        stubber.assert_no_pending_responses()
def test_create_change_set():
    """create_change_set sends TemplateBody by default and TemplateURL when a url is given."""
    s = Stack(name='teststack')
    aws_env = AWSEnv(regions=['us-east-1'])
    with default_region('us-east-1'):
        cfn_client = aws_env.client('cloudformation', region='us-east-1')
        stubber = Stubber(cfn_client)
        # No url -> inline template body.
        stubber.add_response('create_change_set', {}, {
            'Capabilities': ['CAPABILITY_IAM'],
            'StackName': 'teststack',
            'ChangeSetName': 'name1',
            'TemplateBody': ANY,
        })
        # With url -> template referenced by URL.
        stubber.add_response('create_change_set', {}, {
            'Capabilities': ['CAPABILITY_IAM'],
            'ChangeSetName': 'name2',
            'StackName': 'teststack',
            'TemplateURL': ANY,
        })
        with stubber:
            s.create_change_set('name1')
            s.create_change_set('name2', url='noprotocol://nothing')
def get_table_resource():
    """Return the CIS identity-vault DynamoDB Table for the configured environment.

    Local environments target a dynalite endpoint built from configured
    host/port; non-local environments use a regional boto3 session with an
    enlarged connection pool.
    """
    region = config("dynamodb_region", namespace="cis", default="us-west-2")
    environment = config("environment", namespace="cis", default="local")
    table_name = "{}-identity-vault".format(environment)
    client_config = botocore.config.Config(max_pool_connections=50)
    if environment == "local":
        dynalite_host = config("dynalite_host", namespace="cis", default="localhost")
        dynalite_port = config("dynalite_port", namespace="cis", default="4567")
        # NOTE(review): Stubber(...).client returns whatever object was passed
        # to Stubber, so this is effectively the plain Session — presumably a
        # leftover from test scaffolding; confirm before relying on stubbing.
        session = Stubber(boto3.session.Session(region_name=region)).client
        resource = session.resource("dynamodb", endpoint_url="http://{}:{}".format(
            dynalite_host, dynalite_port))
    else:
        session = boto3.session.Session(region_name=region)
        resource = session.resource("dynamodb", config=client_config)
    table = resource.Table(table_name)
    return table
def test_rotate_snapshots6(self):
    """rotate_snapshots returns None when no snapshots match the volume/description filters."""
    expected_filters = {
        "Filters": [
            {"Name": "volume-id", "Values": ["aaaa"]},
            {
                "Name": "description",
                "Values": ["osc-bsu-backup EF50CF3A80164A5EABAF8C78B2314C65"],
            },
        ]
    }
    with Stubber(self.ec2) as stubber:
        # Empty snapshot list: nothing to rotate.
        stubber.add_response(
            "describe_snapshots", {"Snapshots": []}, expected_filters)
        self.assertEqual(bsu.rotate_snapshots(self.ec2, ["aaaa"], 14), None)
def test_map_parameter_grouping():
    """
    Test special parameters that have shape of map are recorded
    as a list of keys based on `para_whitelist.json`
    """
    ddb = session.create_client('dynamodb', region_name='us-west-2')
    stubbed = {
        'ResponseMetadata': {
            'RequestId': REQUEST_ID,
            'HTTPStatusCode': 500,
        }
    }
    with Stubber(ddb) as stubber:
        stubber.add_response('batch_write_item', stubbed, {'RequestItems': ANY})
        ddb.batch_write_item(RequestItems={'table1': [{}], 'table2': [{}]})

    subsegment = xray_recorder.current_segment().subsegments[0]
    # 5xx statuses are surfaced as "fault" on the subsegment.
    assert subsegment.fault
    assert subsegment.http['response']['status'] == 500
    # Map-shaped parameter recorded as its keys.
    assert sorted(subsegment.aws['table_names']) == ['table1', 'table2']
def test_validate_account_alias(caplog):
    """Test validate_account_alias."""
    caplog.set_level(logging.INFO, logger='runway')
    alias = 'test-alias'
    iam_client = boto3.client('iam')
    stubber = Stubber(iam_client)
    # First call: alias present -> success; second call: mismatch -> SystemExit(1).
    stubber.add_response('list_account_aliases', {'AccountAliases': [alias]})
    stubber.add_response('list_account_aliases', {'AccountAliases': ['no-match']})

    with stubber:
        assert not validate_account_alias(iam_client, alias)
        with pytest.raises(SystemExit) as excinfo:
            assert validate_account_alias(iam_client, alias)
        assert excinfo.value.code == 1
        stubber.assert_no_pending_responses()

    assert caplog.messages == [
        'Verified current AWS account alias matches required alias {}.'.format(
            alias),
        'Current AWS account aliases "{}" do not match required account'
        ' alias {} in Runway config.'.format('no-match', alias)
    ]
def test_modify_workspace_properties_Always_On(mocker):
    """modify_workspace_properties returns '-M-' after switching to ALWAYS_ON."""
    workspace_id = '123qwer'
    new_running_mode = 'ALWAYS_ON'
    settings = {
        'region': 'us-east-1',
        'hourlyLimits': 10,
        'testEndOfMonth': 'yes',
        'isDryRun': False,
        'startTime': 1,
        'endTime': 2
    }
    workspace_helper = WorkspacesHelper(settings)
    client_stubber = Stubber(workspace_helper.workspaces_client)
    client_stubber.add_response('modify_workspace_properties', {}, {
        'WorkspaceId': workspace_id,
        'WorkspaceProperties': {'RunningMode': new_running_mode},
    })
    client_stubber.activate()
    result = workspace_helper.modify_workspace_properties(workspace_id, new_running_mode)
    assert result == '-M-'
def test__fail__client_error(self):
    """fail with client error"""
    tgw = TGWPeering()
    peer = TGWPeer(aws_region="", transit_gateway="", attachment_id="")
    error_code = "InternalException"
    error_message = "test error"

    stubber = Stubber(tgw.ec2_client)
    stubber.add_client_error(
        "delete_transit_gateway_peering_attachment",
        service_error_code=error_code,
        service_message=error_message,
    )
    stubber.activate()
    # The stubbed ClientError must propagate with the injected code/message.
    with pytest.raises(ClientError) as err:
        tgw.delete_tgw_peering_attachment(peer)
    assert err.value.response["Error"]["Code"] == error_code
    assert err.value.response["Error"]["Message"] == error_message
    stubber.deactivate()
def test_resolve_cfn_outputs(self, kwargs, stack_info, expected):
    """Test resolve_cfn_outputs."""
    client = boto3.client('cloudformation')
    stubber = Stubber(client)
    # Queue one describe_stacks response per (stack, output) pair.
    for stack_name, outputs in stack_info.items():
        for output_key, output_value in outputs.items():
            stubber.add_response('describe_stacks', {
                'Stacks': [{
                    'StackName': stack_name,
                    'CreationTime': datetime.now(),
                    'StackStatus': 'CREATE_COMPLETE',
                    'Outputs': [{
                        'OutputKey': output_key,
                        'OutputValue': output_value,
                    }],
                }]
            })
    with stubber:
        assert TerraformBackendConfig.resolve_cfn_outputs(
            client, **kwargs) == expected
        stubber.assert_no_pending_responses()