def test_single_mnp_job(self, args, expected, capsys, boto3_stubber, test_datadir, shared_datadir):
    """A single MNP job triggers a parent describe_jobs call followed by one for its children."""
    api_dir = shared_datadir / "aws_api_responses"
    parent_payload = json.loads(read_text(api_dir / "batch_describe-jobs_single_mnp_job.json"))
    children_payload = json.loads(read_text(api_dir / "batch_describe-jobs_single_mnp_job_children.json"))
    parent_request = MockedBoto3Request(
        method="describe_jobs",
        response=parent_payload,
        expected_params={"jobs": ["6abf3ecd-07a8-4faa-8a65-79e7404eb50f"]},
    )
    # MNP children are addressed as <parent-id>#<node-index>.
    children_request = MockedBoto3Request(
        method="describe_jobs",
        response=children_payload,
        expected_params={
            "jobs": ["6abf3ecd-07a8-4faa-8a65-79e7404eb50f#0", "6abf3ecd-07a8-4faa-8a65-79e7404eb50f#1"]
        },
    )
    boto3_stubber("batch", [parent_request, children_request])

    awsbstat.main(["-c", "cluster"] + args)

    assert capsys.readouterr().out == read_text(test_datadir / expected)
def test_retry_on_boto3_throttling(boto3_stubber, mocker):
    """retry_on_boto3_throttling keeps retrying on Throttling errors, sleeping 5s between attempts."""
    sleep_mock = mocker.patch("pcluster.utils.time.sleep")
    throttling_request = MockedBoto3Request(
        method="describe_stack_resources",
        response="Error",
        expected_params={"StackName": FAKE_STACK_NAME},
        generate_error=True,
        error_code="Throttling",
    )
    success_request = MockedBoto3Request(
        method="describe_stack_resources", response={}, expected_params={"StackName": FAKE_STACK_NAME}
    )
    # Two throttled attempts followed by a successful one.
    client = boto3_stubber("cloudformation", [throttling_request, throttling_request, success_request])

    utils.retry_on_boto3_throttling(client.describe_stack_resources, StackName=FAKE_STACK_NAME)

    sleep_mock.assert_called_with(5)
def test_verify_stack_creation_retry(boto3_stubber, mocker):
    """verify_stack_creation retries throttled describe_stack_events calls and reports a failed stack."""
    sleep_mock = mocker.patch("pcluster.utils.time.sleep")
    # First poll sees the stack still in progress, the second sees it failed.
    mocker.patch(
        "pcluster.utils.get_stack",
        side_effect=[{"StackStatus": "CREATE_IN_PROGRESS"}, {"StackStatus": "CREATE_FAILED"}],
    )
    throttled = MockedBoto3Request(
        method="describe_stack_events",
        response="Error",
        expected_params={"StackName": FAKE_STACK_NAME},
        generate_error=True,
        error_code="Throttling",
    )
    succeeded = MockedBoto3Request(
        method="describe_stack_events",
        response={"StackEvents": [_generate_stack_event()]},
        expected_params={"StackName": FAKE_STACK_NAME},
    )
    # Each of the two event polls is throttled once and then succeeds.
    cfn_client = boto3_stubber("cloudformation", [throttled, succeeded] * 2)

    assert_that(utils.verify_stack_creation(FAKE_STACK_NAME, cfn_client)).is_false()
    sleep_mock.assert_called_with(5)
def test_single_array_job(self, args, expected, capsys, boto3_stubber, test_datadir, shared_datadir):
    """A single array job triggers a parent describe_jobs call followed by one for its children."""
    api_dir = shared_datadir / "aws_api_responses"
    parent_payload = json.loads(read_text(api_dir / "batch_describe-jobs_single_array_job.json"))
    children_payload = json.loads(read_text(api_dir / "batch_describe-jobs_single_array_job_children.json"))
    # Array children are addressed as <parent-id>:<array-index>.
    boto3_stubber(
        "batch",
        [
            MockedBoto3Request(
                method="describe_jobs",
                response=parent_payload,
                expected_params={"jobs": ["3286a19c-68a9-47c9-8000-427d23ffc7ca"]},
            ),
            MockedBoto3Request(
                method="describe_jobs",
                response=children_payload,
                expected_params={
                    "jobs": ["3286a19c-68a9-47c9-8000-427d23ffc7ca:0", "3286a19c-68a9-47c9-8000-427d23ffc7ca:1"]
                },
            ),
        ],
    )

    awsbstat.main(["-c", "cluster"] + args)

    assert capsys.readouterr().out == read_text(test_datadir / expected)
def test_get_stack_retry(boto3_stubber, mocker):
    """get_stack retries a throttled describe_stacks call and returns the stack description."""
    sleep_mock = mocker.patch("pcluster.utils.time.sleep")
    stack_description = {"StackName": FAKE_STACK_NAME, "CreationTime": 0, "StackStatus": "CREATED"}
    boto3_stubber(
        "cloudformation",
        [
            MockedBoto3Request(
                method="describe_stacks",
                response="Error",
                expected_params={"StackName": FAKE_STACK_NAME},
                generate_error=True,
                error_code="Throttling",
            ),
            MockedBoto3Request(
                method="describe_stacks",
                response={"Stacks": [stack_description]},
                expected_params={"StackName": FAKE_STACK_NAME},
            ),
        ],
    )

    result = utils.get_stack(FAKE_STACK_NAME)

    assert_that(result).is_equal_to(stack_description)
    sleep_mock.assert_called_with(5)
def test_default_ordering_by_id(self, args, expected, capsys, boto3_stubber, test_datadir, shared_datadir):
    """Mixed MNP/array/simple jobs are described together and ordered by job id in the output."""
    api_dir = shared_datadir / "aws_api_responses"

    def _merge_jobs(filenames):
        # Concatenate the "jobs" arrays from the given canned API response files.
        merged = []
        for filename in filenames:
            merged.extend(json.loads(read_text(api_dir / filename))["jobs"])
        return {"jobs": merged}

    parents = _merge_jobs(
        [
            "batch_describe-jobs_single_mnp_job.json",
            "batch_describe-jobs_single_array_job.json",
            "batch_describe-jobs_single_job.json",
        ]
    )
    children = _merge_jobs(
        [
            "batch_describe-jobs_single_mnp_job_children.json",
            "batch_describe-jobs_single_array_job_children.json",
        ]
    )
    boto3_stubber(
        "batch",
        [
            MockedBoto3Request(
                method="describe_jobs",
                response=parents,
                expected_params={
                    "jobs": [
                        "3286a19c-68a9-47c9-8000-427d23ffc7ca",
                        "ab2cd019-1d84-43c7-a016-9772dd963f3b",
                        "6abf3ecd-07a8-4faa-8a65-79e7404eb50f",
                    ]
                },
            ),
            MockedBoto3Request(
                method="describe_jobs",
                response=children,
                expected_params={
                    "jobs": [
                        "6abf3ecd-07a8-4faa-8a65-79e7404eb50f#0",
                        "6abf3ecd-07a8-4faa-8a65-79e7404eb50f#1",
                        "3286a19c-68a9-47c9-8000-427d23ffc7ca:0",
                        "3286a19c-68a9-47c9-8000-427d23ffc7ca:1",
                    ]
                },
            ),
        ],
    )

    awsbstat.main(["-c", "cluster"] + args)

    assert capsys.readouterr().out == read_text(test_datadir / expected)
def test_all_status_detailed(self, capsys, boto3_stubber, test_datadir, shared_datadir):
    """Querying all statuses with -d lists jobs per status and describes them all in detail.

    One list_jobs request is stubbed per status; a single describe_jobs request then
    covers every job id collected from the listings.
    """
    mocked_requests = []
    jobs_ids = []
    describe_jobs_response = {"jobs": []}
    for status in ALL_JOB_STATUS:
        list_jobs_response = json.loads(
            read_text(shared_datadir / "aws_api_responses/batch_list-jobs_{0}.json".format(status))
        )
        describe_jobs_response["jobs"].extend(
            json.loads(read_text(shared_datadir / "aws_api_responses/batch_describe-jobs_{0}.json".format(status)))[
                "jobs"
            ]
        )
        jobs_ids.extend([job["jobId"] for job in list_jobs_response["jobSummaryList"]])
        mocked_requests.append(
            MockedBoto3Request(
                method="list_jobs",
                response=list_jobs_response,
                expected_params={
                    "jobQueue": DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG["job_queue"],
                    "jobStatus": status,
                    "nextToken": "",
                },
            )
        )
    # A single describe_jobs call covers all ids collected from the list_jobs pages.
    mocked_requests.append(
        MockedBoto3Request(method="describe_jobs", response=describe_jobs_response, expected_params={"jobs": jobs_ids})
    )
    boto3_stubber("batch", mocked_requests)

    awsbstat.main(["-c", "cluster", "-s", "ALL", "-d"])

    # describe-jobs api validation made by the Stubber requires startedAt to be always present.
    # Removing it from output when value is default (1970-01-01T00:00:00+00:00) since this is the
    # behavior for not stubbed calls.
    output = capsys.readouterr().out.replace("1970-01-01T00:00:00+00:00", "-")
    # Fixed variable-name typo: "expcted" -> "expected".
    expected_jobs_count_by_status = {
        "SUBMITTED": 2,
        "PENDING": 1,
        "RUNNABLE": 2,
        "STARTING": 2,
        "RUNNING": 2,
        "SUCCEEDED": 3,
        "FAILED": 3,
    }
    for status, count in expected_jobs_count_by_status.items():
        assert output.count(status) == count
    assert output.count("jobId") == 15
    assert output == read_text(test_datadir / "expected_output.txt")
def test_ec2_vpc_id_validator(mocker, boto3_stubber):
    """The vpc_id validator describes the VPC and then checks both of its DNS attributes."""
    vpc_description = {
        "Vpcs": [
            {
                "VpcId": "vpc-12345678",
                "InstanceTenancy": "default",
                "Tags": [{"Value": "Default VPC", "Key": "Name"}],
                "State": "available",
                "DhcpOptionsId": "dopt-4ef69c2a",
                "CidrBlock": "172.31.0.0/16",
                "IsDefault": True,
            }
        ]
    }
    dns_attributes = {
        "VpcId": "vpc-12345678",
        "EnableDnsSupport": {"Value": True},
        "EnableDnsHostnames": {"Value": True},
    }
    mocked_requests = [
        MockedBoto3Request(
            method="describe_vpcs", response=vpc_description, expected_params={"VpcIds": ["vpc-12345678"]}
        )
    ]
    # One describe_vpc_attribute call per DNS attribute, in this order.
    for attribute in ("enableDnsSupport", "enableDnsHostnames"):
        mocked_requests.append(
            MockedBoto3Request(
                method="describe_vpc_attribute",
                response=dns_attributes,
                expected_params={"VpcId": "vpc-12345678", "Attribute": attribute},
            )
        )
    boto3_stubber("ec2", mocked_requests)
    # TODO mock and test invalid vpc-id
    for vpc_id, expected_message in [("vpc-12345678", None)]:
        config_parser_dict = {"cluster default": {"vpc_settings": "default"}, "vpc default": {"vpc_id": vpc_id}}
        utils.assert_param_validator(mocker, config_parser_dict, expected_message)
def test_efa_validator_with_vpc_security_group(
    boto3_stubber, mocker, ip_permissions, ip_permissions_egress, expected_message
):
    """EFA validation against a custom security group inspects its ingress and egress rules."""
    mocker.patch("pcluster.config.validators.get_supported_features", return_value={"instances": ["t2.micro"]})
    security_groups = {
        "SecurityGroups": [
            {
                "IpPermissionsEgress": ip_permissions_egress,
                "Description": "My security group",
                "IpPermissions": ip_permissions,
                "GroupName": "MySecurityGroup",
                "OwnerId": "123456789012",
                "GroupId": "sg-12345678",
            }
        ]
    }
    describe_request = MockedBoto3Request(
        method="describe_security_groups",
        response=security_groups,
        expected_params={"GroupIds": ["sg-12345678"]},
    )
    # Called twice: once for vpc_security_group_id validation and once for the EFA check.
    boto3_stubber("ec2", [describe_request] * 2)
    config_parser_dict = {
        "cluster default": {"enable_efa": "compute", "placement_group": "DYNAMIC", "vpc_settings": "default"},
        "vpc default": {"vpc_security_group_id": "sg-12345678"},
    }
    utils.assert_param_validator(mocker, config_parser_dict, expected_message)
def test_ec2_ebs_snapshot_validator(mocker, boto3_stubber):
    """The ebs_snapshot_id validator describes the snapshot to verify it exists."""
    snapshot = {
        "Description": "This is my snapshot",
        "Encrypted": False,
        "VolumeId": "vol-049df61146c4d7901",
        "State": "completed",
        "VolumeSize": 8,
        "StartTime": "2014-02-28T21:28:32.000Z",
        "Progress": "100%",
        "OwnerId": "012345678910",
        "SnapshotId": "snap-1234567890abcdef0",
    }
    boto3_stubber(
        "ec2",
        [
            MockedBoto3Request(
                method="describe_snapshots",
                response={"Snapshots": [snapshot]},
                expected_params={"SnapshotIds": ["snap-1234567890abcdef0"]},
            )
        ],
    )
    # TODO test with invalid key
    config_parser_dict = {
        "cluster default": {"ebs_settings": "default"},
        "ebs default": {"shared_dir": "test", "ebs_snapshot_id": "snap-1234567890abcdef0"},
    }
    utils.assert_param_validator(mocker, config_parser_dict)
def test_get_stack_events_retry(boto3_stubber, mocker):
    """get_stack_events retries a throttled describe_stack_events call before returning the events."""
    sleep_mock = mocker.patch("pcluster.utils.time.sleep")
    events = [_generate_stack_event()]
    throttled = MockedBoto3Request(
        method="describe_stack_events",
        response="Error",
        expected_params={"StackName": FAKE_STACK_NAME},
        generate_error=True,
        error_code="Throttling",
    )
    succeeded = MockedBoto3Request(
        method="describe_stack_events",
        response={"StackEvents": events},
        expected_params={"StackName": FAKE_STACK_NAME},
    )
    boto3_stubber("cloudformation", [throttled, succeeded])

    assert_that(utils.get_stack_events(FAKE_STACK_NAME)).is_equal_to(events)
    sleep_mock.assert_called_with(5)
def test_stack_exists(boto3_stubber, response, is_error):
    """Verify that utils.stack_exists behaves as expected."""
    boto3_stubber(
        "cloudformation",
        [
            MockedBoto3Request(
                method="describe_stacks",
                response=response,
                expected_params={"StackName": FAKE_STACK_NAME},
                generate_error=is_error,
            )
        ],
    )
    # A describe_stacks failure is interpreted as "stack does not exist".
    assert_that(utils.stack_exists(FAKE_STACK_NAME)).is_equal_to(not is_error)
def test_single_job_detailed(self, capsys, boto3_stubber, test_datadir, shared_datadir):
    """Passing a single job id produces a detailed, describe_jobs-backed report."""
    job_id = "ab2cd019-1d84-43c7-a016-9772dd963f3b"
    canned_response = json.loads(read_text(shared_datadir / "aws_api_responses/batch_describe-jobs_single_job.json"))
    boto3_stubber(
        "batch",
        MockedBoto3Request(method="describe_jobs", response=canned_response, expected_params={"jobs": [job_id]}),
    )

    awsbstat.main(["-c", "cluster", job_id])

    assert capsys.readouterr().out == read_text(test_datadir / "expected_output.txt")
def test_ec2_key_pair_validator(mocker, boto3_stubber):
    """The key_name validator describes the key pair to verify it exists."""
    key_pairs = {
        "KeyPairs": [
            {"KeyFingerprint": "12:bf:7c:56:6c:dd:4f:8c:24:45:75:f1:1b:16:54:89:82:09:a4:26", "KeyName": "key1"}
        ]
    }
    boto3_stubber(
        "ec2",
        [MockedBoto3Request(method="describe_key_pairs", response=key_pairs, expected_params={"KeyNames": ["key1"]})],
    )
    # TODO test with invalid key
    utils.assert_param_validator(mocker, {"cluster default": {"key_name": "key1"}})
def test_placement_group_validator(mocker, boto3_stubber):
    """The placement_group validator describes the group to verify it exists."""
    placement_groups = {
        "PlacementGroups": [{"GroupName": "my-cluster", "State": "available", "Strategy": "cluster"}]
    }
    boto3_stubber(
        "ec2",
        [
            MockedBoto3Request(
                method="describe_placement_groups",
                response=placement_groups,
                expected_params={"GroupNames": ["my-cluster"]},
            )
        ],
    )
    # TODO test with invalid group name
    utils.assert_param_validator(mocker, {"cluster default": {"placement_group": "my-cluster"}})
def test_succeeded_status(self, capsys, boto3_stubber, test_datadir, shared_datadir):
    """Filtering on SUCCEEDED issues a single list_jobs call for that status."""
    canned_response = json.loads(read_text(shared_datadir / "aws_api_responses/batch_list-jobs_SUCCEEDED.json"))
    list_request = MockedBoto3Request(
        method="list_jobs",
        response=canned_response,
        expected_params={
            "jobQueue": DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG["job_queue"],
            "jobStatus": "SUCCEEDED",
            "nextToken": "",
        },
    )
    boto3_stubber("batch", list_request)

    awsbstat.main(["-c", "cluster", "-s", "SUCCEEDED"])

    assert capsys.readouterr().out == read_text(test_datadir / "expected_output.txt")
def test_ec2_ami_validator(mocker, boto3_stubber):
    """The custom_ami validator describes the image to verify it exists."""
    image = {
        "VirtualizationType": "paravirtual",
        "Name": "My server",
        "Hypervisor": "xen",
        "ImageId": "ami-12345678",
        "RootDeviceType": "ebs",
        "State": "available",
        "BlockDeviceMappings": [
            {
                "DeviceName": "/dev/sda1",
                "Ebs": {
                    "DeleteOnTermination": True,
                    "SnapshotId": "snap-1234567890abcdef0",
                    "VolumeSize": 8,
                    "VolumeType": "standard",
                },
            }
        ],
        "Architecture": "x86_64",
        "ImageLocation": "123456789012/My server",
        "KernelId": "aki-88aa75e1",
        "OwnerId": "123456789012",
        "RootDeviceName": "/dev/sda1",
        "Public": False,
        "ImageType": "machine",
        "Description": "An AMI for my server",
    }
    boto3_stubber(
        "ec2",
        [
            MockedBoto3Request(
                method="describe_images",
                response={"Images": [image]},
                expected_params={"ImageIds": ["ami-12345678"]},
            )
        ],
    )
    # TODO test with invalid key
    utils.assert_param_validator(mocker, {"cluster default": {"custom_ami": "ami-12345678"}})
def test_no_jobs_all_status(self, capsys, boto3_stubber, test_datadir):
    """With no jobs in any status, querying ALL still issues one list_jobs call per status."""
    mocked_requests = [
        MockedBoto3Request(
            method="list_jobs",
            response={"jobSummaryList": []},
            expected_params={
                "jobQueue": DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG["job_queue"],
                "jobStatus": status,
                "nextToken": "",
            },
        )
        for status in ALL_JOB_STATUS
    ]
    boto3_stubber("batch", mocked_requests)

    awsbstat.main(["-c", "cluster", "-s", "ALL"])

    assert capsys.readouterr().out == read_text(test_datadir / "expected_output.txt")
def test_children_by_ids(self, args, expected, capsys, boto3_stubber, test_datadir, shared_datadir):
    """Explicit child ids (array ':' and MNP '#') are described in a single call."""
    requested_ids = [
        "3286a19c-68a9-47c9-8000-427d23ffc7ca:0",
        "ab2cd019-1d84-43c7-a016-9772dd963f3b",
        "6abf3ecd-07a8-4faa-8a65-79e7404eb50f#1",
    ]
    canned_response = json.loads(
        read_text(shared_datadir / "aws_api_responses/batch_describe-jobs_children_jobs.json")
    )
    boto3_stubber(
        "batch",
        MockedBoto3Request(method="describe_jobs", response=canned_response, expected_params={"jobs": requested_ids}),
    )

    awsbstat.main(["-c", "cluster"] + args)

    assert capsys.readouterr().out == read_text(test_datadir / expected)
def test_ec2_volume_validator(mocker, boto3_stubber):
    """The ebs_volume_id validator describes the volume to verify it exists."""
    volume = {
        "AvailabilityZone": "us-east-1a",
        "Attachments": [
            {
                "AttachTime": "2013-12-18T22:35:00.000Z",
                "InstanceId": "i-1234567890abcdef0",
                "VolumeId": "vol-12345678",
                "State": "attached",
                "DeleteOnTermination": True,
                "Device": "/dev/sda1",
            }
        ],
        "Encrypted": False,
        "VolumeType": "gp2",
        "VolumeId": "vol-049df61146c4d7901",
        "State": "available",  # TODO add test with "in-use"
        "SnapshotId": "snap-1234567890abcdef0",
        "CreateTime": "2013-12-18T22:35:00.084Z",
        "Size": 8,
    }
    boto3_stubber(
        "ec2",
        [
            MockedBoto3Request(
                method="describe_volumes",
                response={"Volumes": [volume]},
                expected_params={"VolumeIds": ["vol-12345678"]},
            )
        ],
    )
    # TODO test with invalid key
    config_parser_dict = {
        "cluster default": {"ebs_settings": "default"},
        "ebs default": {"shared_dir": "test", "ebs_volume_id": "vol-12345678"},
    }
    utils.assert_param_validator(mocker, config_parser_dict)
def test_url_validator(mocker, boto3_stubber):
    """The template_url validator HEADs s3 objects and opens http urls."""
    head_object_response = {
        "AcceptRanges": "bytes",
        "ContentType": "text/html",
        "LastModified": "Thu, 16 Apr 2015 18:19:14 GMT",
        "ContentLength": 77,
        "VersionId": "null",
        "ETag": '"30a6ec7e1a9ad79c203d05a589c8b400"',
        "Metadata": {},
    }
    boto3_stubber(
        "s3",
        [
            MockedBoto3Request(
                method="head_object",
                response=head_object_response,
                expected_params={"Bucket": "test", "Key": "test.json"},
            )
        ],
    )
    # http urls are checked via urllib rather than boto3.
    mocker.patch("pcluster.config.validators.urllib.request.urlopen")
    for template_url, expected_message in [("s3://test/test.json", None), ("http://test/test.json", None)]:
        config_parser_dict = {"cluster default": {"template_url": template_url}}
        utils.assert_param_validator(mocker, config_parser_dict, expected_message)
def test_get_stack_template(boto3_stubber, template_body, error_message):
    """Verify that utils.get_stack_template behaves as expected."""
    if template_body is None:
        response = error_message
    else:
        response = {"TemplateBody": json.dumps(template_body)}
    boto3_stubber(
        "cloudformation",
        [
            MockedBoto3Request(
                method="get_template",
                response=response,
                expected_params={"StackName": FAKE_STACK_NAME},
                generate_error=template_body is None,
            )
        ],
    )
    if error_message:
        # A retrieval failure must abort with a non-zero exit code.
        with pytest.raises(SystemExit, match=error_message) as sysexit:
            utils.get_stack_template(stack_name=FAKE_STACK_NAME)
        assert_that(sysexit.value.code).is_not_equal_to(0)
    else:
        assert_that(utils.get_stack_template(stack_name=FAKE_STACK_NAME)).is_equal_to(response.get("TemplateBody"))
def test_get_default_instance(boto3_stubber, region, free_tier_instance_type, default_instance_type, stub_boto3):
    """get_default_instance_type returns the free-tier type when one exists, else the regional default."""
    os.environ["AWS_DEFAULT_REGION"] = region
    instance_types = [{"InstanceType": free_tier_instance_type}] if free_tier_instance_type else []
    if stub_boto3:
        boto3_stubber(
            "ec2",
            [
                MockedBoto3Request(
                    method="describe_instance_types",
                    response={"InstanceTypes": instance_types},
                    expected_params={
                        "Filters": [
                            {"Name": "free-tier-eligible", "Values": ["true"]},
                            {"Name": "current-generation", "Values": ["true"]},
                        ]
                    },
                )
            ],
        )
    assert_that(get_default_instance_type()).is_equal_to(default_instance_type)
def test_get_latest_alinux_ami_id(mocker, boto3_stubber, path, boto3_response, expected_message):
    """__get_latest_alinux_ami_id reads the AMI id from SSM; API errors abort with SystemExit."""
    mocked_requests = [
        MockedBoto3Request(
            method="get_parameters_by_path",
            response=boto3_response,
            expected_params={"Path": path},
            # Idiom fix: bool(...) instead of "True if ... else False".
            generate_error=bool(expected_message),
        )
    ]
    boto3_stubber("ssm", mocked_requests)
    pcluster_config = get_mocked_pcluster_config(mocker)
    if expected_message:
        with pytest.raises(SystemExit, match=expected_message):
            # Result is irrelevant here; the call must raise. (Dropped useless "_ =" binding.)
            pcluster_config._PclusterConfig__get_latest_alinux_ami_id()
    else:
        latest_alinux_ami_id = pcluster_config._PclusterConfig__get_latest_alinux_ami_id()
        assert_that(latest_alinux_ami_id).is_equal_to(boto3_response.get("Parameters")[0].get("Value"))
def test_all_status(self, capsys, boto3_stubber, test_datadir, shared_datadir):
    """-s ALL issues one list_jobs call per job status, using the canned response for each."""
    mocked_requests = [
        MockedBoto3Request(
            method="list_jobs",
            response=json.loads(
                read_text(shared_datadir / "aws_api_responses/batch_list-jobs_{0}.json".format(status))
            ),
            expected_params={
                "jobQueue": DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG["job_queue"],
                "jobStatus": status,
                "nextToken": "",
            },
        )
        for status in ALL_JOB_STATUS
    ]
    boto3_stubber("batch", mocked_requests)

    awsbstat.main(["-c", "cluster", "-s", "ALL"])

    assert capsys.readouterr().out == read_text(test_datadir / "expected_output.txt")
def test_get_stack_resources(boto3_stubber, resources, error_message):
    """Verify that utils.get_stack_resources behaves as expected."""
    failing = error_message is not None
    if failing:
        response = "Unable to get {stack_name}'s resources: {error_message}".format(
            stack_name=FAKE_STACK_NAME, error_message=error_message
        )
    else:
        response = {"StackResources": resources}
    boto3_stubber(
        "cloudformation",
        [
            MockedBoto3Request(
                method="describe_stack_resources",
                response=response,
                expected_params={"StackName": FAKE_STACK_NAME},
                generate_error=failing,
            )
        ],
    )
    if failing:
        # Failure path: exit with a non-zero code and the full error message.
        with pytest.raises(SystemExit, match=response) as sysexit:
            utils.get_stack_resources(FAKE_STACK_NAME)
        assert_that(sysexit.value.code).is_not_equal_to(0)
    else:
        assert_that(utils.get_stack_resources(FAKE_STACK_NAME)).is_equal_to(resources)
def test_ec2_security_group_validator(mocker, boto3_stubber):
    """The vpc_security_group_id validator describes the security group to verify it exists."""
    security_group = {
        "IpPermissionsEgress": [],
        "Description": "My security group",
        "IpPermissions": [
            {
                "PrefixListIds": [],
                "FromPort": 22,
                "IpRanges": [{"CidrIp": "203.0.113.0/24"}],
                "ToPort": 22,
                "IpProtocol": "tcp",
                "UserIdGroupPairs": [],
            }
        ],
        "GroupName": "MySecurityGroup",
        "OwnerId": "123456789012",
        "GroupId": "sg-12345678",
    }
    boto3_stubber(
        "ec2",
        [
            MockedBoto3Request(
                method="describe_security_groups",
                response={"SecurityGroups": [security_group]},
                expected_params={"GroupIds": ["sg-12345678"]},
            )
        ],
    )
    # TODO test with invalid key
    config_parser_dict = {
        "cluster default": {"vpc_settings": "default"},
        "vpc default": {"vpc_security_group_id": "sg-12345678"},
    }
    utils.assert_param_validator(mocker, config_parser_dict)
def _mock_boto3(boto3_stubber, expected_json_params, head_node_instance_type=None):
    """Mock the boto3 ec2 client based on the expected json configuration.

    Registers one describe_instance_types request for the head node type (if given)
    followed by one per distinct compute-resource instance type found in the
    cluster's queue settings, preserving first-seen order.
    """
    queue_settings = expected_json_params["cluster"].get("queue_settings", {})
    instance_types = []
    # One describe_instance_type for the Head node
    if head_node_instance_type:
        instance_types.append(head_node_instance_type)
    # One describe_instance_type per compute resource.
    # Keys are unused, so iterate values only (was .items() with a discarded key).
    for queue in queue_settings.values():
        for compute_resource in queue.get("compute_resource_settings", {}).values():
            if compute_resource["instance_type"] not in instance_types:
                instance_types.append(compute_resource["instance_type"])
    mocked_requests = [
        MockedBoto3Request(
            method="describe_instance_types",
            response=DESCRIBE_INSTANCE_TYPES_RESPONSES[instance_type],
            expected_params={"InstanceTypes": [instance_type]},
        )
        for instance_type in instance_types
    ]
    boto3_stubber("ec2", mocked_requests)
def test_ec2_subnet_id_validator(mocker, boto3_stubber):
    """The master_subnet_id validator describes the subnet to verify it exists."""
    subnet = {
        "AvailabilityZone": "us-east-2c",
        "AvailabilityZoneId": "use2-az3",
        "AvailableIpAddressCount": 248,
        "CidrBlock": "10.0.1.0/24",
        "DefaultForAz": False,
        "MapPublicIpOnLaunch": False,
        "State": "available",
        "SubnetId": "subnet-12345678",
        "VpcId": "vpc-06e4ab6c6cEXAMPLE",
        "OwnerId": "111122223333",
        "AssignIpv6AddressOnCreation": False,
        "Ipv6CidrBlockAssociationSet": [],
        "Tags": [{"Key": "Name", "Value": "MySubnet"}],
        "SubnetArn": "arn:aws:ec2:us-east-2:111122223333:subnet/subnet-12345678",
    }
    boto3_stubber(
        "ec2",
        [
            MockedBoto3Request(
                method="describe_subnets",
                response={"Subnets": [subnet]},
                expected_params={"SubnetIds": ["subnet-12345678"]},
            )
        ],
    )
    # TODO test with invalid key
    config_parser_dict = {
        "cluster default": {"vpc_settings": "default"},
        "vpc default": {"master_subnet_id": "subnet-12345678"},
    }
    utils.assert_param_validator(mocker, config_parser_dict)
def test_update_stack_template(mocker, boto3_stubber, error_message):
    """Verify that utils.update_stack_template behaves as expected."""
    template_body = {"TemplateKey": "TemplateValue"}
    cfn_params = [{"ParameterKey": "Key", "ParameterValue": "Value"}]
    expected_params = {
        "StackName": FAKE_STACK_NAME,
        # update_stack_template is expected to serialize the template with indent=2.
        "TemplateBody": json.dumps(template_body, indent=2),
        "Parameters": cfn_params,
        "Capabilities": ["CAPABILITY_IAM"],
    }
    # On success the stubbed update_stack returns a stack id; otherwise it raises error_message.
    response = error_message or {"StackId": "stack ID"}
    mocked_requests = [
        MockedBoto3Request(
            method="update_stack",
            response=response,
            expected_params=expected_params,
            generate_error=error_message is not None,
        )
    ]
    boto3_stubber("cloudformation", mocked_requests)
    mocker.patch("pcluster.utils._wait_for_update")
    if error_message is None or "no updates are to be performed" in error_message.lower():
        # Success and the benign "no updates are to be performed" case: no exception expected.
        utils.update_stack_template(FAKE_STACK_NAME, template_body, cfn_params)
        if error_message is None or "no updates are to be performed" not in error_message.lower():
            # Real update: the code must wait for the stack update to complete.
            utils._wait_for_update.assert_called_with(FAKE_STACK_NAME)
        else:
            # "No updates" short-circuits: no waiting should occur.
            assert_that(utils._wait_for_update.called).is_false()
    else:
        # Any other failure exits with a non-zero code and a descriptive message.
        full_error_message = "Unable to update stack template for stack {stack_name}: {emsg}".format(
            stack_name=FAKE_STACK_NAME, emsg=error_message
        )
        with pytest.raises(SystemExit, match=full_error_message) as sysexit:
            utils.update_stack_template(FAKE_STACK_NAME, template_body, cfn_params)
        assert_that(sysexit.value.code).is_not_equal_to(0)