def main():
    """Entry point: create, delete, accept or reject an EC2 VPC peering connection."""
    argument_spec = dict(
        vpc_id=dict(),
        peer_vpc_id=dict(),
        peer_region=dict(),
        peering_id=dict(),
        peer_owner_id=dict(),
        tags=dict(required=False, type='dict'),
        purge_tags=dict(default=True, type='bool'),
        state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']),
        wait=dict(default=False, type='bool'),
    )
    # Creating needs both VPC ids; accepting/rejecting needs the peering id.
    required_if = [
        ('state', 'present', ['vpc_id', 'peer_vpc_id']),
        ('state', 'accept', ['peering_id']),
        ('state', 'reject', ['peering_id'])
    ]

    module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if)

    state = module.params.get('state')
    peering_id = module.params.get('peering_id')
    vpc_id = module.params.get('vpc_id')
    peer_vpc_id = module.params.get('peer_vpc_id')

    try:
        client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

    if state == 'present':
        (changed, results) = create_peer_connection(client, module)
    elif state == 'absent':
        # An absent peering can be addressed either by its id or by the VPC pair.
        if not peering_id and (not vpc_id or not peer_vpc_id):
            module.fail_json(msg='state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]')
        # NOTE(review): this branch does not assign changed/results; it appears to
        # assume remove_peer_connection() calls module.exit_json() itself and never
        # returns here -- confirm, otherwise the lines below would raise NameError.
        remove_peer_connection(client, module)
    else:
        (changed, results) = accept_reject(state, client, module)

    formatted_results = camel_dict_to_snake_dict(results)
    # Turn the resource tags from boto3 into an ansible friendly tag dictionary
    formatted_results['tags'] = boto3_tag_list_to_ansible_dict(formatted_results.get('tags', []))

    module.exit_json(changed=changed, vpc_peering_connection=formatted_results, peering_id=results['VpcPeeringConnectionId'])
def main():
    """Gather facts about EC2 instances, selected by id list or by filters."""
    argument_spec = dict(
        instance_ids=dict(default=[], type='list', elements='str'),
        filters=dict(default={}, type='dict'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['instance_ids', 'filters']],
        supports_check_mode=True,
    )

    # Deprecation shim for the pre-rename module name.
    if module._name == 'ec2_instance_facts':
        module.deprecate("The 'ec2_instance_facts' module has been renamed to 'ec2_instance_info'",
                         date='2021-12-01', collection_name='community.aws')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="region must be specified")
    connection = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_params)

    list_ec2_instances(connection, module)
def main():
    """Create or delete an EC2 Auto Scaling policy (legacy boto2 implementation)."""
    argument_spec = dict(
        name=dict(required=True, type='str'),
        adjustment_type=dict(type='str', choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']),
        asg_name=dict(required=True, type='str'),
        scaling_adjustment=dict(type='int'),
        min_adjustment_step=dict(type='int'),
        cooldown=dict(type='int'),
        state=dict(default='present', choices=['present', 'absent']),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False)

    # This module still depends on boto2 rather than boto3.
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    state = module.params.get('state')

    try:
        connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    # 'choices' restricts state to exactly these two values.
    handlers = {'present': create_scaling_policy, 'absent': delete_scaling_policy}
    handlers[state](connection, module)
def main():
    """Attach, update or remove an S3 bucket notification that invokes a Lambda function."""
    event_types = [
        's3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post',
        's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload',
        's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete',
        's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post',
        's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject',
    ]
    argument_spec = dict(
        state=dict(default='present', choices=['present', 'absent']),
        event_name=dict(required=True),
        lambda_function_arn=dict(aliases=['function_arn']),
        bucket_name=dict(required=True),
        events=dict(type='list', default=[], choices=event_types, elements='str'),
        prefix=dict(default=''),
        suffix=dict(default=''),
        lambda_alias=dict(),
        lambda_version=dict(type='int', default=0),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['lambda_alias', 'lambda_version']],
        required_if=[['state', 'present', ['events']]],
    )

    bucket = AmazonBucket(module.client('s3'), module.params['bucket_name'])
    current = bucket.current_config(module.params['event_name'])
    desired = Config.from_params(**module.params)
    notification_configuration = [cfg.raw for cfg in bucket.full_config()]

    state = module.params['state']
    try:
        # Nothing to do when the bucket already matches the requested state.
        already_correct = ((state == 'present' and current == desired)
                           or (state == 'absent' and not current))
        changed = not already_correct
        if changed and not module.check_mode:
            if state == 'present':
                notification_configuration = bucket.apply_config(desired)
            else:
                notification_configuration = bucket.delete_config(desired)
    except (ClientError, BotoCoreError) as e:
        module.fail_json(msg='{0}'.format(e))

    module.exit_json(changed=changed,
                     notification_configuration=[camel_dict_to_snake_dict(cfg)
                                                 for cfg in notification_configuration])
def main():
    """
    Main entry point for managing AWS Batch job definitions.

    Builds the argument spec, validates parameters, then defers to
    manage_state() and exits with a snake_cased result dict.

    :return dict: ansible facts
    """
    argument_spec = dict(
        state=dict(required=False, default='present', choices=['present', 'absent']),
        job_definition_name=dict(required=True),
        job_definition_arn=dict(),
        type=dict(required=True),
        parameters=dict(type='dict'),
        image=dict(required=True),
        vcpus=dict(type='int', required=True),
        memory=dict(type='int', required=True),
        command=dict(type='list', default=[], elements='str'),
        job_role_arn=dict(),
        # These list options carry AWS Batch containerProperties sub-structures,
        # which are dictionaries; declare elements so items are validated.
        volumes=dict(type='list', default=[], elements='dict'),
        environment=dict(type='list', default=[], elements='dict'),
        mount_points=dict(type='list', default=[], elements='dict'),
        # NOTE(review): these two look boolean but are left untyped to preserve
        # the values passed through to the API unchanged -- confirm before typing.
        readonly_root_filesystem=dict(),
        privileged=dict(),
        ulimits=dict(type='list', default=[], elements='dict'),
        user=dict(),
        attempts=dict(type='int')
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    batch_client = module.client('batch')

    validate_params(module, batch_client)

    results = manage_state(module, batch_client)

    module.exit_json(**camel_dict_to_snake_dict(results))
def main():
    """Produce a list of function suffixes which handle lambda events."""
    source_choices = ["stream", "sqs"]

    argument_spec = dict(
        state=dict(required=False, default='present', choices=['present', 'absent']),
        lambda_function_arn=dict(required=True, aliases=['function_name', 'function_arn']),
        event_source=dict(required=False, default="stream", choices=source_choices),
        source_params=dict(type='dict', required=True),
        alias=dict(required=False, default=None),
        version=dict(type='int', required=False, default=0),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['alias', 'version']],
        required_together=[],
    )

    aws = AWSConnection(module, ['lambda'])

    validate_params(module, aws)

    # 'choices' already restricts event_source, so this guard is defensive only.
    if module.params['event_source'].lower() not in ('stream', 'sqs'):
        module.fail_json(msg='Please select `stream` or `sqs` as the event type')
    results = lambda_event_stream(module, aws)

    module.exit_json(**results)
def main():
    """Report the ELB target groups an EC2 instance is registered with."""
    argument_spec = dict(
        instance_id=dict(required=True, type="str"),
        get_unused_target_groups=dict(required=False, default=True, type="bool"),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # Deprecation shim for the pre-rename module name.
    if module._name == 'elb_target_facts':
        module.deprecate("The 'elb_target_facts' module has been renamed to 'elb_target_info'",
                         date='2021-12-01', collection_name='community.aws')

    gatherer = TargetInfoGatherer(module,
                                  module.params["instance_id"],
                                  module.params["get_unused_target_groups"])

    module.exit_json(instance_target_groups=[tg.to_dict() for tg in gatherer.tgs])
def main():
    """Collect facts about ELBv2 application load balancers."""
    argument_spec = dict(
        load_balancer_arns=dict(type='list', elements='str'),
        names=dict(type='list', elements='str'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['load_balancer_arns', 'names']],
        supports_check_mode=True,
    )

    # Deprecation shim for the pre-rename module name.
    if module._name == 'elb_application_lb_facts':
        module.deprecate("The 'elb_application_lb_facts' module has been renamed to 'elb_application_lb_info'",
                         date='2021-12-01', collection_name='community.aws')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="region must be specified")
    connection = boto3_conn(module, conn_type='client', resource='elbv2',
                            region=region, endpoint=ec2_url, **aws_connect_params)

    list_load_balancers(connection, module)
def main():
    """Create, update or delete an ECR repository and its policies."""
    argument_spec = dict(
        name=dict(required=True),
        registry_id=dict(required=False),
        state=dict(required=False, choices=['present', 'absent'], default='present'),
        force_set_policy=dict(required=False, type='bool', default=False),
        policy=dict(required=False, type='json'),
        image_tag_mutability=dict(required=False, choices=['mutable', 'immutable'], default='mutable'),
        purge_policy=dict(required=False, type='bool', aliases=['delete_policy'],
                          deprecated_aliases=[dict(name='delete_policy', date='2022-06-01',
                                                   collection_name='community.aws')]),
        lifecycle_policy=dict(required=False, type='json'),
        purge_lifecycle_policy=dict(required=False, type='bool'),
        scan_on_push=dict(required=False, type='bool', default=False),
    )
    # Setting a policy and purging it at the same time makes no sense.
    mutually_exclusive = [
        ['policy', 'purge_policy'],
        ['lifecycle_policy', 'purge_lifecycle_policy'],
    ]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              mutually_exclusive=mutually_exclusive)

    registry = EcsEcr(module)
    passed, result = run(registry, module.params)

    # run() signals success/failure through its first return value.
    reporter = module.exit_json if passed else module.fail_json
    reporter(**result)
def main():
    """Create or remove an EC2 VPN gateway (VGW)."""
    argument_spec = dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(),
        vpn_gateway_id=dict(),
        vpc_id=dict(),
        asn=dict(type='int'),
        wait_timeout=dict(type='int', default=320),
        type=dict(default='ipsec.1', choices=['ipsec.1']),
        tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[['state', 'present', ['name']]])

    state = module.params.get('state').lower()

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - %s" % to_native(e),
                         exception=traceback.format_exc())

    # 'choices' restricts state to present/absent.
    handler = ensure_vgw_present if state == 'present' else ensure_vgw_absent
    (changed, results) = handler(client, module)

    module.exit_json(changed=changed, vgw=results)
def main():
    """Manage Amazon MSK (Kafka) cluster configurations."""
    module_args = dict(
        name=dict(type="str", required=True),
        description=dict(type="str", default=""),
        state=dict(choices=["present", "absent"], default="present"),
        config=dict(type="dict", aliases=["configuration"], default={}),
        kafka_versions=dict(type="list", elements="str"),
    )

    module = AnsibleAWSModule(argument_spec=module_args, supports_check_mode=True)

    client = module.client("kafka", retry_decorator=AWSRetry.jittered_backoff())

    # 'choices' restricts state to present/absent.
    if module.params["state"] == "present":
        changed, response = create_config(client, module)
    else:
        changed, response = delete_config(client, module)

    # In check mode with no existing configuration, return placeholder values so
    # tasks that reference these facts can still run during a dry run.
    if module.check_mode and not response.get("Arn"):
        arn = "arn:aws:kafka:region:account:configuration/name/id"
        revision = 1
        server_properties = ""
    else:
        arn = response.get("Arn")
        revision = response.get("Revision")
        server_properties = response.get("ServerProperties", "")

    module.exit_json(
        changed=changed,
        arn=arn,
        revision=revision,
        server_properties=server_properties,
        response=camel_dict_to_snake_dict(response),
    )
def main():
    """
    Main entry point for managing AWS Batch compute environments.

    :return dict: changed, batch_compute_environment_action, response
    """
    argument_spec = dict(
        state=dict(default='present', choices=['present', 'absent']),
        compute_environment_name=dict(required=True),
        type=dict(required=True, choices=['MANAGED', 'UNMANAGED']),
        compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
        service_role=dict(required=True),
        compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']),
        minv_cpus=dict(type='int', required=True),
        maxv_cpus=dict(type='int', required=True),
        desiredv_cpus=dict(type='int'),
        # These list options hold plain strings (instance types, subnet ids,
        # security group ids), so declare elements for item validation.
        instance_types=dict(type='list', required=True, elements='str'),
        image_id=dict(),
        subnets=dict(type='list', required=True, elements='str'),
        security_group_ids=dict(type='list', required=True, elements='str'),
        ec2_key_pair=dict(),
        instance_role=dict(required=True),
        tags=dict(type='dict'),
        bid_percentage=dict(type='int'),
        spot_iam_fleet_role=dict(),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    client = module.client('batch')

    validate_params(module)

    results = manage_state(module, client)

    # Keep user tag keys untouched; everything else becomes snake_case.
    module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags']))
def test_require_botocore_at_least(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd):
    """require_botocore_at_least() must pass/fail based on the installed botocore version."""
    monkeypatch.setattr(botocore, "__version__", compare_version)
    # Pin boto3 to a known value (tests run on both sides of it) so we can be
    # sure the comparison was made against botocore, not boto3.
    monkeypatch.setattr(boto3, "__version__", DUMMY_VERSION)

    # Minimal module we can invoke.
    module = AnsibleAWSModule(argument_spec=dict())
    with pytest.raises(SystemExit):
        module.require_botocore_at_least(desired_version)
        module.exit_json()

    out, _ = capfd.readouterr()
    return_val = json.loads(out)

    assert return_val.get("exception") is None
    assert return_val.get("invocation") is not None
    if at_least:
        assert return_val.get("failed") is None
    else:
        assert return_val.get("failed")
        # Ansible generates the message; only check the key details appear
        # rather than pinning the exact wording.
        assert desired_version in return_val.get("msg")
        assert "botocore" in return_val.get("msg")
        assert return_val.get("boto3_version") == DUMMY_VERSION
        assert return_val.get("botocore_version") == compare_version
def main():
    """Describe CloudWatch log groups."""
    argument_spec = dict(
        log_group_name=dict(),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
    # Deprecation shim for the pre-rename module name.
    if module._name == 'cloudwatchlogs_log_group_facts':
        module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'",
                         date='2021-12-01', collection_name='community.aws')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    logs = boto3_conn(module, conn_type='client', resource='logs',
                      region=region, endpoint=ec2_url, **aws_connect_kwargs)

    desc_log_group = describe_log_group(client=logs,
                                        log_group_name=module.params['log_group_name'],
                                        module=module)

    # Convert each boto3 camelCase log group to an ansible-friendly snake_case dict.
    snaked_groups = [camel_dict_to_snake_dict(group) for group in desc_log_group['logGroups']]

    module.exit_json(changed=False, log_groups=snaked_groups)
def main():
    """Enable, update or disable static website hosting on an S3 bucket."""
    argument_spec = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', required=True, choices=['present', 'absent']),
        suffix=dict(type='str', required=False, default='index.html'),
        error_key=dict(type='str', required=False),
        redirect_all_requests=dict(type='str', required=False),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        # Redirecting every request is incompatible with per-document settings.
        mutually_exclusive=[
            ['redirect_all_requests', 'suffix'],
            ['redirect_all_requests', 'error_key'],
        ],
    )

    try:
        client_connection = module.client('s3')
        resource_connection = module.resource('s3')
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

    # 'choices' restricts state to present/absent.
    if module.params.get("state") == 'present':
        enable_or_update_bucket_as_website(client_connection, resource_connection, module)
    else:
        disable_bucket_as_website(client_connection, module)
def main():
    """List KMS keys, optionally restricted by user-supplied filters."""
    argument_spec = dict(
        filters=dict(type='dict'),
        pending_deletion=dict(type='bool', default=False),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
    # Deprecation shim for the pre-rename module name.
    if module._name == 'aws_kms_facts':
        module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'",
                         date='2021-12-01', collection_name='community.aws')

    try:
        connection = module.client('kms')
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

    all_keys = get_kms_info(connection, module)
    filters = module.params['filters']
    matching_keys = [key for key in all_keys if key_matches_filters(key, filters)]
    module.exit_json(keys=matching_keys)
def main():
    """Create, update or delete an IAM user."""
    argument_spec = dict(
        name=dict(required=True, type='str'),
        password=dict(type='str', no_log=True),
        password_reset_required=dict(type='bool', default=False, no_log=False),
        update_password=dict(default='always', choices=['always', 'on_create'], no_log=False),
        remove_password=dict(type='bool', no_log=False),
        managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'),
        state=dict(choices=['present', 'absent'], required=True),
        purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']),
        tags=dict(type='dict'),
        purge_tags=dict(type='bool', default=True),
        wait=dict(type='bool', default=True),
        wait_timeout=dict(default=120, type='int'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # A password can be set or removed, never both at once.
        mutually_exclusive=[['password', 'remove_password']],
    )

    connection = module.client('iam')

    # 'choices' restricts state to present/absent.
    if module.params.get("state") == 'present':
        create_or_update_user(connection, module)
    else:
        destroy_user(connection, module)
def main():
    """Create, update or delete an AWS Step Functions state machine."""
    module_args = dict(
        name=dict(type='str', required=True),
        definition=dict(type='json'),
        role_arn=dict(type='str'),
        state=dict(choices=['present', 'absent'], default='present'),
        tags=dict(default=None, type='dict'),
        purge_tags=dict(default=True, type='bool'),
    )
    module = AnsibleAWSModule(
        argument_spec=module_args,
        # Creating/updating needs both an execution role and a definition.
        required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])],
        supports_check_mode=True
    )

    client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5))
    state = module.params.get('state')

    try:
        manage_state_machine(state, client, module)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to manage state machine')
def main():
    """Create, update or delete an IAM user (managed-policy-only variant)."""
    argument_spec = dict(
        name=dict(required=True, type='str'),
        # Items are policy names/ARNs, i.e. strings; declaring elements makes
        # item validation consistent with the other IAM user module here.
        managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'),
        state=dict(choices=['present', 'absent'], required=True),
        purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies'])
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    connection = module.client('iam')

    state = module.params.get("state")

    # 'choices' restricts state to present/absent.
    if state == 'present':
        create_or_update_user(connection, module)
    else:
        destroy_user(connection, module)
def main():
    """Gather facts about ElastiCache clusters."""
    argument_spec = dict(
        name=dict(required=False),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    # Deprecation shim for the pre-rename module name.
    if module._name == 'elasticache_facts':
        module.deprecate("The 'elasticache_facts' module has been renamed to 'elasticache_info'",
                         date='2021-12-01', collection_name='community.aws')

    client = module.client('elasticache')
    clusters = get_elasticache_clusters(client, module)
    module.exit_json(elasticache_clusters=clusters)
def setup_module_object():
    """Build the AnsibleAWSModule for retrieving an instance's Windows password.

    AnsibleAWSModule injects the common AWS connection arguments itself, so the
    legacy ec2_argument_spec() wrapper is redundant and has been dropped.
    key_file and key_data are alternative sources of the same private key, so
    they are declared mutually exclusive (matching the sibling implementation).
    """
    argument_spec = dict(
        instance_id=dict(required=True),
        key_file=dict(required=False, default=None, type='path'),
        key_passphrase=dict(no_log=True, default=None, required=False),
        key_data=dict(no_log=True, default=None, required=False),
        wait=dict(type='bool', default=False, required=False),
        wait_timeout=dict(default=120, required=False, type='int'),
    )
    mutually_exclusive = [['key_file', 'key_data']]
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              mutually_exclusive=mutually_exclusive)
    return module
def main():
    """List Auto Scaling launch configurations, with optional sorting and slicing."""
    argument_spec = dict(
        name=dict(required=False, default=[], type='list', elements='str'),
        sort=dict(required=False, default=None,
                  choices=['launch_configuration_name', 'image_id', 'created_time',
                           'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
        sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
        sort_start=dict(required=False, type='int'),
        sort_end=dict(required=False, type='int'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    try:
        connection = module.client('autoscaling')
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

    list_launch_configs(connection, module)
def setup_module_object():
    """Build and return the AnsibleAWSModule for the Windows password module."""
    argument_spec = dict(
        instance_id=dict(required=True),
        key_file=dict(required=False, default=None, type='path'),
        key_passphrase=dict(no_log=True, default=None, required=False),
        key_data=dict(no_log=True, default=None, required=False),
        wait=dict(type='bool', default=False, required=False),
        wait_timeout=dict(default=120, required=False, type='int'),
    )
    # key_file and key_data are two sources for the same private key.
    return AnsibleAWSModule(argument_spec=argument_spec,
                            mutually_exclusive=[['key_file', 'key_data']])
def main():
    """Create, update or remove an SES receipt rule set."""
    argument_spec = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        active=dict(type='bool'),
        force=dict(type='bool', default=False),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    # SES throttles far more aggressively than most AWS APIs (the docs say
    # roughly one call per second).  Normal usage is fine, but parallel CI runs
    # have tripped that limit, so every SES call gets a jittered backoff.
    client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    if module.params.get('state') == 'absent':
        remove_rule_set(client, module)
    else:
        create_or_update_rule_set(client, module)
def test_fail_botocore_minimal(self, monkeypatch, stdin, capfd):
    """fail_json_aws() on a bare BotoCoreError must fail cleanly and report versions."""
    monkeypatch.setattr(botocore, "__version__", "1.2.3")
    monkeypatch.setattr(boto3, "__version__", "1.2.4")

    # Minimal module we can invoke.
    module = AnsibleAWSModule(argument_spec=dict())

    try:
        raise botocore.exceptions.BotoCoreError()
    except botocore.exceptions.BotoCoreError as e:
        with pytest.raises(SystemExit) as ctx:
            module.fail_json_aws(e)
        assert ctx.value.code == 1

    out, _ = capfd.readouterr()
    return_val = json.loads(out)

    assert return_val.get("msg") == self.DEFAULT_CORE_MSG
    assert return_val.get("boto3_version") == "1.2.4"
    assert return_val.get("botocore_version") == "1.2.3"
    assert return_val.get("exception") is not None
    assert return_val.get("failed")
    # A bare BotoCoreError carries no AWS response payload.
    assert "response_metadata" not in return_val
    assert "error" not in return_val
def main():
    """Enable or disable server access logging on an S3 bucket."""
    argument_spec = dict(
        name=dict(required=True),
        target_bucket=dict(required=False, default=None),
        target_prefix=dict(required=False, default=""),
        state=dict(required=False, default='present', choices=['present', 'absent']),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
    connection = module.client('s3', retry_decorator=AWSRetry.jittered_backoff())

    # 'choices' restricts state to present/absent.
    if module.params.get("state") == 'present':
        enable_bucket_logging(connection, module)
    else:
        disable_bucket_logging(connection, module)
def test_warn_boto3_and_botocore(self, monkeypatch, stdin, capfd):
    """Outdated boto3 AND botocore must each produce exactly one warning."""
    monkeypatch.setattr(botocore, "__version__", self.OLD_BOTOCORE)
    monkeypatch.setattr(boto3, "__version__", self.OLD_BOTO3)

    # Minimal module we can invoke.
    module = AnsibleAWSModule(argument_spec=dict())
    with pytest.raises(SystemExit):
        module.exit_json()

    out, err = capfd.readouterr()
    return_val = json.loads(out)
    pprint(out)
    pprint(err)
    pprint(return_val)

    assert return_val.get("exception") is None
    assert return_val.get("invocation") is not None
    assert return_val.get("failed") is None
    assert return_val.get("error") is None
    assert return_val.get("warnings") is not None

    warnings = return_val.get("warnings")
    assert len(warnings) == 2

    # Index the warnings by which library they mention.
    warning_dict = {lib: warning
                    for warning in warnings
                    for lib in ('boto3', 'botocore')
                    if lib in warning}

    # Assert that we warned about each version, but stay relaxed
    # about the exact wording.
    assert warning_dict.get('boto3') is not None
    assert self.MINIMAL_BOTO3 in warning_dict.get('boto3')
    assert warning_dict.get('botocore') is not None
    assert self.MINIMAL_BOTOCORE in warning_dict.get('botocore')
def main():
    """Create or remove an EC2 VPN connection."""
    argument_spec = dict(
        state=dict(type='str', default='present', choices=['present', 'absent']),
        filters=dict(type='dict', default={}),
        vpn_gateway_id=dict(type='str'),
        tags=dict(default={}, type='dict'),
        connection_type=dict(default='ipsec.1', type='str'),
        tunnel_options=dict(no_log=True, type='list', default=[], elements='dict'),
        static_only=dict(default=False, type='bool'),
        customer_gateway_id=dict(type='str'),
        vpn_connection_id=dict(type='str'),
        purge_tags=dict(type='bool', default=False),
        routes=dict(type='list', default=[], elements='str'),
        purge_routes=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=600),
        delay=dict(type='int', default=15),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True)
    connection = module.client('ec2', retry_decorator=VPNRetry.jittered_backoff(retries=10))

    state = module.params.get('state')
    parameters = dict(module.params)

    try:
        # 'choices' restricts state to present/absent.
        if state == 'present':
            changed, response = ensure_present(connection, parameters, module.check_mode)
        else:
            changed, response = ensure_absent(connection, parameters, module.check_mode)
    except VPNConnectionException as e:
        # Prefer the wrapped botocore exception when one is attached.
        if e.exception:
            module.fail_json_aws(e.exception, msg=e.msg)
        else:
            module.fail_json(msg=e.msg)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
def main():
    """Create or delete an EC2 VPC endpoint."""
    argument_spec = dict(
        vpc_id=dict(),
        vpc_endpoint_type=dict(default='Gateway', choices=['Interface', 'Gateway', 'GatewayLoadBalancer']),
        service=dict(),
        policy=dict(type='json'),
        policy_file=dict(type='path', aliases=['policy_path']),
        state=dict(default='present', choices=['present', 'absent']),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=320, required=False),
        route_table_ids=dict(type='list', elements='str'),
        vpc_endpoint_id=dict(),
        client_token=dict(),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # An inline policy and a policy file are two sources for the same document.
        mutually_exclusive=[['policy', 'policy_file']],
        required_if=[
            ['state', 'present', ['vpc_id', 'service']],
            ['state', 'absent', ['vpc_endpoint_id']],
        ],
    )

    # Validate Requirements
    state = module.params.get('state')

    if module.params.get('policy_file'):
        module.deprecate('The policy_file option has been deprecated and'
                         ' will be removed after 2022-12-01',
                         date='2022-12-01', collection_name='community.aws')

    try:
        ec2 = module.client('ec2')
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

    # Ensure resource is present
    if state == 'present':
        (changed, results) = setup_creation(ec2, module)
    else:
        (changed, results) = setup_removal(ec2, module)

    module.exit_json(changed=changed, result=results)
def main():
    """Create, update or delete an AWS Glue connection."""
    argument_spec = dict(
        availability_zone=dict(type='str'),
        catalog_id=dict(type='str'),
        connection_properties=dict(type='dict'),
        connection_type=dict(type='str', default='JDBC',
                             choices=['CUSTOM', 'JDBC', 'KAFKA', 'MARKETPLACE', 'MONGODB', 'NETWORK']),
        description=dict(type='str'),
        match_criteria=dict(type='list', elements='str'),
        name=dict(required=True, type='str'),
        security_groups=dict(type='list', elements='str'),
        state=dict(required=True, choices=['present', 'absent'], type='str'),
        subnet_id=dict(type='str'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[
            ('state', 'present', ['connection_properties']),
            # NETWORK connections additionally need VPC placement details.
            ('connection_type', 'NETWORK', ['availability_zone', 'security_groups', 'subnet_id']),
        ],
        supports_check_mode=True,
    )

    retry_decorator = AWSRetry.jittered_backoff(retries=10)
    connection_glue = module.client('glue', retry_decorator=retry_decorator)
    connection_ec2 = module.client('ec2', retry_decorator=retry_decorator)

    glue_connection = _get_glue_connection(connection_glue, module)

    if module.params.get("state") == 'present':
        create_or_update_glue_connection(connection_glue, connection_ec2, module, glue_connection)
    else:
        delete_glue_connection(connection_glue, module, glue_connection)