def get_account_id(module, region=None, endpoint=None, **aws_connect_kwargs):
    """Return the AWS account id we are currently working on.

    get_account_id tries to find out the account that we are working on.
    It's not guaranteed that this will be easy so we try in several
    different ways. Giving either IAM or STS privileges to the account
    should be enough to permit this.
    """
    import re  # local import: only needed for the AccessDenied fallback below

    account_id = None
    try:
        # STS GetCallerIdentity is the cheapest call that reveals the account.
        sts_client = boto3_conn(module, conn_type='client', resource='sts',
                                region=region, endpoint=endpoint, **aws_connect_kwargs)
        account_id = sts_client.get_caller_identity().get('Account')
    except ClientError:
        try:
            # Fall back to IAM: the account id is the 5th field of the user ARN.
            iam_client = boto3_conn(module, conn_type='client', resource='iam',
                                    region=region, endpoint=endpoint, **aws_connect_kwargs)
            account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
        except ClientError as e:
            if (e.response['Error']['Code'] == 'AccessDenied'):
                # Even an AccessDenied message embeds our own ARN; scrape the
                # account id out of it.
                # BUG FIX: the previous code used e.message (removed in
                # Python 3) and called .search() on a string — str has no
                # .search(); the pattern must go through re.search().
                except_msg = to_native(e)
                match = re.search(r"arn:aws:iam::([0-9]{12,32}):\w+/", except_msg)
                if match is not None:
                    account_id = match.group(1)
            if account_id is None:
                module.fail_json_aws(e, msg="getting account information")
        except Exception as e:
            module.fail_json_aws(e, msg="getting account information")
    return account_id
def main():
    """Entry point for the ELB (application load balancer) module."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            access_logs_enabled=dict(type='bool'),
            access_logs_s3_bucket=dict(type='str'),
            access_logs_s3_prefix=dict(type='str'),
            deletion_protection=dict(default=False, type='bool'),
            idle_timeout=dict(type='int'),
            listeners=dict(type='list'),
            name=dict(required=True, type='str'),
            purge_listeners=dict(default=True, type='bool'),
            purge_tags=dict(default=True, type='bool'),
            subnets=dict(type='list'),
            security_groups=dict(type='list'),
            scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']),
            state=dict(choices=['present', 'absent'], type='str'),
            tags=dict(default={}, type='dict'),
            wait_timeout=dict(type='int'),
            wait=dict(type='bool')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=[
                               ('state', 'present', ['subnets', 'security_groups'])
                           ],
                           # BUG FIX: required_together must be a list of
                           # name-groups (list of lists). A bare list of
                           # strings makes AnsibleModule treat every string as
                           # its own group and iterate it character by
                           # character, so the constraint was never enforced.
                           required_together=[
                               ['access_logs_enabled', 'access_logs_s3_bucket', 'access_logs_s3_prefix']
                           ]
                           )

    # Quick check of listeners parameters
    listeners = module.params.get("listeners")
    if listeners is not None:
        for listener in listeners:
            for key in listener.keys():
                if key not in ['Protocol', 'Port', 'SslPolicy', 'Certificates', 'DefaultActions', 'Rules']:
                    module.fail_json(msg="listeners parameter contains invalid dict keys. Should be one of 'Protocol', "
                                         "'Port', 'SslPolicy', 'Certificates', 'DefaultActions', 'Rules'.")

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        connection = boto3_conn(module, conn_type='client', resource='elbv2',
                                region=region, endpoint=ec2_url, **aws_connect_params)
        connection_ec2 = boto3_conn(module, conn_type='client', resource='ec2',
                                    region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg="region must be specified")

    state = module.params.get("state")

    if state == 'present':
        create_or_update_elb(connection, connection_ec2, module)
    else:
        delete_elb(connection, module)
def main():
    """Entry point for the DynamoDB table module."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        hash_key_name=dict(type='str'),
        hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        range_key_name=dict(type='str'),
        range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        read_capacity=dict(default=1, type='int'),
        write_capacity=dict(default=1, type='int'),
        indexes=dict(default=[], type='list'),
        tags=dict(type='dict'),
        wait_for_active_timeout=dict(default=60, type='int'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    # Tagging is only implemented via boto3.
    if not HAS_BOTO3 and module.params.get('tags'):
        module.fail_json(msg='boto3 required when using tags for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    if module.params.get('tags'):
        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            boto3_dynamodb = boto3_conn(module, conn_type='client', resource='dynamodb',
                                        region=region, endpoint=ec2_url, **aws_connect_kwargs)
            if not hasattr(boto3_dynamodb, 'tag_resource'):
                module.fail_json(msg='boto3 connection does not have tag_resource(), likely due to using an old version')
            boto3_sts = boto3_conn(module, conn_type='client', resource='sts',
                                   region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except botocore.exceptions.NoCredentialsError as e:
            # BUG FIX: traceback.format_exc() takes no exception argument —
            # its optional parameter is a stack-depth limit, so passing the
            # exception object was incorrect.
            module.fail_json(msg='cannot connect to AWS', exception=traceback.format_exc())
    else:
        boto3_dynamodb = None
        boto3_sts = None

    state = module.params.get('state')
    if state == 'present':
        create_or_update_dynamo_table(connection, module, boto3_dynamodb, boto3_sts, region)
    elif state == 'absent':
        delete_dynamo_table(connection, module)
def __init__(self, module):
    """Set up boto3 clients (ECR + STS) and bookkeeping flags."""
    region, url, connect_kwargs = get_aws_connection_info(module, boto3=True)

    def client(service):
        # One boto3 client per AWS service, sharing the connection info.
        return boto3_conn(module, conn_type='client', resource=service,
                         region=region, endpoint=url, **connect_kwargs)

    self.ecr = client('ecr')
    self.sts = client('sts')
    self.check_mode = module.check_mode
    self.changed = False
    self.skipped = False
def main():
    """Entry point for the SES identity module."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            identity=dict(required=True, type='str'),
            state=dict(default='present', choices=['present', 'absent']),
            bounce_notifications=dict(type='dict'),
            complaint_notifications=dict(type='dict'),
            delivery_notifications=dict(type='dict'),
            feedback_forwarding=dict(default=True, type='bool'),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    # Each *_notifications dict may only carry 'topic' and 'include_headers'.
    for notification_type in ('bounce', 'complaint', 'delivery'):
        param_name = notification_type + '_notifications'
        arg_dict = module.params.get(param_name)
        if not arg_dict:
            continue
        extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
        if extra_keys:
            module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    # Allow up to 10 attempts to call the SES APIs before giving up (9 retries).
    # SES throttles far more aggressively than most AWS APIs (docs say 1 call
    # per second), and the ansible build runs multiple instances of the test in
    # parallel, blowing past boto's default retry budget. The back-off is
    # exponential, so the extra attempts let concurrent runs succeed.
    boto_core_config = Config(retries={'max_attempts': 9})
    connection = boto3_conn(module, conn_type='client', resource='ses',
                            region=region, endpoint=ec2_url,
                            config=boto_core_config, **aws_connect_params)

    state = module.params.get("state")

    if state == 'present':
        sts = boto3_conn(module, conn_type='client', resource='sts',
                         region=region, endpoint=ec2_url, **aws_connect_params)
        account_id = get_account_id(sts)
        create_or_update_identity(connection, module, region, account_id)
    else:
        destroy_identity(connection, module)
def main():
    """
    Get list of S3 buckets
    :return:
    """
    # Ensure we have an empty dict
    result = {}

    # Including ec2 argument spec
    module = AnsibleModule(argument_spec=ec2_argument_spec(), supports_check_mode=True)

    # Verify Boto3 is used
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    # Set up connection
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=HAS_BOTO3)

    # Set up connection
    if region:
        try:
            connection = boto3_conn(module, conn_type='client', resource='s3',
                                    region=region, endpoint=ec2_url, **aws_connect_params)
        except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
            # BUG FIX: NoCredentialsError and ProfileNotFound are
            # BotoCoreErrors — they have no .response attribute, and
            # e.message does not exist on Python 3, so the old handler itself
            # raised AttributeError instead of reporting the failure.
            module.fail_json(msg=str(e), exception=traceback.format_exc())
    else:
        module.fail_json(msg="AWS region must be specified (like: us-east-1)")

    # Gather results
    result['buckets'] = get_bucket_list(module, connection)

    # Send exit
    module.exit_json(msg="Retrieved s3 facts.", ansible_facts=result)
def main():
    """Entry point for the VPC endpoint module."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            vpc_id=dict(),
            service=dict(),
            policy=dict(type='json'),
            policy_file=dict(type='path'),
            state=dict(default='present', choices=['present', 'absent']),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='int', default=320, required=False),
            route_table_ids=dict(type='list'),
            vpc_endpoint_id=dict(),
            client_token=dict(),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['policy', 'policy_file']],
        required_if=[
            ['state', 'present', ['vpc_id', 'service']],
            ['state', 'absent', ['vpc_endpoint_id']],
        ]
    )

    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore and boto3 are required for this module')

    state = module.params.get('state')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    except NameError as e:
        # Getting around the get_aws_connection_info boto reliance for region
        # BUG FIX: e.message does not exist on Python 3 (use str(e)), and a
        # NameError has no .response attribute, so the old
        # camel_dict_to_snake_dict(e.response) call crashed the error path.
        if "global name 'boto' is not defined" in str(e):
            module.params['region'] = botocore.session.get_session().get_config_variable('region')
            if not module.params['region']:
                module.fail_json(msg="Error - no region provided")
        else:
            module.fail_json(msg="Can't retrieve connection information - " + str(e),
                             exception=traceback.format_exc())

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        ec2 = boto3_conn(module, conn_type='client', resource='ec2',
                         region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        # BUG FIX: NoCredentialsError has no .response either.
        module.fail_json(msg="Failed to connect to AWS due to wrong or missing credentials: %s" % str(e),
                         exception=traceback.format_exc())

    # Ensure resource is present
    if state == 'present':
        (changed, results) = setup_creation(ec2, module)
    else:
        (changed, results) = setup_removal(ec2, module)

    module.exit_json(changed=changed, result=results)
def main():
    """Entry point for the EC2 snapshot facts module."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            snapshot_ids=dict(default=[], type='list'),
            owner_ids=dict(default=[], type='list'),
            restorable_by_user_ids=dict(default=[], type='list'),
            filters=dict(default={}, type='dict')
        )
    )

    module = AnsibleModule(
        argument_spec=spec,
        mutually_exclusive=[
            ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters']
        ]
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, endpoint_url, connect_params = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="region must be specified")

    client = boto3_conn(module, conn_type='client', resource='ec2',
                        region=region, endpoint=endpoint_url, **connect_params)
    list_ec2_snapshots(client, module)
def core(module):
    """Dispatch the requested Lightsail instance operation and exit the module."""
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg='region must be specified')

    client = None
    try:
        client = boto3_conn(module, conn_type='client', resource='lightsail',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
        # BUG FIX: fail_json() accepts keyword arguments only; the message was
        # previously passed positionally, which raised a TypeError instead of
        # reporting the real connection failure.
        module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e,
                         exception=traceback.format_exc())

    changed = False
    state = module.params['state']
    name = module.params['name']

    if state == 'absent':
        changed, instance_dict = delete_instance(module, client, name)
    elif state in ('running', 'stopped'):
        changed, instance_dict = startstop_instance(module, client, name, state)
    elif state == 'restarted':
        changed, instance_dict = restart_instance(module, client, name)
    elif state == 'present':
        changed, instance_dict = create_instance(module, client, name)

    module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict))
def __init__(self, module):
    """Create a boto3 CloudFront client from the module's connection settings."""
    self.module = module
    region, endpoint, connect_kwargs = get_aws_connection_info(module, boto3=True)
    self.client = boto3_conn(module,
                             conn_type='client',
                             resource='cloudfront',
                             region=region,
                             endpoint=endpoint,
                             **connect_kwargs)
def setup_client(module):
    """Return a boto3 'lambda' client, failing the module if no region is set."""
    region, endpoint, connect_params = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="region must be specified")
    return boto3_conn(module, conn_type='client', resource='lambda',
                      region=region, endpoint=endpoint, **connect_params)
def main():
    """Entry point for the NAT gateway facts module."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            filters=dict(default={}, type='dict'),
            nat_gateway_ids=dict(default=[], type='list'),
        )
    )

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore/boto3 is required.')

    try:
        region, endpoint, connect_params = get_aws_connection_info(module, boto3=True)
        if not region:
            module.fail_json(msg="region must be specified")
        client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=endpoint, **connect_params)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=str(e))

    module.exit_json(result=get_nat_gateways(client, module))
def main():
    """Entry point for the VPC peering connection module."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            vpc_id=dict(),
            peer_vpc_id=dict(),
            peer_region=dict(),
            peering_id=dict(),
            peer_owner_id=dict(),
            tags=dict(required=False, type='dict'),
            profile=dict(),
            state=dict(default='present', choices=['present', 'absent', 'accept', 'reject'])
        )
    )
    module = AnsibleModule(argument_spec=spec)

    if not HAS_BOTO3:
        module.fail_json(msg='json, botocore and boto3 are required.')

    state = module.params.get('state')

    try:
        region, endpoint, connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=endpoint, **connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - " + str(e))

    if state == 'present':
        changed, results = create_peer_connection(client, module)
        module.exit_json(changed=changed, peering_id=results)
    elif state == 'absent':
        remove_peer_connection(client, module)
    else:
        # 'accept' / 'reject' share one handler that takes the state itself.
        changed, results = accept_reject(state, client, module)
        module.exit_json(changed=changed, peering_id=results)
def main():
    """Entry point for the STS assume-role module."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            # FIX: a required parameter cannot also carry a default —
            # AnsibleModule warns about that combination, and the redundant
            # default=None entries (None is already the implicit default)
            # were removed throughout.
            role_arn=dict(required=True),
            role_session_name=dict(required=True),
            duration_seconds=dict(required=False, type='int'),
            external_id=dict(required=False),
            policy=dict(required=False),
            mfa_serial_number=dict(required=False),
            mfa_token=dict(required=False)
        )
    )

    module = AnsibleAWSModule(argument_spec=argument_spec)

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if region:
        connection = boto3_conn(module, conn_type='client', resource='sts',
                                region=region, endpoint=ec2_url, **aws_connect_kwargs)
    else:
        module.fail_json(msg="region must be specified")

    assume_role_policy(connection, module)
def main():
    """Entry point for the EBS snapshot copy module."""
    spec = ec2_argument_spec()
    spec.update(dict(
        source_region=dict(required=True),
        source_snapshot_id=dict(required=True),
        description=dict(default=''),
        encrypted=dict(type='bool', default=False, required=False),
        kms_key_id=dict(type='str', required=False),
        wait=dict(type='bool', default=False),
        tags=dict(type='dict')))

    module = AnsibleModule(argument_spec=spec)

    if not HAS_BOTO3:
        module.fail_json(msg='botocore and boto3 are required.')

    region, endpoint, connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="Region must be provided.")

    try:
        client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=endpoint, **connect_kwargs)
    except (NoCredentialsError, ProfileNotFound) as e:
        module.fail_json(msg="Can't authorize connection - %s" % to_native(e))

    copy_snapshot(module, client)
def main():
    """Entry point for the IAM role module."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            name=dict(type='str', required=True),
            path=dict(type='str', default="/"),
            assume_role_policy_document=dict(type='json'),
            managed_policy=dict(type='list', aliases=['managed_policies']),
            state=dict(type='str', choices=['present', 'absent'], default='present'),
            description=dict(type='str'),
            create_instance_profile=dict(type='bool', default=True),
            purge_policies=dict(type='bool', default=True),
        )
    )
    module = AnsibleModule(argument_spec=spec,
                           required_if=[('state', 'present', ['assume_role_policy_document'])],
                           supports_check_mode=True)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, endpoint, connect_params = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='iam',
                        region=region, endpoint=endpoint, **connect_params)

    if module.params.get("state") == 'present':
        create_or_update_role(client, module)
    else:
        destroy_role(client, module)
def main():
    """Entry point for the WAF web ACL module."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            name=dict(required=True),
            default_action=dict(choices=['block', 'allow', 'count']),
            metric_name=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            rules=dict(type='list'),
            purge_rules=dict(type='bool', default=False)
        ),
    )
    module = AnsibleAWSModule(argument_spec=spec,
                              required_if=[['state', 'present', ['default_action', 'rules']]])

    region, endpoint, connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='waf',
                        region=region, endpoint=endpoint, **connect_kwargs)

    if module.params.get('state') == 'present':
        changed, results = ensure_web_acl_present(client, module)
    else:
        changed, results = ensure_web_acl_absent(client, module)

    module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results))
def main():
    """Entry point for the ASG lifecycle hook module."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            autoscaling_group_name=dict(required=True, type='str'),
            lifecycle_hook_name=dict(required=True, type='str'),
            transition=dict(type='str', choices=['autoscaling:EC2_INSTANCE_TERMINATING',
                                                 'autoscaling:EC2_INSTANCE_LAUNCHING']),
            role_arn=dict(type='str'),
            notification_target_arn=dict(type='str'),
            notification_meta_data=dict(type='str'),
            heartbeat_timeout=dict(type='int'),
            default_result=dict(default='ABANDON', choices=['ABANDON', 'CONTINUE']),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleAWSModule(argument_spec=spec,
                              required_if=[['state', 'present', ['transition']]])

    state = module.params.get('state')
    region, endpoint, connect_params = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='autoscaling',
                        region=region, endpoint=endpoint, **connect_params)

    changed = False
    if state == 'present':
        changed = create_lifecycle_hook(client, module)
    elif state == 'absent':
        changed = delete_lifecycle_hook(client, module)

    module.exit_json(changed=changed)
def main():
    """Entry point for the EC2 network ACL module."""
    spec = ec2_argument_spec()
    spec.update(dict(
        vpc_id=dict(),
        name=dict(),
        nacl_id=dict(),
        subnets=dict(required=False, type='list', default=list()),
        tags=dict(required=False, type='dict'),
        ingress=dict(required=False, type='list', default=list()),
        egress=dict(required=False, type='list', default=list()),
        state=dict(default='present', choices=['present', 'absent']),
    ))
    module = AnsibleModule(argument_spec=spec,
                           supports_check_mode=True,
                           required_one_of=[['name', 'nacl_id']],
                           required_if=[['state', 'present', ['vpc_id']]])

    if not HAS_BOTO3:
        module.fail_json(msg='json, botocore and boto3 are required.')

    state = module.params.get('state').lower()

    try:
        region, endpoint, connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=endpoint, **connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - %s" % str(e))

    # Dispatch table keyed on the requested state.
    handlers = {
        "present": setup_network_acl,
        "absent": remove_network_acl
    }
    changed, results = handlers[state](client, module)
    module.exit_json(changed=changed, nacl_id=results)
def main():
    """Entry point for the VPC endpoint facts module."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            query=dict(choices=['services', 'endpoints'], required=True),
            filters=dict(default={}, type='dict'),
            vpc_endpoint_ids=dict(type='list'),
        )
    )

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore and boto3 are required.')

    try:
        region, endpoint, connect_params = get_aws_connection_info(module, boto3=True)
        if not region:
            module.fail_json(msg="region must be specified")
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=endpoint, **connect_params)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=str(e))

    # Dispatch on the requested query type.
    handlers = {
        'services': get_supported_services,
        'endpoints': get_endpoints,
    }
    results = handlers[module.params.get('query')](connection, module)
    module.exit_json(**results)
def main():
    """Entry point for the ELB target group facts module."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            load_balancer_arn=dict(type='str'),
            target_group_arns=dict(type='list'),
            names=dict(type='list')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           # BUG FIX: mutually_exclusive must be a list of
                           # name-groups (list of lists). A flat list of
                           # strings makes AnsibleModule treat each string as
                           # its own group and iterate it character by
                           # character, so the exclusivity was never enforced.
                           mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']],
                           supports_check_mode=True
                           )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        connection = boto3_conn(module, conn_type='client', resource='elbv2',
                                region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg="region must be specified")

    list_target_groups(connection, module)
def main():
    """Entry point for the snapshot/ENI facts module (boto3 preferred, boto fallback)."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            filters=dict(default=None, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    if HAS_BOTO3:
        # Preferred path: boto3 client.
        region, endpoint, connect_params = get_aws_connection_info(module, boto3=True)
        if not region:
            module.fail_json(msg="region must be specified")
        client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=endpoint, **connect_params)
        list_ec2_snapshots_boto3(client, module)
    else:
        # Legacy path: classic boto connection.
        region, endpoint, connect_params = get_aws_connection_info(module)
        if not region:
            module.fail_json(msg="region must be specified")
        try:
            connection = connect_to_aws(boto.ec2, region, **connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
        list_eni(connection, module)
def main():
    """Entry point for the IAM user module."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            name=dict(required=True, type='str'),
            managed_policy=dict(default=[], type='list'),
            state=dict(choices=['present', 'absent'], required=True),
            purge_policy=dict(default=False, type='bool')
        )
    )

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, endpoint, connect_params = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='iam',
                        region=region, endpoint=endpoint, **connect_params)

    if module.params.get("state") == 'present':
        create_or_update_user(client, module)
    else:
        destroy_user(client, module)
def main():
    """Entry point for the Direct Connect gateway module."""
    spec = ec2_argument_spec()
    spec.update(dict(state=dict(default='present', choices=['present', 'absent']),
                     name=dict(),
                     amazon_asn=dict(),
                     virtual_gateway_id=dict(),
                     direct_connect_gateway_id=dict(),
                     wait_timeout=dict(type='int', default=320)))
    module = AnsibleModule(argument_spec=spec,
                           required_if=[('state', 'present', ['name', 'amazon_asn']),
                                        ('state', 'absent', ['direct_connect_gateway_id'])])

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')

    state = module.params.get('state')

    region, endpoint, connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='directconnect',
                        region=region, endpoint=endpoint, **connect_kwargs)

    if state == 'present':
        changed, results = ensure_present(client, module)
    elif state == 'absent':
        changed = ensure_absent(client, module)
        results = {}

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(results))
def main():
    """Entry point for the EC2 key pair module."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True),
            key_material=dict(),
            force=dict(type='bool', default=True),
            state=dict(default='present', choices=['present', 'absent']),
            wait=dict(type='bool', default=False),
            # FIX: wait_timeout had no declared type, so user-supplied values
            # arrived as strings; declare it as an int like other timeouts.
            # The default of 300 is unchanged.
            wait_timeout=dict(default=300, type='int')
        )
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    ec2_client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_params)

    name = module.params['name']
    state = module.params.get('state')
    key_material = module.params.get('key_material')
    force = module.params.get('force')

    if state == 'absent':
        delete_key_pair(module, ec2_client, name)
    elif state == 'present':
        create_key_pair(module, ec2_client, name, key_material, force)
def main():
    """
    Gather facts about one or all Lambda functions.

    :return dict: ansible facts
    """
    argument_spec = dict(
        function_name=dict(required=False, default=None, aliases=['function', 'name']),
        query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'),
        event_source_arn=dict(required=False, default=None)
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[],
        required_together=[]
    )

    # validate function_name if present
    function_name = module.params['function_name']
    if function_name:
        if not re.search(r"^[\w\-:]+$", function_name):
            module.fail_json(
                msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
            )
        if len(function_name) > 64:
            module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    try:
        region, endpoint, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        aws_connect_kwargs.update(dict(region=region,
                                       endpoint=endpoint,
                                       conn_type='client',
                                       resource='lambda'
                                       ))
        client = boto3_conn(module, **aws_connect_kwargs)
    except ClientError as e:
        module.fail_json_aws(e, "trying to set up boto connection")

    # Map each query value to the name of its fact-gathering function in this
    # module, then resolve and invoke it.
    invocations = dict(
        aliases='alias_details',
        all='all_details',
        config='config_details',
        mappings='mapping_details',
        policy='policy_details',
        versions='version_details',
    )
    this_module = sys.modules[__name__]
    query_handler = getattr(this_module, invocations[module.params['query']])
    all_facts = fix_return(query_handler(client, module))

    results = dict(ansible_facts={'lambda_facts': {'function': all_facts}}, changed=False)

    if module.check_mode:
        results['msg'] = 'Check mode set but ignored for fact gathering only.'

    module.exit_json(**results)
def main():
    """Entry point: list available Lightsail regions."""
    argument_spec = ec2_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='Python module "boto3" is missing, please install it')

    if not HAS_BOTOCORE:
        module.fail_json(msg='Python module "botocore" is missing, please install it')

    try:
        region, endpoint, connect_kwargs = get_aws_connection_info(module, boto3=True)

        client = None
        try:
            client = boto3_conn(module, conn_type='client', resource='lightsail',
                                region=region, endpoint=endpoint, **connect_kwargs)
        except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
            module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e,
                             exception=traceback.format_exc())

        response = client.get_regions(
            includeAvailabilityZones=False
        )
        module.exit_json(changed=False, results=response)
    except (botocore.exceptions.ClientError, Exception) as e:
        # Catch-all boundary: report anything unexpected via fail_json.
        module.fail_json(msg=str(e), exception=traceback.format_exc())
def main():
    """Entry point for the WAF facts module."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            name=dict(required=False),
        )
    )

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required.')

    try:
        region, endpoint, connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='waf',
                            region=region, endpoint=endpoint, **connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - " + str(e))

    web_acls = list_web_acls(client, module)
    name = module.params['name']
    if name:
        # Narrow to the ACL(s) whose Name matches exactly; fail if none do.
        web_acls = [acl for acl in web_acls if acl['Name'] == name]
        if not web_acls:
            module.fail_json(msg="WAF named %s not found" % name)
    module.exit_json(wafs=[get_web_acl(client, module, acl['WebACLId'])
                           for acl in web_acls])
def main():
    """Entry point for the S3 bucket CORS module."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            name=dict(required=True, type='str'),
            rules=dict(type='list'),
            state=dict(type='str', choices=['present', 'absent'], required=True)
        )
    )

    module = AnsibleModule(argument_spec=spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    region, endpoint, connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='s3',
                        region=region, endpoint=endpoint, **connect_kwargs)

    state = module.params.get("state")
    if state == 'present':
        create_or_update_bucket_cors(client, module)
    elif state == 'absent':
        destroy_bucket_cors(client, module)
def main():
    """Entry point for the AMI copy module."""
    spec = ec2_argument_spec()
    spec.update(dict(
        source_region=dict(required=True),
        source_image_id=dict(required=True),
        name=dict(required=True),
        description=dict(default=''),
        encrypted=dict(type='bool', required=False),
        kms_key_id=dict(type='str', required=False),
        wait=dict(type='bool', default=False, required=False),
        tags=dict(type='dict')))

    module = AnsibleModule(argument_spec=spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    # TODO: Check botocore version
    region, endpoint, connect_params = get_aws_connection_info(module, boto3=True)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    try:
        ec2 = boto3_conn(module, conn_type='client', resource='ec2',
                         region=region, endpoint=endpoint, **connect_params)
    except NoRegionError:
        module.fail_json(msg='AWS Region is required')
    copy_image(ec2, module)
def main():
    """Entry point for the ec2_ami module: register, update, or deregister an AMI."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(instance_id=dict(),
             image_id=dict(),
             architecture=dict(default='x86_64'),
             kernel_id=dict(),
             virtualization_type=dict(default='hvm'),
             root_device_name=dict(),
             delete_snapshot=dict(default=False, type='bool'),
             name=dict(),
             wait=dict(type='bool', default=False),
             wait_timeout=dict(default=900, type='int'),
             description=dict(default=''),
             no_reboot=dict(default=False, type='bool'),
             state=dict(default='present', choices=['present', 'absent']),
             device_mapping=dict(type='list'),
             tags=dict(type='dict'),
             launch_permissions=dict(type='dict'),
             image_location=dict(),
             enhanced_networking=dict(type='bool'),
             billing_products=dict(type='list'),
             ramdisk_id=dict(),
             sriov_net_support=dict(),
             purge_tags=dict(type='bool', default=False)))

    module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[
        ['state', 'absent', ['image_id']],
    ])

    # Using a required_one_of=[['name', 'image_id']] overrides the message that should be
    # provided by the required_if for state=absent, so check manually instead
    if not any([module.params['image_id'], module.params['name']]):
        module.fail_json(msg="one of the following is required: name, image_id")

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoRegionError:
        module.fail_json(msg=(
            "Region must be specified as a parameter in AWS_DEFAULT_REGION environment variable or in boto configuration file."
        ))

    if module.params.get('state') == 'absent':
        deregister_image(module, connection)
    elif module.params.get('state') == 'present':
        # NOTE(review): update_image presumably exits the module itself when image_id is
        # supplied (otherwise create_image below would also run) -- confirm against helper.
        if module.params.get('image_id'):
            update_image(module, connection, module.params.get('image_id'))
        # A brand-new image needs a source: a running instance or an EBS-snapshot mapping.
        if not module.params.get('instance_id') and not module.params.get('device_mapping'):
            module.fail_json(msg="The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image.")
        create_image(module, connection)
def find_asgs(conn, module, name=None, tags=None):
    """
    Describe all Auto Scaling groups and return those matching the filters.

    Args:
        conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
        module: AnsibleModule, used for connection info and error reporting.
        name (str): Optional name of the ASG you are looking for. Treated as an
            anchored regular-expression prefix match, not an exact match.
        tags (dict): Optional dictionary of tags and values to search for.

    Basic Usage:
        >>> name = 'public-webapp-production'
        >>> tags = {'env': 'production'}
        >>> conn = boto3.client('autoscaling', region_name='us-west-2')
        >>> results = find_asgs(conn, module, name=name)

    Returns:
        List of snake_cased ASG dicts, e.g.
        [
            {
                "auto_scaling_group_arn": (
                    "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:"
                    "autoScalingGroupName/public-webapp-production"
                ),
                "auto_scaling_group_name": "public-webapp-production",
                "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
                "created_time": "2016-02-02T23:28:42.481000+00:00",
                "default_cooldown": 300,
                "desired_capacity": 2,
                "enabled_metrics": [],
                "health_check_grace_period": 300,
                "health_check_type": "ELB",
                "instances": [
                    {
                        "availability_zone": "us-west-2c",
                        "health_status": "Healthy",
                        "instance_id": "i-047a12cb",
                        "launch_configuration_name": "public-webapp-production-1",
                        "lifecycle_state": "InService",
                        "protected_from_scale_in": false
                    },
                    {
                        "availability_zone": "us-west-2a",
                        "health_status": "Healthy",
                        "instance_id": "i-7a29df2c",
                        "launch_configuration_name": "public-webapp-production-1",
                        "lifecycle_state": "InService",
                        "protected_from_scale_in": false
                    }
                ],
                "launch_config_name": "public-webapp-production-1",
                "launch_configuration_name": "public-webapp-production-1",
                "load_balancer_names": ["public-webapp-production-lb"],
                "max_size": 4,
                "min_size": 2,
                "new_instances_protected_from_scale_in": false,
                "placement_group": None,
                "status": None,
                "suspended_processes": [],
                "tags": [
                    {
                        "key": "Name",
                        "propagate_at_launch": true,
                        "resource_id": "public-webapp-production",
                        "resource_type": "auto-scaling-group",
                        "value": "public-webapp-production"
                    },
                    {
                        "key": "env",
                        "propagate_at_launch": true,
                        "resource_id": "public-webapp-production",
                        "resource_type": "auto-scaling-group",
                        "value": "production"
                    }
                ],
                "target_group_names": [],
                "target_group_arns": [],
                "termination_policies": [
                    "Default"
                ],
                "vpc_zone_identifier": [
                    "subnet-a1b1c1d1",
                    "subnet-a2b2c2d2",
                    "subnet-a3b3c3d3"
                ]
            }
        ]
    """
    try:
        # Paginate through every ASG in the account/region.
        asgs_paginator = conn.get_paginator('describe_auto_scaling_groups')
        asgs = asgs_paginator.paginate().build_full_result()
    except ClientError as e:
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    if not asgs:
        return asgs

    # Build an elbv2 client so target-group ARNs can be resolved to names below.
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        elbv2 = boto3_conn(module, conn_type='client', resource='elbv2',
                           region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except ClientError as e:
        # This is nice to have, not essential
        elbv2 = None
    matched_asgs = []

    if name is not None:
        # Compile the user-supplied name as an anchored prefix regex; matching
        # against it below means name acts as a prefix pattern, not an exact match.
        name_prog = re.compile(r'^' + name)

    for asg in asgs['AutoScalingGroups']:
        if name:
            matched_name = name_prog.search(asg['AutoScalingGroupName'])
        else:
            matched_name = True

        if tags:
            matched_tags = match_asg_tags(tags, asg)
        else:
            matched_tags = True

        if matched_name and matched_tags:
            asg = camel_dict_to_snake_dict(asg)
            # compatibility with ec2_asg module
            asg['launch_config_name'] = asg['launch_configuration_name']
            # workaround for https://github.com/ansible/ansible/pull/25015
            if 'target_group_ar_ns' in asg:
                asg['target_group_arns'] = asg['target_group_ar_ns']
                del (asg['target_group_ar_ns'])
            if asg.get('target_group_arns'):
                if elbv2:
                    # Resolve each target-group ARN to its human-readable name.
                    try:
                        tg_paginator = elbv2.get_paginator('describe_target_groups')
                        tg_result = tg_paginator.paginate(
                            TargetGroupArns=asg['target_group_arns']
                        ).build_full_result()
                        asg['target_group_names'] = [
                            tg['TargetGroupName']
                            for tg in tg_result['TargetGroups']
                        ]
                    except ClientError as e:
                        # A stale ARN is tolerated: report no names rather than fail.
                        if e.response['Error']['Code'] == 'TargetGroupNotFound':
                            asg['target_group_names'] = []
            else:
                asg['target_group_names'] = []
            matched_asgs.append(asg)

    return matched_asgs
def main():
    """Entry point: manage an ElastiCache parameter group (create/modify/delete/reset)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            group_family=dict(type='str',
                              choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8',
                                       'redis3.2', 'redis4.0', 'redis5.0']),
            name=dict(required=True, type='str'),
            description=dict(default='', type='str'),
            state=dict(required=True, choices=['present', 'absent', 'reset']),
            values=dict(type='dict'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    # NOTE(review): the check is for boto3 but the message says 'boto' -- likely a stale message.
    if not HAS_BOTO3:
        module.fail_json(msg='boto required for this module')

    parameter_group_family = module.params.get('group_family')
    parameter_group_name = module.params.get('name')
    group_description = module.params.get('description')
    state = module.params.get('state')
    values = module.params.get('values')

    # Retrieve any AWS settings from the environment.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if not region:
        module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")

    connection = boto3_conn(module, conn_type='client', resource='elasticache',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)

    # exists holds the current group description (truthy) or a falsy value when absent.
    exists = get_info(connection, parameter_group_name)

    # check that the needed requirements are available
    if state == 'present' and not (exists or parameter_group_family):
        module.fail_json(msg="Creating a group requires a family group.")
    elif state == 'reset' and not exists:
        module.fail_json(msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name)

    # Taking action
    changed = False
    if state == 'present':
        if exists:
            # confirm that the group exists without any actions
            if not values:
                response = exists
                changed = False
            # modify existing group
            else:
                modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
                changed, values = check_valid_modification(module, values, modifiable_params)
                response = modify(module, connection, parameter_group_name, values)
        # create group
        else:
            response, changed = create(module, connection, parameter_group_name, parameter_group_family, group_description)
            # Apply any initial parameter values immediately after creation.
            if values:
                modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
                changed, values = check_valid_modification(module, values, modifiable_params)
                response = modify(module, connection, parameter_group_name, values)
    elif state == 'absent':
        if exists:
            # delete group
            response, changed = delete(module, connection, parameter_group_name)
        else:
            # Nothing to delete: report no change.
            response = {}
            changed = False
    elif state == 'reset':
        response, changed = reset(module, connection, parameter_group_name, values)

    facts_result = dict(changed=changed, elasticache=camel_dict_to_snake_dict(response))

    module.exit_json(**facts_result)
def main():
    """Entry point for the ec2_group module: ensure an EC2 security group and its
    ingress/egress rules, and tags match the requested state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(),
        group_id=dict(),
        description=dict(),
        vpc_id=dict(),
        rules=dict(type='list'),
        rules_egress=dict(type='list'),
        state=dict(default='present', type='str', choices=['present', 'absent']),
        purge_rules=dict(default=True, required=False, type='bool'),
        purge_rules_egress=dict(default=True, required=False, type='bool'),
        tags=dict(required=False, type='dict', aliases=['resource_tags']),
        purge_tags=dict(default=True, required=False, type='bool')))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['name', 'group_id']],
        required_if=[['state', 'present', ['name']]],
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    name = module.params['name']
    group_id = module.params['group_id']
    description = module.params['description']
    vpc_id = module.params['vpc_id']
    # Normalize the rule lists: expand port ranges/source lists, then de-duplicate.
    rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules'])))
    rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules_egress'])))
    state = module.params.get('state')
    purge_rules = module.params['purge_rules']
    purge_rules_egress = module.params['purge_rules_egress']
    tags = module.params['tags']
    purge_tags = module.params['purge_tags']

    if state == 'present' and not description:
        module.fail_json(msg='Must provide description when state is present.')

    changed = False
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='ec2',
                        endpoint=ec2_url, region=region, **aws_connect_params)

    if not has_rule_description_attr(client):
        # NOTE(review): due to operator precedence this parses as
        # `rules if rules else ([] + (rules_egress if rules_egress else []))`,
        # so egress rules are only scanned when `rules` is empty -- confirm intent.
        all_rules = rules if rules else [] + rules_egress if rules_egress else []
        if any('rule_desc' in rule for rule in all_rules):
            module.fail_json(msg="Using rule descriptions requires botocore version >= 1.7.2.")

    group = None
    groups = dict()
    security_groups = []
    # do get all security groups
    # find if the group is present
    try:
        response = get_security_groups_with_backoff(client)
        security_groups = response.get('SecurityGroups', [])
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Error in describe_security_groups: %s" % "Unable to locate credentials",
                         exception=traceback.format_exc())
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Error in describe_security_groups: %s" % e,
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))

    # Index groups both by id and by name, and locate the requested group.
    for sg in security_groups:
        groups[sg['GroupId']] = sg
        groupName = sg['GroupName']
        if groupName in groups:
            # Prioritise groups from the current VPC
            # even if current VPC is EC2-Classic
            if groups[groupName].get('VpcId') == vpc_id:
                # Group saved already matches current VPC, change nothing
                pass
            elif vpc_id is None and groups[groupName].get('VpcId') is None:
                # We're in EC2 classic, and the group already saved is as well
                # No VPC groups can be used alongside EC2 classic groups
                pass
            else:
                # the current SG stored has no direct match, so we can replace it
                groups[groupName] = sg
        else:
            groups[groupName] = sg

        if group_id and sg['GroupId'] == group_id:
            group = sg
        elif groupName == name and (vpc_id is None or sg.get('VpcId') == vpc_id):
            group = sg

    # Ensure requested group is absent
    if state == 'absent':
        if group:
            # found a match, delete it
            try:
                if not module.check_mode:
                    client.delete_security_group(GroupId=group['GroupId'])
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            else:
                group = None
                changed = True
        else:
            # no match found, no changes required
            pass

    # Ensure requested group is present
    elif state == 'present':
        if group:
            # existing group
            if group['Description'] != description:
                module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
                            "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
        # if the group doesn't exist, create it now
        else:
            # no match found, create it
            if not module.check_mode:
                params = dict(GroupName=name, Description=description)
                if vpc_id:
                    params['VpcId'] = vpc_id
                group = client.create_security_group(**params)
                # When a group is created, an egress_rule ALLOW ALL
                # to 0.0.0.0/0 is added automatically but it's not
                # reflected in the object returned by the AWS API
                # call. We re-read the group for getting an updated object
                # amazon sometimes takes a couple seconds to update the security group so wait till it exists
                while True:
                    group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
                    if group.get('VpcId') and not group.get('IpPermissionsEgress'):
                        pass
                    else:
                        break
            changed = True

        if tags is not None:
            # Reconcile tags: delete removed ones (when purging), then add/update the rest.
            current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
            tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
            if tags_to_delete:
                try:
                    client.delete_tags(Resources=[group['GroupId']],
                                       Tags=[{'Key': tag} for tag in tags_to_delete])
                except botocore.exceptions.ClientError as e:
                    module.fail_json(msg=e.message, exception=traceback.format_exc(),
                                     **camel_dict_to_snake_dict(e.response))
                changed = True

            # Add/update tags
            if tags_need_modify:
                try:
                    client.create_tags(Resources=[group['GroupId']],
                                       Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
                except botocore.exceptions.ClientError as e:
                    module.fail_json(msg=e.message, exception=traceback.format_exc(),
                                     **camel_dict_to_snake_dict(e.response))
                changed = True

    else:
        module.fail_json(msg="Unsupported state requested: %s" % state)

    # create a lookup for all existing rules on the group
    ip_permission = []
    if group:
        # Manage ingress rules
        groupRules = {}
        add_rules_to_lookup(group['IpPermissions'], group['GroupId'], 'in', groupRules)
        # Now, go through all provided rules and ensure they are there.
        if rules is not None:
            for rule in rules:
                validate_rule(module, rule)
                group_id, ip, ipv6, target_group_created = get_target_from_rule(
                    module, client, rule, name, group, groups, vpc_id)
                if target_group_created:
                    changed = True

                if rule['proto'] in ('all', '-1', -1):
                    rule['proto'] = -1
                    rule['from_port'] = None
                    rule['to_port'] = None

                if group_id:
                    # Rule targets another security group.
                    rule_id = make_rule_key('in', rule, group['GroupId'], group_id)
                    if rule_id in groupRules:
                        # Rule already exists: maybe update its description, and keep it
                        # (removal from groupRules protects it from the purge below).
                        changed = check_rule_desc_update_for_group_grant(client, module, rule, group, groupRules,
                                                                         rule_id, group_id, rule_type='in',
                                                                         changed=changed)
                        del groupRules[rule_id]
                    else:
                        if not module.check_mode:
                            ip_permission = serialize_group_grant(group_id, rule)
                            if ip_permission:
                                ips = ip_permission
                                if vpc_id:
                                    [useridpair.update({'VpcId': vpc_id}) for useridpair in
                                     ip_permission.get('UserIdGroupPairs', [])]
                                try:
                                    client.authorize_security_group_ingress(GroupId=group['GroupId'],
                                                                            IpPermissions=[ips])
                                except botocore.exceptions.ClientError as e:
                                    module.fail_json(
                                        msg="Unable to authorize ingress for group %s security group '%s' - %s" %
                                            (group_id, group['GroupName'], e),
                                        exception=traceback.format_exc(),
                                        **camel_dict_to_snake_dict(e.response))
                        changed = True
                elif ip:
                    # Convert ip to list we can iterate over
                    if ip and not isinstance(ip, list):
                        ip = [ip]
                    changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ip,
                                                          ip_permission, module, rule, "ipv4")
                elif ipv6:
                    # Convert ip to list we can iterate over
                    if not isinstance(ipv6, list):
                        ipv6 = [ipv6]
                    # If rule already exists, don't later delete it
                    changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ipv6,
                                                          ip_permission, module, rule, "ipv6")

        # Finally, remove anything left in the groupRules -- these will be defunct rules
        if purge_rules:
            for (rule, grant) in groupRules.values():
                ip_permission = serialize_revoke(grant, rule)
                if not module.check_mode:
                    try:
                        client.revoke_security_group_ingress(GroupId=group['GroupId'],
                                                             IpPermissions=[ip_permission])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json(
                            msg="Unable to revoke ingress for security group '%s' - %s" %
                                (group['GroupName'], e),
                            exception=traceback.format_exc(),
                            **camel_dict_to_snake_dict(e.response))
                changed = True

        # Manage egress rules
        groupRules = {}
        add_rules_to_lookup(group['IpPermissionsEgress'], group['GroupId'], 'out', groupRules)
        # Now, go through all provided rules and ensure they are there.
        if rules_egress is not None:
            for rule in rules_egress:
                validate_rule(module, rule)
                group_id, ip, ipv6, target_group_created = get_target_from_rule(
                    module, client, rule, name, group, groups, vpc_id)
                if target_group_created:
                    changed = True

                if rule['proto'] in ('all', '-1', -1):
                    rule['proto'] = -1
                    rule['from_port'] = None
                    rule['to_port'] = None

                if group_id:
                    # Rule targets another security group.
                    rule_id = make_rule_key('out', rule, group['GroupId'], group_id)
                    if rule_id in groupRules:
                        changed = check_rule_desc_update_for_group_grant(client, module, rule, group, groupRules,
                                                                         rule_id, group_id, rule_type='out',
                                                                         changed=changed)
                        del groupRules[rule_id]
                    else:
                        if not module.check_mode:
                            ip_permission = serialize_group_grant(group_id, rule)
                            if ip_permission:
                                ips = ip_permission
                                if vpc_id:
                                    [useridpair.update({'VpcId': vpc_id}) for useridpair in
                                     ip_permission.get('UserIdGroupPairs', [])]
                                try:
                                    client.authorize_security_group_egress(GroupId=group['GroupId'],
                                                                           IpPermissions=[ips])
                                except botocore.exceptions.ClientError as e:
                                    module.fail_json(
                                        msg="Unable to authorize egress for group %s security group '%s' - %s" %
                                            (group_id, group['GroupName'], e),
                                        exception=traceback.format_exc(),
                                        **camel_dict_to_snake_dict(e.response))
                        changed = True
                elif ip:
                    # Convert ip to list we can iterate over
                    if not isinstance(ip, list):
                        ip = [ip]
                    changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ip,
                                                          ip_permission, module, rule, "ipv4")
                elif ipv6:
                    # Convert ip to list we can iterate over
                    if not isinstance(ipv6, list):
                        ipv6 = [ipv6]
                    # If rule already exists, don't later delete it
                    changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ipv6,
                                                          ip_permission, module, rule, "ipv6")
        elif vpc_id is not None:
            # when no egress rules are specified and we're in a VPC,
            # we add in a default allow all out rule, which was the
            # default behavior before egress rules were added
            default_egress_rule = 'out--1-None-None-' + group['GroupId'] + '-0.0.0.0/0'
            if default_egress_rule not in groupRules:
                if not module.check_mode:
                    ip_permission = [{'IpProtocol': '-1',
                                      'IpRanges': [{'CidrIp': '0.0.0.0/0'}]
                                      }]
                    try:
                        client.authorize_security_group_egress(GroupId=group['GroupId'],
                                                               IpPermissions=ip_permission)
                    except botocore.exceptions.ClientError as e:
                        module.fail_json(
                            msg="Unable to authorize egress for ip %s security group '%s' - %s" %
                                ('0.0.0.0/0', group['GroupName'], e),
                            exception=traceback.format_exc(),
                            **camel_dict_to_snake_dict(e.response))
                changed = True
            else:
                # make sure the default egress rule is not removed
                del groupRules[default_egress_rule]

        # Finally, remove anything left in the groupRules -- these will be defunct rules
        if purge_rules_egress and vpc_id is not None:
            for (rule, grant) in groupRules.values():
                # we shouldn't be revoking 0.0.0.0 egress
                if grant != '0.0.0.0/0':
                    ip_permission = serialize_revoke(grant, rule)
                    if not module.check_mode:
                        try:
                            client.revoke_security_group_egress(GroupId=group['GroupId'],
                                                                IpPermissions=[ip_permission])
                        except botocore.exceptions.ClientError as e:
                            module.fail_json(
                                msg="Unable to revoke egress for ip %s security group '%s' - %s" %
                                    (grant, group['GroupName'], e),
                                exception=traceback.format_exc(),
                                **camel_dict_to_snake_dict(e.response))
                    changed = True

    if group:
        # Re-read the group so the returned facts reflect all changes made above.
        security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
        security_group = camel_dict_to_snake_dict(security_group)
        security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []),
                                                                tag_name_key_name='key',
                                                                tag_value_key_name='value')
        module.exit_json(changed=changed, **security_group)
    else:
        module.exit_json(changed=changed, group_id=None)
def resource(self, service):
    """Return a boto3 resource handle for *service*, built from this module's AWS connection info."""
    region, endpoint_url, connect_kwargs = get_aws_connection_info(self, boto3=True)
    return boto3_conn(
        self,
        conn_type='resource',
        resource=service,
        region=region,
        endpoint=endpoint_url,
        **connect_kwargs
    )
def main():
    """Entry point for the cloudformation module: create, update, change-set, or delete a stack."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        stack_name=dict(required=True),
        template_parameters=dict(required=False, type='dict', default={}),
        state=dict(default='present', choices=['present', 'absent']),
        template=dict(default=None, required=False, type='path'),
        notification_arns=dict(default=None, required=False),
        stack_policy=dict(default=None, required=False),
        disable_rollback=dict(default=False, type='bool'),
        on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
        create_timeout=dict(default=None, type='int'),
        template_url=dict(default=None, required=False),
        template_body=dict(default=None, required=False),
        template_format=dict(removed_in_version='2.14'),
        create_changeset=dict(default=False, type='bool'),
        changeset_name=dict(default=None, required=False),
        role_arn=dict(default=None, required=False),
        tags=dict(default=None, type='dict'),
        termination_protection=dict(default=None, type='bool'),
        events_limit=dict(default=200, type='int'),
        backoff_retries=dict(type='int', default=10, required=False),
        backoff_delay=dict(type='int', default=3, required=False),
        backoff_max_delay=dict(type='int', default=30, required=False),
        capabilities=dict(type='list', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True
    )
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required for this module')

    # Validate capabilities against the set CloudFormation accepts.
    invalid_capabilities = []
    user_capabilities = module.params.get('capabilities')
    for user_cap in user_capabilities:
        if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']:
            invalid_capabilities.append(user_cap)

    if invalid_capabilities:
        module.fail_json(msg="Specified capabilities are invalid : %r,"
                             " please check documentation for valid capabilities" % invalid_capabilities)

    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {
        'Capabilities': user_capabilities,
        'ClientRequestToken': to_native(uuid.uuid4()),
    }
    state = module.params['state']
    stack_params['StackName'] = module.params['stack_name']

    # Template may come from a local file, an inline body, or a URL (mutually exclusive).
    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as template_fh:
            stack_params['TemplateBody'] = template_fh.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']

    if module.params.get('notification_arns'):
        stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
    else:
        stack_params['NotificationARNs'] = []

    # can't check the policy when verifying.
    if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
        with open(module.params['stack_policy'], 'r') as stack_policy_fh:
            stack_params['StackPolicyBody'] = stack_policy_fh.read()

    template_parameters = module.params['template_parameters']

    stack_params['Parameters'] = []
    for k, v in template_parameters.items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = str(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})

    if isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])

    if module.params.get('role_arn'):
        stack_params['RoleARN'] = module.params['role_arn']

    result = {}

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        cfn = boto3_conn(module, conn_type='client', resource='cloudformation',
                         region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=boto_exception(e))

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    backoff_wrapper = AWSRetry.jittered_backoff(
        retries=module.params.get('backoff_retries'),
        delay=module.params.get('backoff_delay'),
        max_delay=module.params.get('backoff_max_delay')
    )
    cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
    cfn.create_stack = backoff_wrapper(cfn.create_stack)
    cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
    cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
    cfn.update_stack = backoff_wrapper(cfn.update_stack)
    cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
    cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
    cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
    if boto_supports_termination_protection(cfn):
        cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection)

    stack_info = get_stack_facts(cfn, stack_params['StackName'])

    if module.check_mode:
        # In check mode, report what would happen without touching the stack.
        if state == 'absent' and stack_info:
            module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
        elif state == 'absent' and not stack_info:
            module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
        elif state == 'present' and not stack_info:
            module.exit_json(changed=True, msg='New stack would be created', meta=[])
        else:
            module.exit_json(**check_mode_changeset(module, stack_params, cfn))

    if state == 'present':
        if not stack_info:
            result = create_stack(module, stack_params, cfn, module.params.get('events_limit'))
        elif module.params.get('create_changeset'):
            result = create_changeset(module, stack_params, cfn, module.params.get('events_limit'))
        else:
            if module.params.get('termination_protection') is not None:
                update_termination_protection(module, cfn, stack_params['StackName'],
                                              bool(module.params.get('termination_protection')))
            result = update_stack(module, stack_params, cfn, module.params.get('events_limit'))

        # format the stack output
        stack = get_stack_facts(cfn, stack_params['StackName'])
        if stack is not None:
            if result.get('stack_outputs') is None:
                # always define stack_outputs, but it may be empty
                result['stack_outputs'] = {}
            for output in stack.get('Outputs', []):
                result['stack_outputs'][output['OutputKey']] = output['OutputValue']
            stack_resources = []
            reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
            for res in reslist.get('StackResourceSummaries', []):
                stack_resources.append({
                    "logical_resource_id": res['LogicalResourceId'],
                    "physical_resource_id": res.get('PhysicalResourceId', ''),
                    "resource_type": res['ResourceType'],
                    "last_updated_time": res['LastUpdatedTimestamp'],
                    "status": res['ResourceStatus'],
                    "status_reason": res.get('ResourceStatusReason')  # can be blank, apparently
                })
            result['stack_resources'] = stack_resources

    elif state == 'absent':
        # absent state is different because of the way delete_stack works.
        # problem is it it doesn't give an error if stack isn't found
        # so must describe the stack first
        try:
            stack = get_stack_facts(cfn, stack_params['StackName'])
            if not stack:
                result = {'changed': False, 'output': 'Stack not found.'}
            else:
                if stack_params.get('RoleARN') is None:
                    cfn.delete_stack(StackName=stack_params['StackName'])
                else:
                    cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
                result = stack_operation(cfn, stack_params['StackName'], 'DELETE',
                                         module.params.get('events_limit'),
                                         stack_params.get('ClientRequestToken', None))
        except Exception as err:
            module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())

    module.exit_json(**result)
def setup_client(module):
    """Build and return a boto3 RDS client, failing the module when no region is configured."""
    region, endpoint_url, connect_params = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="region must be specified")
    return boto3_conn(module, conn_type='client', resource='rds',
                      region=region, **connect_params)
def main():
    """Entry point for the cloudtrail module.

    Creates, updates, enables/disables or deletes an AWS CloudTrail trail,
    and manages its tags and logging state. Supports check mode.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
            name=dict(default='default'),
            enable_logging=dict(default=True, type='bool'),
            s3_bucket_name=dict(),
            s3_key_prefix=dict(),
            sns_topic_name=dict(),
            is_multi_region_trail=dict(default=False, type='bool'),
            enable_log_file_validation=dict(
                type='bool',
                aliases=['log_file_validation_enabled']),
            include_global_events=dict(
                default=True,
                type='bool',
                aliases=['include_global_service_events']),
            cloudwatch_logs_role_arn=dict(),
            cloudwatch_logs_log_group_arn=dict(),
            kms_key_id=dict(),
            tags=dict(default={}, type='dict'),
        ))
    required_if = [('state', 'present', ['s3_bucket_name']),
                   ('state', 'enabled', ['s3_bucket_name'])]
    required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_together=required_together,
                           required_if=required_if)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')
    # collect parameters
    # 'enabled'/'disabled' are treated as aliases of 'present'/'absent'.
    if module.params['state'] in ('present', 'enabled'):
        state = 'present'
    elif module.params['state'] in ('absent', 'disabled'):
        state = 'absent'
    tags = module.params['tags']
    enable_logging = module.params['enable_logging']
    # Parameters for the CloudTrail API calls (CamelCase keys as boto3 expects).
    ct_params = dict(
        Name=module.params['name'],
        S3BucketName=module.params['s3_bucket_name'],
        IncludeGlobalServiceEvents=module.params['include_global_events'],
        IsMultiRegionTrail=module.params['is_multi_region_trail'],
    )
    if module.params['s3_key_prefix']:
        # Trailing slash stripped so the prefix compares cleanly against AWS's value.
        ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')
    if module.params['sns_topic_name']:
        ct_params['SnsTopicName'] = module.params['sns_topic_name']
    if module.params['cloudwatch_logs_role_arn']:
        ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']
    if module.params['cloudwatch_logs_log_group_arn']:
        ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']
    if module.params['enable_log_file_validation'] is not None:
        ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation']
    if module.params['kms_key_id']:
        ct_params['KmsKeyId'] = module.params['kms_key_id']
    try:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='cloudtrail',
                            region=region, endpoint=ec2_url, **aws_connect_params)
    except ClientError as err:
        module.fail_json(msg=err.message, exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(err.response))
    results = dict(changed=False, exists=False)
    # Get existing trail facts
    trail = get_trail_facts(module, client, ct_params['Name'])
    # If the trail exists set the result exists variable
    if trail is not None:
        results['exists'] = True
    if state == 'absent' and results['exists']:
        # If Trail exists go ahead and delete
        results['changed'] = True
        results['exists'] = False
        results['trail'] = dict()
        if not module.check_mode:
            delete_trail(module, client, trail['TrailARN'])
    elif state == 'present' and results['exists']:
        # If Trail exists see if we need to update it
        do_update = False
        for key in ct_params:
            tkey = str(key)
            # boto3 has inconsistent parameter naming so we handle it here
            if key == 'EnableLogFileValidation':
                tkey = 'LogFileValidationEnabled'
            # We need to make an empty string equal None
            if ct_params.get(key) == '':
                val = None
            else:
                val = ct_params.get(key)
            if val != trail.get(tkey):
                do_update = True
                results['changed'] = True
                # If we are in check mode copy the changed values to the trail facts in result output to show what would change.
                if module.check_mode:
                    trail.update({tkey: ct_params.get(key)})
        if not module.check_mode and do_update:
            update_trail(module, client, ct_params)
            trail = get_trail_facts(module, client, ct_params['Name'])
        # Check if we need to start/stop logging
        if enable_logging and not trail['IsLogging']:
            results['changed'] = True
            trail['IsLogging'] = True
            if not module.check_mode:
                set_logging(module, client, name=ct_params['Name'], action='start')
        if not enable_logging and trail['IsLogging']:
            results['changed'] = True
            trail['IsLogging'] = False
            if not module.check_mode:
                set_logging(module, client, name=ct_params['Name'], action='stop')
        # Check if we need to update tags on resource
        tag_dry_run = False
        if module.check_mode:
            tag_dry_run = True
        tags_changed = tag_trail(module, client, tags=tags,
                                 trail_arn=trail['TrailARN'],
                                 curr_tags=trail['tags'],
                                 dry_run=tag_dry_run)
        if tags_changed:
            results['changed'] = True
            trail['tags'] = tags
        # Populate trail facts in output
        results['trail'] = camel_dict_to_snake_dict(trail)
    elif state == 'present' and not results['exists']:
        # Trail doesn't exist just go create it
        results['changed'] = True
        if not module.check_mode:
            # If we aren't in check_mode then actually create it
            created_trail = create_trail(module, client, ct_params)
            # Apply tags
            tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
            # Get the trail status
            try:
                status_resp = client.get_trail_status(Name=created_trail['Name'])
            except ClientError as err:
                module.fail_json(msg=err.message, exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(err.response))
            # Set the logging state for the trail to desired value
            if enable_logging and not status_resp['IsLogging']:
                set_logging(module, client, name=ct_params['Name'], action='start')
            if not enable_logging and status_resp['IsLogging']:
                set_logging(module, client, name=ct_params['Name'], action='stop')
            # Get facts for newly created Trail
            trail = get_trail_facts(module, client, ct_params['Name'])
        # If we are in check mode create a fake return structure for the newly minted trail
        if module.check_mode:
            # Fall back to a placeholder account id if STS is not available.
            acct_id = '123456789012'
            try:
                sts_client = boto3_conn(module, conn_type='client', resource='sts',
                                        region=region, endpoint=ec2_url, **aws_connect_params)
                acct_id = sts_client.get_caller_identity()['Account']
            except ClientError:
                pass
            trail = dict()
            trail.update(ct_params)
            if 'EnableLogFileValidation' not in ct_params:
                ct_params['EnableLogFileValidation'] = False
            # NOTE(review): the next two statements set and then immediately pop
            # the same key, so the value is lost from the fake facts; presumably
            # the fact key should be 'LogFileValidationEnabled' — confirm against
            # the real trail facts returned by get_trail_facts().
            trail['EnableLogFileValidation'] = ct_params['EnableLogFileValidation']
            trail.pop('EnableLogFileValidation')
            fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
            trail['HasCustomEventSelectors'] = False
            trail['HomeRegion'] = region
            trail['TrailARN'] = fake_arn
            trail['IsLogging'] = enable_logging
            trail['tags'] = tags
        # Populate trail facts in output
        results['trail'] = camel_dict_to_snake_dict(trail)
    module.exit_json(**results)
def create_launch_config(connection, module):
    """Create an Auto Scaling launch configuration if one of the given name
    does not already exist, then exit the module with its (snake-cased) facts.

    :param connection: boto3 autoscaling client
    :param module: AnsibleModule instance providing parameters and exit/fail
    """
    name = module.params.get('name')
    vpc_id = module.params.get('vpc_id')
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        ec2_connection = boto3_conn(module, 'client', 'ec2', region, ec2_url, **aws_connect_kwargs)
        # Resolve security group names to ids within the target VPC.
        security_groups = get_ec2_security_group_ids_from_names(
            module.params.get('security_groups'), ec2_connection, vpc_id=vpc_id, boto3=True)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to get Security Group IDs",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    except ValueError as e:
        module.fail_json(msg="Failed to get Security Group IDs",
                         exception=traceback.format_exc())
    user_data = module.params.get('user_data')
    user_data_path = module.params.get('user_data_path')
    volumes = module.params['volumes']
    instance_monitoring = module.params.get('instance_monitoring')
    assign_public_ip = module.params.get('assign_public_ip')
    instance_profile_name = module.params.get('instance_profile_name')
    ebs_optimized = module.params.get('ebs_optimized')
    classic_link_vpc_id = module.params.get('classic_link_vpc_id')
    classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups')
    block_device_mapping = {}
    # Simple string parameters that can be converted straight to the CamelCase
    # API names. Fixed: 'instance_type' was listed twice in the original.
    convert_list = [
        'image_id', 'instance_type', 'instance_id', 'placement_tenancy',
        'key_name', 'kernel_id', 'ramdisk_id', 'instance_profile_name', 'spot_price'
    ]
    launch_config = (snake_dict_to_camel_dict(
        dict((k.capitalize(), str(v)) for k, v in module.params.items()
             if v is not None and k in convert_list)))
    if user_data_path:
        try:
            with open(user_data_path, 'r') as user_data_file:
                user_data = user_data_file.read()
        except IOError as e:
            module.fail_json(msg="Failed to open file for reading",
                             exception=traceback.format_exc())
    if volumes:
        for volume in volumes:
            if 'device_name' not in volume:
                module.fail_json(msg='Device name must be set for volume')
            # Minimum volume size is 1GB. We'll use volume size explicitly set
            # to 0 to be a signal not to create this volume
            if 'volume_size' not in volume or int(volume['volume_size']) > 0:
                block_device_mapping.update(create_block_device_meta(module, volume))
    try:
        launch_configs = connection.describe_launch_configurations(
            LaunchConfigurationNames=[name]).get('LaunchConfigurations')
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to describe launch configuration by name",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    changed = False
    result = {}
    launch_config['LaunchConfigurationName'] = name
    if security_groups is not None:
        launch_config['SecurityGroups'] = security_groups
    if classic_link_vpc_id is not None:
        launch_config['ClassicLinkVPCId'] = classic_link_vpc_id
    if instance_monitoring:
        launch_config['InstanceMonitoring'] = {'Enabled': instance_monitoring}
    if classic_link_vpc_security_groups is not None:
        launch_config['ClassicLinkVPCSecurityGroups'] = classic_link_vpc_security_groups
    if block_device_mapping:
        launch_config['BlockDeviceMappings'] = [block_device_mapping]
    if instance_profile_name is not None:
        launch_config['IamInstanceProfile'] = instance_profile_name
    if assign_public_ip is not None:
        launch_config['AssociatePublicIpAddress'] = assign_public_ip
    if user_data is not None:
        launch_config['UserData'] = user_data
    if ebs_optimized is not None:
        launch_config['EbsOptimized'] = ebs_optimized
    if len(launch_configs) == 0:
        # Launch configurations are immutable: only create when absent.
        try:
            connection.create_launch_configuration(**launch_config)
            launch_configs = connection.describe_launch_configurations(
                LaunchConfigurationNames=[name]).get('LaunchConfigurations')
            changed = True
            if launch_configs:
                launch_config = launch_configs[0]
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Failed to create launch configuration",
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
    # Copy everything except the keys that need special handling below.
    result = (dict((k, v) for k, v in launch_config.items()
                   if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings']))
    result['CreatedTime'] = to_text(launch_config.get('CreatedTime'))
    try:
        result['InstanceMonitoring'] = module.boolean(
            launch_config.get('InstanceMonitoring').get('Enabled'))
    except AttributeError:
        result['InstanceMonitoring'] = False
    result['BlockDeviceMappings'] = []
    for block_device_mapping in launch_config.get('BlockDeviceMappings', []):
        result['BlockDeviceMappings'].append(
            dict(device_name=block_device_mapping.get('DeviceName'),
                 virtual_name=block_device_mapping.get('VirtualName')))
        if block_device_mapping.get('Ebs') is not None:
            result['BlockDeviceMappings'][-1]['ebs'] = dict(
                snapshot_id=block_device_mapping.get('Ebs').get('SnapshotId'),
                volume_size=block_device_mapping.get('Ebs').get('VolumeSize'))
    if user_data_path:
        result['UserData'] = "hidden"  # Otherwise, we dump binary to the user's terminal
    return_object = {
        'Name': result.get('LaunchConfigurationName'),
        'CreatedTime': result.get('CreatedTime'),
        'ImageId': result.get('ImageId'),
        'Arn': result.get('LaunchConfigurationARN'),
        'SecurityGroups': result.get('SecurityGroups'),
        'InstanceType': result.get('InstanceType'),
        'Result': result
    }
    module.exit_json(changed=changed, **camel_dict_to_snake_dict(return_object))
def main():
    """Entry point: create, update or delete a KMS key located by key id or alias."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            key_id=dict(),
            description=dict(),
            alias=dict(),
            enabled=dict(type='bool', default=True),
            tags=dict(type='dict', default={}),
            purge_tags=dict(type='bool', default=False),
            grants=dict(type='list', default=[]),
            purge_grants=dict(type='bool', default=False),
            state=dict(required=True, choices=['present', 'absent']),
            policy=dict(),
        ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,  # FIXME: currently so false
        required_one_of=[['key_id', 'alias']])
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if region:
        connection = boto3_conn(module, conn_type='client', resource='kms',
                                region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg="region must be specified")
    all_keys = get_kms_facts(connection, module)
    key_id = module.params.get('key_id')
    alias = module.params.get('alias')
    # Build the filter from whichever identifier was supplied
    # (required_one_of guarantees at least one of them is set).
    if key_id:
        filtr = ('key-id', key_id)
    elif alias:
        filtr = ('alias', alias)
    candidate_keys = [key for key in all_keys if key_matches_filter(key, filtr)]
    if module.params.get('state') == 'present':
        if candidate_keys:
            update_key(connection, module, candidate_keys[0])
        else:
            if key_id:
                # Fixed: the original looked up the non-existent 'key-id'
                # module parameter (always None, so this branch never ran) and
                # left the '%s' placeholder in the message unfilled.
                module.fail_json(msg="Could not find key with id %s to update" % key_id)
            else:
                create_key(connection, module)
    else:
        if candidate_keys:
            delete_key(connection, module, candidate_keys[0])
        else:
            module.exit_json(changed=False)
def main():
    """Entry point: create, update, delete or list Elastic Beanstalk applications."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(app_name=dict(type='str', required=False),
             description=dict(type='str', required=False),
             state=dict(choices=['present', 'absent', 'list'], default='present')),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    app_name = module.params['app_name']
    description = module.params['description']
    state = module.params['state']
    # app_name is only optional when listing all applications.
    if app_name is None:
        if state != 'list':
            module.fail_json(
                msg='Module parameter "app_name" is required if "state" is not "list"')
    result = {}
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if region:
        ebs = boto3_conn(module, conn_type='client', resource='elasticbeanstalk',
                         region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg='region must be specified')
    app = describe_app(ebs, app_name)
    if module.check_mode and state != 'list':
        # check_app() is expected to exit the module itself; reaching the next
        # line means it returned control, which is treated as a bug.
        check_app(ebs, app, module)
        module.fail_json(msg='ASSERTION FAILURE: check_app() should not return control.')
    if state == 'present':
        if app is None:
            # Application absent -> create it.
            create_app = ebs.create_application(**filter_empty(
                ApplicationName=app_name, Description=description))
            app = describe_app(ebs, app_name)
            result = dict(changed=True, app=app)
        else:
            # Application present -> update only if the description differs.
            if app.get("Description", None) != description:
                ebs.update_application(ApplicationName=app_name, Description=description)
                app = describe_app(ebs, app_name)
                result = dict(changed=True, app=app)
            else:
                result = dict(changed=False, app=app)
    elif state == 'absent':
        if app is None:
            result = dict(changed=False, output='Application not found')
        else:
            ebs.delete_application(ApplicationName=app_name)
            result = dict(changed=True, app=app)
    else:
        # state == 'list'
        apps = list_apps(ebs, app_name)
        result = dict(changed=False, apps=apps)
    module.exit_json(**result)
def main():
    """Entry point: gather Route 53 facts for the requested query type."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            query=dict(choices=[
                'change',
                'checker_ip_range',
                'health_check',
                'hosted_zone',
                'record_sets',
                'reusable_delegation_set',
            ], required=True),
            change_id=dict(),
            hosted_zone_id=dict(),
            max_items=dict(type='str'),
            next_marker=dict(),
            delegation_set_id=dict(),
            start_record_name=dict(),
            type=dict(choices=[
                'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS'
            ]),
            dns_name=dict(),
            resource_id=dict(type='list', aliases=['resource_ids']),
            health_check_id=dict(),
            hosted_zone_method=dict(
                choices=['details', 'list', 'list_by_name', 'count', 'tags'],
                default='list'),
            health_check_method=dict(choices=[
                'list',
                'details',
                'status',
                'failure_reason',
                'count',
                'tags',
            ], default='list'),
        ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['hosted_zone_method', 'health_check_method'],
        ],
    )
    # Validate Requirements
    if not (HAS_BOTO or HAS_BOTO3):
        module.fail_json(msg='json and boto/boto3 is required.')
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        route53 = boto3_conn(module, conn_type='client', resource='route53',
                             region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg="Can't authorize connection - %s " % str(e))
    # Dispatch table: each query name maps to the facts-gathering function
    # whose return dict is passed straight to exit_json.
    invocations = {
        'change': change_details,
        'checker_ip_range': checker_ip_range_details,
        'health_check': health_check_details,
        'hosted_zone': hosted_zone_details,
        'record_sets': record_sets_details,
        'reusable_delegation_set': reusable_delegation_set_details,
    }
    results = invocations[module.params.get('query')](route53, module)
    module.exit_json(**results)
def main():
    """Entry point: push a local directory tree to an S3 bucket (s3_sync)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            mode=dict(choices=['push'], default='push'),
            file_change_strategy=dict(
                choices=['force', 'date_size', 'checksum'], default='date_size'),
            bucket=dict(required=True),
            key_prefix=dict(required=False, default=''),
            file_root=dict(required=True, type='path'),
            permission=dict(required=False, choices=[
                'private', 'public-read', 'public-read-write', 'authenticated-read',
                'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control'
            ]),
            retries=dict(required=False),
            mime_map=dict(required=False, type='dict'),
            exclude=dict(required=False, default=".*"),
            include=dict(required=False, default="*"),
            cache_control=dict(required=False, default=''),
            delete=dict(required=False, type='bool', default=False),
            # future options: encoding, metadata, storage_class, retries
        ))
    module = AnsibleModule(argument_spec=argument_spec, )
    if not HAS_DATEUTIL:
        module.fail_json(msg='dateutil required for this module')
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    result = {}
    mode = module.params['mode']
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="Region must be specified")
    s3 = boto3_conn(module, conn_type='client', resource='s3', region=region,
                    endpoint=ec2_url, **aws_connect_kwargs)
    if mode == 'push':
        try:
            # Pipeline: gather local files -> attach mimetypes -> compute S3
            # keys -> compute local etags -> filter by change strategy ->
            # upload (and optionally delete remote files no longer present).
            result['filelist_initial'] = gather_files(
                module.params['file_root'],
                exclude=module.params['exclude'],
                include=module.params['include'])
            result['filelist_typed'] = determine_mimetypes(
                result['filelist_initial'], module.params.get('mime_map'))
            result['filelist_s3'] = calculate_s3_path(
                result['filelist_typed'], module.params['key_prefix'])
            result['filelist_local_etag'] = calculate_local_etag(
                result['filelist_s3'])
            result['filelist_actionable'] = filter_list(
                s3, module.params['bucket'],
                result['filelist_local_etag'],
                module.params['file_change_strategy'])
            result['uploads'] = upload_files(s3, module.params['bucket'],
                                             result['filelist_actionable'],
                                             module.params)
            if module.params['delete']:
                result['removed'] = remove_files(s3, result['filelist_local_etag'],
                                                 module.params)
            # mark changed if we actually upload something.
            if result.get('uploads') or result.get('removed'):
                result['changed'] = True
            # result.update(filelist=actionable_filelist)
        except botocore.exceptions.ClientError as err:
            error_msg = boto_exception(err)
            module.fail_json(msg=error_msg, exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(err.response))
    module.exit_json(**result)
def main():
    """Entry point: ensure an ECR repository is present or absent."""
    # Get arguments.
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=True, choices=['present', 'absent', 'empty']),
            name=dict(required=True, type='str'),
            force=dict(default=False, type='bool'),
        ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    state = module.params['state']
    name = module.params['name']
    force = module.params['force']
    # Verify package requirements.
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')
    # Create ECR client.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if region:
        ecr = boto3_conn(module, conn_type='client', resource='ecr',
                         region=region, endpoint=ec2_url, **aws_connect_kwargs)
    else:
        module.fail_json(msg="region must be specified")
    # Handle case present -> present.
    if state == 'present':
        repository = get_repository(ecr, name, module)
        if repository:
            module.exit_json(changed=False, repository=repository, state='present')
        # Handle case absent -> present.
        else:
            if not module.check_mode:
                try:
                    repository = ecr.create_repository(
                        repositoryName=name)['repository']
                except ClientError as e:
                    module.fail_json(msg=e.message,
                                     **camel_dict_to_snake_dict(e.response))
                module.exit_json(changed=True, repository=repository, state='present')
            else:
                # check mode: report the change without creating anything.
                module.exit_json(changed=True, state='present')
    # Handle case present -> absent.
    if state == 'absent':
        repository = get_repository(ecr, name, module)
        if repository:
            if not module.check_mode:
                try:
                    repository = ecr.delete_repository(
                        repositoryName=name, force=force)['repository']
                except ClientError as e:
                    module.fail_json(msg=e.message,
                                     **camel_dict_to_snake_dict(e.response))
            module.exit_json(changed=True, repository=repository, state='absent')
        # Handle case absent -> absent.
        else:
            module.exit_json(changed=False, state='absent')
def client(self, service, retry_decorator=None):
    """Return a boto3 client for *service*, optionally wrapped with retries.

    When *retry_decorator* is given, the raw client is wrapped in a
    _RetryingBotoClientWrapper; otherwise it is returned as-is.
    """
    region, endpoint_url, connect_kwargs = get_aws_connection_info(self, boto3=True)
    raw_client = boto3_conn(self, conn_type='client', resource=service,
                            region=region, endpoint=endpoint_url, **connect_kwargs)
    if retry_decorator is None:
        return raw_client
    return _RetryingBotoClientWrapper(raw_client, retry_decorator)
def main():
    """Entry point: manage a Kinesis Firehose delivery stream with an
    (extended) S3 destination — create, update or delete it."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            # Fixed: the AnsibleModule option keyword is 'required', not
            # 'require' — the misspelled keyword was silently ignored, so
            # none of these options was actually mandatory.
            state=dict(required=False, type='str', default="present",
                       choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            stream_type=dict(required=True, type='str',
                             choices=['DirectPut', 'KinesisStreamAsSource']),
            role_arn=dict(required=True, type='str'),
            dest_arn=dict(required=True, type='str'),
            prefix=dict(required=False, type='str', default=""),
            compression=dict(required=False, type='str', default="UNCOMPRESSED",
                             choices=['UNCOMPRESSED', 'SNAPPY', 'ZIP', 'GZIP']),
            buffering_second=dict(required=False, type='int', default=300),
            buffering_mb=dict(required=False, type='int', default=5),
        ))
    module = AnsibleModule(argument_spec=argument_spec)
    state = module.params['state']
    name = module.params['name']
    stream_type = module.params['stream_type']
    role_arn = module.params['role_arn']
    dest_arn = module.params['dest_arn']
    prefix = module.params['prefix']
    compression = module.params['compression']
    buffering_second = module.params['buffering_second']
    buffering_mb = module.params['buffering_mb']
    changed = False
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    if not HAS_BOTOCORE:
        module.fail_json(msg='botocore required for this module')
    # Connect to AWS
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        conn = boto3_conn(module, conn_type="client", resource="firehose",
                          region=region, **aws_connect_kwargs)
    except NoCredentialsError as ex:
        module.fail_json(msg=ex.message)
    if state == "absent":
        try:
            conn.delete_delivery_stream(DeliveryStreamName=name)
            changed = True
        except ClientError as ex:
            # Deleting a missing stream is not an error, just 'not changed'.
            if ex.response['Error']['Code'] == "ResourceNotFoundException":
                changed = False
            else:
                module.fail_json(msg=ex.response['Error']['Message'])
        module.exit_json(changed=changed)
    # Desired S3 destination configuration, used both for create and update.
    desired_config = {
        "RoleARN": role_arn,
        "BucketARN": dest_arn,
        "Prefix": prefix,
        "CompressionFormat": compression,
        "BufferingHints": {
            "IntervalInSeconds": buffering_second,
            "SizeInMBs": buffering_mb
        },
        "S3BackupMode": "Disabled",
    }
    try:
        current = conn.describe_delivery_stream(DeliveryStreamName=name)
    except ClientError as ex:
        if ex.response['Error']['Code'] == "ResourceNotFoundException":
            # Create delivery stream
            try:
                conn.create_delivery_stream(
                    DeliveryStreamName=name,
                    DeliveryStreamType=stream_type,
                    ExtendedS3DestinationConfiguration=desired_config)
                module.exit_json(changed=True)
            except ClientError as ex2:
                module.fail_json(msg=ex2.response['Error']['Message'])
        else:
            # Fixed: any other describe error previously fell through and
            # raised a NameError on the undefined 'current' variable below.
            module.fail_json(msg=ex.response['Error']['Message'])
    # Update: overlay the desired settings on the current destination config
    # and push an update only if something actually differs.
    current_config = current['DeliveryStreamDescription']['Destinations'][0][
        'ExtendedS3DestinationDescription']
    planned_config = copy.deepcopy(current_config)
    for k in desired_config.keys():
        planned_config[k] = desired_config[k]
    if current_config == planned_config:
        changed = False
    else:
        try:
            conn.update_destination(
                DeliveryStreamName=name,
                CurrentDeliveryStreamVersionId=current[
                    'DeliveryStreamDescription']['VersionId'],
                DestinationId=current['DeliveryStreamDescription']
                ['Destinations'][0]['DestinationId'],
                ExtendedS3DestinationUpdate=planned_config)
            changed = True
        except ClientError as ex:
            module.fail_json(msg=ex.response['Error']['Message'])
    module.exit_json(changed=changed)
def main():
    """Entry point: invoke an AWS Lambda function and return its status,
    decoded output and (optionally) its tail log."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(),
            function_arn=dict(),
            wait=dict(default=True, type='bool'),
            tail_log=dict(default=False, type='bool'),
            dry_run=dict(default=False, type='bool'),
            version_qualifier=dict(),
            payload=dict(default={}, type='dict'),
        ))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           mutually_exclusive=[
                               ['name', 'function_arn'],
                           ])
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    name = module.params.get('name')
    function_arn = module.params.get('function_arn')
    await_return = module.params.get('wait')
    dry_run = module.params.get('dry_run')
    tail_log = module.params.get('tail_log')
    version_qualifier = module.params.get('version_qualifier')
    payload = module.params.get('payload')
    if not HAS_BOTO3:
        module.fail_json(
            msg='Python module "boto3" is missing, please install it')
    if not (name or function_arn):
        module.fail_json(
            msg="Must provide either a function_arn or a name to invoke.")
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(
        module, boto3=HAS_BOTO3)
    if not region:
        module.fail_json(msg="The AWS region must be specified as an "
                             "environment variable or in the AWS credentials "
                             "profile.")
    try:
        client = boto3_conn(module, conn_type='client', resource='lambda',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
        module.fail_json(msg="Failure connecting boto3 to AWS: %s" % to_native(e),
                         exception=traceback.format_exc())
    invoke_params = {}
    if await_return:
        # await response
        invoke_params['InvocationType'] = 'RequestResponse'
    else:
        # fire and forget
        invoke_params['InvocationType'] = 'Event'
    if dry_run or module.check_mode:
        # dry_run overrides invocation type
        invoke_params['InvocationType'] = 'DryRun'
    if tail_log and await_return:
        invoke_params['LogType'] = 'Tail'
    elif tail_log and not await_return:
        # Tail logs are only returned on synchronous invocations.
        module.fail_json(msg="The `tail_log` parameter is only available if "
                             "the invocation waits for the function to complete. "
                             "Set `wait` to true or turn off `tail_log`.")
    else:
        invoke_params['LogType'] = 'None'
    if version_qualifier:
        invoke_params['Qualifier'] = version_qualifier
    if payload:
        invoke_params['Payload'] = json.dumps(payload)
    if function_arn:
        invoke_params['FunctionName'] = function_arn
    elif name:
        invoke_params['FunctionName'] = name
    try:
        response = client.invoke(**invoke_params)
    except botocore.exceptions.ClientError as ce:
        if ce.response['Error']['Code'] == 'ResourceNotFoundException':
            module.fail_json(msg="Could not find Lambda to execute. Make sure "
                                 "the ARN is correct and your profile has "
                                 "permissions to execute this function.",
                             exception=traceback.format_exc())
        module.fail_json(
            msg="Client-side error when invoking Lambda, check inputs and specific error",
            exception=traceback.format_exc())
    except botocore.exceptions.ParamValidationError as ve:
        module.fail_json(msg="Parameters to `invoke` failed to validate",
                         exception=traceback.format_exc())
    except Exception as e:
        module.fail_json(
            msg="Unexpected failure while invoking Lambda function",
            exception=traceback.format_exc())
    results = {
        'logs': '',
        'status': response['StatusCode'],
        'output': '',
    }
    if response.get('LogResult'):
        try:
            # logs are base64 encoded in the API response
            results['logs'] = base64.b64decode(response.get('LogResult', ''))
        except Exception as e:
            module.fail_json(msg="Failed while decoding logs",
                             exception=traceback.format_exc())
    if invoke_params['InvocationType'] == 'RequestResponse':
        try:
            results['output'] = json.loads(
                response['Payload'].read().decode('utf8'))
        except Exception as e:
            module.fail_json(msg="Failed while decoding function return value",
                             exception=traceback.format_exc())
        if isinstance(results.get('output'), dict) and any([
                results['output'].get('stackTrace'),
                results['output'].get('errorMessage')
        ]):
            # AWS sends back stack traces and error messages when a function failed
            # in a RequestResponse (synchronous) context.
            template = (
                "Function executed, but there was an error in the Lambda function. "
                "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
            error_data = {
                # format the stacktrace sent back as an array into a multiline string
                'trace': '\n'.join([
                    ' '.join([
                        str(x) for x in line  # cast line numbers to strings
                    ]) for line in results.get('output', {}).get('stackTrace', [])
                ]),
                'errmsg': results['output'].get('errorMessage'),
                'type': results['output'].get('errorType')
            }
            module.fail_json(msg=template.format(**error_data), result=results)
    module.exit_json(changed=True, result=results)
def create_client(self, resource):
    """Build a boto3 client for the given AWS service name and cache it on
    ``self.client``; fail the module if the connection cannot be set up."""
    try:
        region, endpoint_url, connect_kwargs = get_aws_connection_info(self.module, boto3=True)
        self.client = boto3_conn(self.module,
                                 conn_type='client',
                                 resource=resource,
                                 region=region,
                                 endpoint=endpoint_url,
                                 **connect_kwargs)
    except (ClientError, BotoCoreError) as e:
        self.module.fail_json_aws(e, msg="Unable to establish connection.")
def main():
    """Entry point: create/update an API Gateway REST API from a Swagger
    definition (file, dict or raw text) and deploy it, or delete the API."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            api_id=dict(type='str', required=False),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            swagger_file=dict(type='path', default=None, aliases=['src', 'api_file']),
            swagger_dict=dict(type='json', default=None),
            swagger_text=dict(type='str', default=None),
            stage=dict(type='str', default=None),
            deploy_desc=dict(type='str', default="Automatic deployment by Ansible."),
        ))
    mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']]  # noqa: F841
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False,
                           mutually_exclusive=mutually_exclusive)
    api_id = module.params.get('api_id')
    state = module.params.get('state')  # noqa: F841
    swagger_file = module.params.get('swagger_file')
    swagger_dict = module.params.get('swagger_dict')
    swagger_text = module.params.get('swagger_text')
    stage = module.params.get('stage')
    deploy_desc = module.params.get('deploy_desc')
    # check_mode = module.check_mode
    changed = False
    if not HAS_BOTO3:
        module.fail_json(
            msg='Python module "boto3" is missing, please install boto3')
    if not HAS_BOTOCORE:
        module.fail_json(
            msg='Python module "botocore" is missing, please install it')
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    try:
        client = boto3_conn(module, conn_type='client', resource='apigateway',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoRegionError:
        module.fail_json(
            msg="Region must be specified as a parameter, in "
                "AWS_DEFAULT_REGION environment variable or in boto configuration file")
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
        fail_json_aws(module, e, msg="connecting to AWS")
    changed = True  # for now it will stay that way until we can sometimes avoid change
    conf_res = None
    dep_res = None
    del_res = None
    if state == "present":
        # No api_id given: create an empty API first, then configure it.
        if api_id is None:
            api_id = create_empty_api(module, client)
        api_data = get_api_definitions(module, swagger_file=swagger_file,
                                       swagger_dict=swagger_dict,
                                       swagger_text=swagger_text)
        conf_res, dep_res = ensure_api_in_correct_state(module, client,
                                                        api_id=api_id,
                                                        api_data=api_data,
                                                        stage=stage,
                                                        deploy_desc=deploy_desc)
    if state == "absent":
        del_res = delete_rest_api(module, client, api_id)
    exit_args = {"changed": changed, "api_id": api_id}
    # Only include the responses that were actually produced.
    if conf_res is not None:
        exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res)
    if dep_res is not None:
        exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res)
    if del_res is not None:
        exit_args['delete_response'] = camel_dict_to_snake_dict(del_res)
    module.exit_json(**exit_args)
def main():
    """Manage ElastiCache snapshots: create (present), delete (absent), or copy to S3.

    Fails early when required per-state options or the AWS region are missing.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, type='str'),
        state=dict(required=True, type='str', choices=['present', 'absent', 'copy']),
        replication_id=dict(type='str'),
        cluster_id=dict(type='str'),
        target=dict(type='str'),
        bucket=dict(type='str'),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        # BUGFIX: the message previously said "boto" although boto3 is what is checked.
        module.fail_json(msg='boto3 required for this module')

    name = module.params.get('name')
    state = module.params.get('state')
    replication_id = module.params.get('replication_id')
    cluster_id = module.params.get('cluster_id')
    target = module.params.get('target')
    bucket = module.params.get('bucket')

    # Retrieve any AWS settings from the environment.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(
            msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")

    connection = boto3_conn(module, conn_type='client', resource='elasticache',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)

    changed = False
    response = {}

    if state == 'present':
        if not all((replication_id, cluster_id)):
            module.fail_json(
                msg="The state 'present' requires options: 'replication_id' and 'cluster_id'")
        response, changed = create(module, connection, replication_id, cluster_id, name)
    elif state == 'absent':
        response, changed = delete(module, connection, name)
    elif state == 'copy':
        if not all((target, bucket)):
            module.fail_json(
                msg="The state 'copy' requires options: 'target' and 'bucket'.")
        response, changed = copy(module, connection, name, target, bucket)

    facts_result = dict(changed=changed, **camel_dict_to_snake_dict(response))
    module.exit_json(**facts_result)
def main():
    """Manage VPC endpoints: create one (present) or delete one (absent)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        vpc_id=dict(),
        service=dict(),
        policy=dict(type='json'),
        policy_file=dict(type='path', aliases=['policy_path']),
        state=dict(default='present', choices=['present', 'absent']),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=320, required=False),
        route_table_ids=dict(type='list'),
        vpc_endpoint_id=dict(),
        client_token=dict(),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['policy', 'policy_file']],
        required_if=[
            ['state', 'present', ['vpc_id', 'service']],
            ['state', 'absent', ['vpc_endpoint_id']],
        ])

    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore and boto3 are required for this module')

    state = module.params.get('state')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    except NameError as e:
        # Getting around the get_aws_connection_info boto reliance for region
        # BUGFIX: NameError has no .message attribute on Python 3 — use str(e).
        if "global name 'boto' is not defined" in str(e):
            module.params['region'] = botocore.session.get_session().get_config_variable('region')
            if not module.params['region']:
                module.fail_json(msg="Error - no region provided")
        else:
            # BUGFIX: NameError carries no .response, so it must not be
            # expanded into fail_json kwargs (that raised AttributeError).
            module.fail_json(msg="Can't retrieve connection information - " + str(e),
                             exception=traceback.format_exc())

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        ec2 = boto3_conn(module, conn_type='client', resource='ec2',
                         region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        # BUGFIX: NoCredentialsError is not a ClientError and has no .response
        # attribute, so it cannot be expanded into fail_json kwargs either.
        module.fail_json(
            msg="Failed to connect to AWS due to wrong or missing credentials: %s" % str(e),
            exception=traceback.format_exc())

    # Ensure resource is present
    if state == 'present':
        (changed, results) = setup_creation(ec2, module)
    else:
        (changed, results) = setup_removal(ec2, module)
    module.exit_json(changed=changed, result=results)
def main():
    """Manage IAM customer-managed policies and their versions.

    state=present creates the policy (or a new version of it) and optionally
    makes it the default / only version; state=absent detaches all entities,
    removes non-default versions and deletes the policy.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        policy_name=dict(required=True),
        policy_description=dict(default=''),
        policy=dict(type='json'),
        make_default=dict(type='bool', default=True),
        only_version=dict(type='bool', default=False),
        fail_on_delete=dict(type='bool', default=True),
        state=dict(required=True, choices=['present', 'absent']),
    ))
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=[['state', 'present', ['policy']]])

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')

    name = module.params.get('policy_name')
    description = module.params.get('policy_description')
    state = module.params.get('state')
    default = module.params.get('make_default')
    only = module.params.get('only_version')

    policy = None
    if module.params.get('policy') is not None:
        # Round-trip through json so later document comparisons are not
        # whitespace/ordering sensitive.
        policy = json.dumps(json.loads(module.params.get('policy')))

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        iam = boto3_conn(module, conn_type='client', resource='iam',
                         region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
        # BUGFIX: these exceptions carry no .response, so it must not be expanded
        # into fail_json kwargs; also the keyword is 'exception', not 'exceptions'.
        module.fail_json(msg="Can't authorize connection. Check your credentials and profile.",
                         exception=traceback.format_exc())

    p = get_policy_by_name(module, iam, name)
    if state == 'present':
        if p is None:
            # No Policy so just create one
            try:
                rvalue = iam.create_policy(PolicyName=name, Path='/',
                                           PolicyDocument=policy, Description=description)
            except botocore.exceptions.ClientError as e:
                # BUGFIX: was a bare "except:" that referenced an undefined
                # name 'e', raising NameError instead of the intended message.
                module.fail_json(msg="Couldn't create policy %s: %s" % (name, str(e)),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            module.exit_json(changed=True,
                             policy=camel_dict_to_snake_dict(rvalue['Policy']))
        else:
            policy_version, changed = get_or_create_policy_version(module, iam, p, policy)
            changed = set_if_default(module, iam, p, policy_version, default) or changed
            changed = set_if_only(module, iam, p, policy_version, only) or changed
            # If anything has changed we need to refresh the policy
            if changed:
                try:
                    p = iam.get_policy(PolicyArn=p['Arn'])['Policy']
                except botocore.exceptions.ClientError as e:
                    # BUGFIX: bare "except:" referencing undefined 'e' (see above).
                    module.fail_json(msg="Couldn't get policy: %s" % str(e),
                                     exception=traceback.format_exc(),
                                     **camel_dict_to_snake_dict(e.response))
            module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(p))
    else:
        # Check for existing policy
        if p:
            # Detach policy
            detach_all_entities(module, iam, p)
            # Delete Versions
            try:
                versions = iam.list_policy_versions(PolicyArn=p['Arn'])['Versions']
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            for v in versions:
                # The default version can only go when the policy itself goes.
                if not v['IsDefaultVersion']:
                    try:
                        iam.delete_policy_version(PolicyArn=p['Arn'],
                                                  VersionId=v['VersionId'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json(
                            msg="Couldn't delete policy version %s: %s" % (v['VersionId'], str(e)),
                            exception=traceback.format_exc(),
                            **camel_dict_to_snake_dict(e.response))
            # Delete policy
            try:
                iam.delete_policy(PolicyArn=p['Arn'])
            except botocore.exceptions.ClientError as e:
                # BUGFIX: bare "except:" referencing undefined 'e' (see above).
                module.fail_json(msg="Couldn't delete policy %s: %s" % (p['PolicyName'], str(e)),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            # This is the one case where we will return the old policy
            module.exit_json(changed=True, policy=camel_dict_to_snake_dict(p))
        else:
            module.exit_json(changed=False, policy=None)
def main():
    """Create, update, terminate, list or describe an Elastic Beanstalk environment."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        app_name=dict(type='str', required=True),
        env_name=dict(type='str', required=False),
        version_label=dict(type='str', required=False),
        description=dict(type='str', required=False),
        state=dict(choices=['present', 'absent', 'list', 'details'], default='present'),
        wait_timeout=dict(default=900, type='int'),
        template_name=dict(type='str', required=False),
        solution_stack_name=dict(type='str', required=False),
        cname_prefix=dict(type='str', required=False),
        vpc=dict(type='str', required=False),
        vpc_subnets=dict(type='str', required=False),
        option_settings=dict(type='list', default=[]),
        tags=dict(type='dict', default=dict()),
        options_to_remove=dict(type='list', default=[]),
        tier_name=dict(default='WebServer', choices=['WebServer', 'Worker']),
        force_terminate=dict(type='bool', required=False, default=False),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['solution_stack_name', 'template_name']],
        supports_check_mode=True)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    app_name = module.params['app_name']
    env_name = module.params['env_name']
    version_label = module.params['version_label']
    description = module.params['description']
    state = module.params['state']
    wait_timeout = module.params['wait_timeout']
    template_name = module.params['template_name']
    solution_stack_name = module.params['solution_stack_name']
    cname_prefix = module.params['cname_prefix']
    vpc = module.params['vpc']
    vpc_subnets = module.params['vpc_subnets']
    tags = module.params['tags']
    option_settings = module.params['option_settings']
    options_to_remove = module.params['options_to_remove']
    force_terminate = module.params['force_terminate']

    # Worker environments run on the SQS/HTTP tier; everything else is a web server.
    tier_type = 'Standard'
    tier_name = module.params['tier_name']
    if tier_name == 'Worker':
        tier_type = 'SQS/HTTP'

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if region:
        ebs = boto3_conn(module, conn_type='client', resource='elasticbeanstalk',
                         region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg='region must be specified')

    if vpc:
        vpc_id = getVpcId(region, vpc, option_settings, module)
        module.debug("found vpc_id='{}'".format(vpc_id))
    else:
        vpc_id = None
    if vpc_subnets:
        checkVpcSubnetIds(region, vpc_subnets.split(','), vpc_id, option_settings, module)

    update = False
    result = {}

    if state == 'list':
        try:
            env = describe_env(ebs, app_name, env_name, [])
            result = dict(changed=False, env=[] if env is None else env)
        except ClientError as e:
            module.fail_json(msg=str(e), **camel_dict_to_snake_dict(e.response))

    if state == 'details':
        try:
            env = describe_env_config_settings(ebs, app_name, env_name)
            result = dict(changed=False, env=env)
        except ClientError as e:
            module.fail_json(msg=str(e), **camel_dict_to_snake_dict(e.response))

    # BUGFIX: the old condition "state != 'list' or state != 'details'" was
    # always true, so check mode also short-circuited the read-only states.
    if module.check_mode and state not in ('list', 'details'):
        check_env(ebs, app_name, env_name, module)
        # BUGFIX: fail_json() accepts keyword arguments only; the message was
        # previously passed positionally, which raises a TypeError.
        module.fail_json(msg='ASSERTION FAILURE: check_version() should not return control.')

    if state == 'present':
        try:
            tags_to_apply = [{'Key': k, 'Value': v} for k, v in tags.items()]
            ebs.create_environment(**filter_empty(
                ApplicationName=app_name,
                EnvironmentName=env_name,
                VersionLabel=version_label,
                TemplateName=template_name,
                Tags=tags_to_apply,
                SolutionStackName=solution_stack_name,
                CNAMEPrefix=cname_prefix,
                Description=description,
                OptionSettings=option_settings,
                Tier={'Name': tier_name, 'Type': tier_type, 'Version': '1.0'}))
            env = wait_for(ebs, app_name, env_name, wait_timeout, status_is_ready)
            result = dict(changed=True, env=env)
        except ClientError as e:
            if 'Environment %s already exists' % env_name in str(e):
                # The environment exists; fall through to the update path below.
                update = True
            else:
                module.fail_json(msg=str(e), **camel_dict_to_snake_dict(e.response))

    if update:
        try:
            env = describe_env(ebs, app_name, env_name, [])
            updates = update_required(ebs, env, module.params)
            if len(updates) > 0:
                ebs.update_environment(**filter_empty(
                    EnvironmentName=env_name,
                    VersionLabel=version_label,
                    TemplateName=template_name,
                    Description=description,
                    OptionSettings=option_settings))
                env = wait_for(
                    ebs, app_name, env_name, wait_timeout,
                    lambda environment: status_is_ready(environment) and
                    version_is_updated(version_label, environment))
                result = dict(changed=True, env=env, updates=updates)
            else:
                result = dict(changed=False, env=env)
        except ClientError as e:
            module.fail_json(msg=str(e), **camel_dict_to_snake_dict(e.response))

    if state == 'absent':
        try:
            ebs.terminate_environment(EnvironmentName=env_name,
                                      ForceTerminate=force_terminate)
            env = wait_for(ebs, app_name, env_name, wait_timeout, terminated)
            result = dict(changed=True, env=env)
        except ClientError as e:
            if 'No Environment found for EnvironmentName = \'%s\'' % env_name in str(e):
                result = dict(changed=False,
                              warnings='Environment {} not found'.format(env_name))
            else:
                module.fail_json(msg=str(e), **camel_dict_to_snake_dict(e.response))

    module.exit_json(**result)
def main():
    """Create, update or delete a Kinesis stream (shards, retention, tags)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(default=None, required=True),
        shards=dict(default=None, required=False, type='int'),
        retention_period=dict(default=None, required=False, type='int'),
        tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
        wait=dict(default=True, required=False, type='bool'),
        wait_timeout=dict(default=300, required=False, type='int'),
        state=dict(default='present', choices=['present', 'absent']),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    retention_period = module.params.get('retention_period')
    stream_name = module.params.get('name')
    shards = module.params.get('shards')
    state = module.params.get('state')
    tags = module.params.get('tags')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    if state == 'present' and not shards:
        module.fail_json(msg='Shards is required when state == present.')
    if retention_period:
        # Kinesis enforces a 24 hour minimum retention.
        if retention_period < 24:
            module.fail_json(msg='Retention period can not be less than 24 hours.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    check_mode = module.check_mode
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='kinesis',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.ClientError as e:
        # BUGFIX: botocore's ClientError has no .msg attribute; stringify the
        # exception itself instead of raising AttributeError here.
        err_msg = 'Boto3 Client Error - {0}'.format(to_native(e))
        module.fail_json(success=False, changed=False, result={}, msg=err_msg)

    if state == 'present':
        success, changed, err_msg, results = create_stream(
            client, stream_name, shards, retention_period, tags,
            wait, wait_timeout, check_mode)
    elif state == 'absent':
        success, changed, err_msg, results = delete_stream(
            client, stream_name, wait, wait_timeout, check_mode)

    if success:
        module.exit_json(success=success, changed=changed, msg=err_msg, **results)
    else:
        module.fail_json(success=success, changed=changed, msg=err_msg, result=results)
def main():
    """
    Main entry point.

    :return dict: ansible facts
    """
    argument_spec = dict(
        function_name=dict(required=False, default=None, aliases=['function', 'name']),
        query=dict(required=False,
                   choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'],
                   default='all'),
        event_source_arn=dict(required=False, default=None),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              mutually_exclusive=[],
                              required_together=[])

    # validate function_name if present
    function_name = module.params['function_name']
    if function_name:
        if not re.search(r"^[\w\-:]+$", function_name):
            module.fail_json(
                msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name))
        if len(function_name) > 64:
            module.fail_json(
                msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    try:
        region, endpoint, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        aws_connect_kwargs.update(dict(region=region, endpoint=endpoint,
                                       conn_type='client', resource='lambda'))
        client = boto3_conn(module, **aws_connect_kwargs)
    except ClientError as e:
        module.fail_json_aws(e, "trying to set up boto connection")

    # Map each query value onto the name of the facts-gathering function
    # defined at module level, then resolve it on this very module.
    dispatch_names = {
        'aliases': 'alias_details',
        'all': 'all_details',
        'config': 'config_details',
        'mappings': 'mapping_details',
        'policy': 'policy_details',
        'versions': 'version_details',
    }
    handler = getattr(sys.modules[__name__], dispatch_names[module.params['query']])
    all_facts = fix_return(handler(client, module))

    results = dict(ansible_facts={'lambda_facts': {'function': all_facts}},
                   changed=False)
    if module.check_mode:
        results['msg'] = 'Check mode set but ignored for fact gathering only.'
    module.exit_json(**results)
def main():
    """Create/update (present) or remove (absent) a Direct Connect link aggregation group."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(),
        link_aggregation_group_id=dict(),
        num_connections=dict(type='int'),
        min_links=dict(type='int'),
        location=dict(),
        bandwidth=dict(),
        connection_id=dict(),
        delete_with_disassociation=dict(type='bool', default=False),
        force_delete=dict(type='bool', default=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=120),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[('link_aggregation_group_id', 'name')],
        required_if=[('state', 'present', ('location', 'bandwidth'))])

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(
            msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")

    connection = boto3_conn(module, conn_type='client', resource='directconnect',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)

    params = module.params
    state = params.get('state')
    try:
        if state == 'present':
            changed, lag_id = ensure_present(
                connection,
                num_connections=params.get("num_connections"),
                lag_id=params.get("link_aggregation_group_id"),
                lag_name=params.get("name"),
                location=params.get("location"),
                bandwidth=params.get("bandwidth"),
                connection_id=params.get("connection_id"),
                min_links=params.get("min_links"),
                wait=params.get("wait"),
                wait_timeout=params.get("wait_timeout"))
            response = lag_status(connection, lag_id)
        elif state == "absent":
            changed = ensure_absent(
                connection,
                lag_id=params.get("link_aggregation_group_id"),
                lag_name=params.get("name"),
                force_delete=params.get("force_delete"),
                delete_with_disassociation=params.get("delete_with_disassociation"),
                wait=params.get('wait'),
                wait_timeout=params.get('wait_timeout'))
            response = {}
    except DirectConnectError as e:
        # Report the richest failure information the error object carries.
        if e.response:
            module.fail_json(msg=e.msg, exception=e.last_traceback, **e.response)
        elif e.last_traceback:
            module.fail_json(msg=e.msg, exception=e.last_traceback)
        else:
            module.fail_json(msg=e.msg)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
def main():
    """Ansible entry point: create, update, or delete an AWS Lambda function.

    Reads the module parameters, opens a Lambda client, then dispatches on
    ``state`` and on whether the function already exists.
    """
    argument_spec = dict(
        name=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        runtime=dict(),
        role=dict(),
        handler=dict(),
        zip_file=dict(aliases=['src']),
        s3_bucket=dict(),
        s3_key=dict(),
        s3_object_version=dict(),
        description=dict(default=''),
        timeout=dict(type='int', default=3),
        memory_size=dict(type='int', default=128),
        vpc_subnet_ids=dict(type='list'),
        vpc_security_group_ids=dict(type='list'),
        environment_variables=dict(type='dict'),
        dead_letter_arn=dict(),
        tags=dict(type='dict'),
    )

    # Code can come from a local zip or from S3, never both; S3 bucket/key go
    # together, and VPC subnets/security groups go together.
    mutually_exclusive = [['zip_file', 's3_key'],
                          ['zip_file', 's3_bucket'],
                          ['zip_file', 's3_object_version']]

    required_together = [['s3_key', 's3_bucket'],
                         ['vpc_subnet_ids', 'vpc_security_group_ids']]

    required_if = [['state', 'present', ['runtime', 'handler', 'role']]]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              mutually_exclusive=mutually_exclusive,
                              required_together=required_together,
                              required_if=required_if)

    name = module.params.get('name')
    state = module.params.get('state').lower()
    runtime = module.params.get('runtime')
    role = module.params.get('role')
    handler = module.params.get('handler')
    s3_bucket = module.params.get('s3_bucket')
    s3_key = module.params.get('s3_key')
    s3_object_version = module.params.get('s3_object_version')
    zip_file = module.params.get('zip_file')
    description = module.params.get('description')
    timeout = module.params.get('timeout')
    memory_size = module.params.get('memory_size')
    vpc_subnet_ids = module.params.get('vpc_subnet_ids')
    vpc_security_group_ids = module.params.get('vpc_security_group_ids')
    environment_variables = module.params.get('environment_variables')
    dead_letter_arn = module.params.get('dead_letter_arn')
    tags = module.params.get('tags')

    check_mode = module.check_mode
    changed = False

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        client = boto3_conn(module, conn_type='client', resource='lambda',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (ClientError, ValidationError) as e:
        module.fail_json_aws(e, msg="Trying to connect to AWS")

    if state == 'present':
        # Accept either a full role ARN or a bare role name.
        if role.startswith('arn:aws:iam'):
            role_arn = role
        else:
            # get account ID and assemble ARN
            account_id = get_account_id(module, region=region, endpoint=ec2_url,
                                        **aws_connect_kwargs)
            role_arn = 'arn:aws:iam::{0}:role/{1}'.format(account_id, role)

    # Get function configuration if present, False otherwise
    current_function = get_current_function(client, name)

    # Update existing Lambda function
    if state == 'present' and current_function:

        # Get current state
        current_config = current_function['Configuration']
        current_version = None

        if 'Version' in current_config:
            current_version = current_config['Version']

        # Update function configuration
        func_kwargs = {'FunctionName': name}

        # Update configuration if needed
        if role_arn and current_config['Role'] != role_arn:
            func_kwargs.update({'Role': role_arn})
        if handler and current_config['Handler'] != handler:
            func_kwargs.update({'Handler': handler})
        if description and current_config['Description'] != description:
            func_kwargs.update({'Description': description})
        if timeout and current_config['Timeout'] != timeout:
            func_kwargs.update({'Timeout': timeout})
        if memory_size and current_config['MemorySize'] != memory_size:
            func_kwargs.update({'MemorySize': memory_size})
        if (environment_variables is not None) and (current_config.get(
                'Environment', {}).get('Variables', {}) != environment_variables):
            func_kwargs.update({'Environment': {'Variables': environment_variables}})
        if dead_letter_arn is not None:
            if current_config.get('DeadLetterConfig'):
                if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn:
                    func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
            else:
                # An empty string means "no dead letter queue"; nothing to update.
                if dead_letter_arn != "":
                    func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})

        # Check for unsupported mutation
        if current_config['Runtime'] != runtime:
            module.fail_json(msg='Cannot change runtime. Please recreate the function')

        # If VPC configuration is desired
        if vpc_subnet_ids or vpc_security_group_ids:
            if not vpc_subnet_ids or not vpc_security_group_ids:
                module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')

            if 'VpcConfig' in current_config:
                # Compare VPC config with current config
                current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
                current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']

                subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
                vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)

            # NOTE: short-circuiting keeps the two *_changed names from being
            # read when 'VpcConfig' is absent from current_config.
            if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
                new_vpc_config = {'SubnetIds': vpc_subnet_ids,
                                  'SecurityGroupIds': vpc_security_group_ids}
                func_kwargs.update({'VpcConfig': new_vpc_config})
        else:
            # No VPC configuration is desired, assure VPC config is empty when present in current config
            if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'):
                func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})

        # Upload new configuration if configuration has changed
        # (func_kwargs always holds FunctionName, so >1 means real changes).
        if len(func_kwargs) > 1:
            try:
                if not check_mode:
                    response = client.update_function_configuration(**func_kwargs)
                    current_version = response['Version']
                changed = True
            except (ParamValidationError, ClientError) as e:
                module.fail_json_aws(e, msg="Trying to update lambda configuration")

        # Update code configuration
        code_kwargs = {'FunctionName': name, 'Publish': True}

        # Update S3 location
        if s3_bucket and s3_key:
            # If function is stored on S3 always update
            code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})

            # If S3 Object Version is given
            if s3_object_version:
                code_kwargs.update({'S3ObjectVersion': s3_object_version})

        # Compare local checksum, update remote code when different
        elif zip_file:
            local_checksum = sha256sum(zip_file)
            remote_checksum = current_config['CodeSha256']

            # Only upload new code when local code is different compared to the remote code
            if local_checksum != remote_checksum:
                try:
                    with open(zip_file, 'rb') as f:
                        encoded_zip = f.read()
                    code_kwargs.update({'ZipFile': encoded_zip})
                except IOError as e:
                    module.fail_json(msg=str(e), exception=traceback.format_exc())

        # Tag Function
        if tags is not None:
            if set_tag(client, module, tags, current_function):
                changed = True

        # Upload new code if needed (e.g. code checksum has changed)
        # (code_kwargs always holds FunctionName + Publish, so >2 means new code).
        if len(code_kwargs) > 2:
            try:
                if not check_mode:
                    response = client.update_function_code(**code_kwargs)
                    current_version = response['Version']
                changed = True
            except (ParamValidationError, ClientError) as e:
                module.fail_json_aws(e, msg="Trying to upload new code")

        # Describe function code and configuration
        response = get_current_function(client, name, qualifier=current_version)
        if not response:
            module.fail_json(msg='Unable to get function information after updating')

        # We're done
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))

    # Function doesn't exists, create new Lambda function
    elif state == 'present':
        if s3_bucket and s3_key:
            # If function is stored on S3
            code = {'S3Bucket': s3_bucket, 'S3Key': s3_key}
            if s3_object_version:
                code.update({'S3ObjectVersion': s3_object_version})
        elif zip_file:
            # If function is stored in local zipfile
            try:
                with open(zip_file, 'rb') as f:
                    zip_content = f.read()
                code = {'ZipFile': zip_content}
            except IOError as e:
                module.fail_json(msg=str(e), exception=traceback.format_exc())
        else:
            module.fail_json(msg='Either S3 object or path to zipfile required')

        func_kwargs = {'FunctionName': name,
                       'Publish': True,
                       'Runtime': runtime,
                       'Role': role_arn,
                       'Code': code,
                       'Timeout': timeout,
                       'MemorySize': memory_size,
                       }

        if description is not None:
            func_kwargs.update({'Description': description})
        if handler is not None:
            func_kwargs.update({'Handler': handler})
        if environment_variables:
            func_kwargs.update({'Environment': {'Variables': environment_variables}})
        if dead_letter_arn:
            func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})

        # If VPC configuration is given
        if vpc_subnet_ids or vpc_security_group_ids:
            if not vpc_subnet_ids or not vpc_security_group_ids:
                module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')

            func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
                                              'SecurityGroupIds': vpc_security_group_ids}})

        # Finally try to create function
        current_version = None
        try:
            if not check_mode:
                response = client.create_function(**func_kwargs)
                current_version = response['Version']
            changed = True
        except (ParamValidationError, ClientError) as e:
            module.fail_json_aws(e, msg="Trying to create function")

        # Tag Function
        if tags is not None:
            if set_tag(client, module, tags, get_current_function(client, name)):
                changed = True

        response = get_current_function(client, name, qualifier=current_version)
        if not response:
            module.fail_json(msg='Unable to get function information after creating')
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))

    # Delete existing Lambda function
    if state == 'absent' and current_function:
        try:
            if not check_mode:
                client.delete_function(FunctionName=name)
            changed = True
        except (ParamValidationError, ClientError) as e:
            module.fail_json_aws(e, msg="Trying to delete Lambda function")

        module.exit_json(changed=changed)

    # Function already absent, do nothing
    elif state == 'absent':
        module.exit_json(changed=changed)
def __init__(self, module):
    """Keep a reference to the Ansible module and open a boto3 ECS client
    using the module's AWS connection parameters."""
    self.module = module
    region, endpoint_url, connect_kwargs = get_aws_connection_info(module, boto3=True)
    self.ecs = boto3_conn(
        module,
        conn_type='client',
        resource='ecs',
        region=region,
        endpoint=endpoint_url,
        **connect_kwargs)
def main():
    """Create (present) or delete (absent) an AWS NAT gateway, optionally
    waiting for completion and releasing the Elastic IP on removal."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        subnet_id=dict(type='str'),
        eip_address=dict(type='str'),
        allocation_id=dict(type='str'),
        if_exist_do_not_create=dict(type='bool', default=False),
        state=dict(default='present', choices=['present', 'absent']),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=320, required=False),
        release_eip=dict(type='bool', default=False),
        nat_gateway_id=dict(type='str'),
        client_token=dict(type='str'),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['allocation_id', 'eip_address']],
        required_if=[['state', 'absent', ['nat_gateway_id']],
                     ['state', 'present', ['subnet_id']]])

    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore/boto3 is required.')

    state = module.params.get('state').lower()
    check_mode = module.check_mode
    subnet_id = module.params.get('subnet_id')
    allocation_id = module.params.get('allocation_id')
    eip_address = module.params.get('eip_address')
    nat_gateway_id = module.params.get('nat_gateway_id')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    release_eip = module.params.get('release_eip')
    client_token = module.params.get('client_token')
    if_exist_do_not_create = module.params.get('if_exist_do_not_create')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.ClientError as e:
        # BUGFIX: botocore's ClientError has no .msg attribute; stringify the
        # exception itself instead of raising AttributeError here.
        module.fail_json(msg="Boto3 Client Error - " + str(e))

    changed = False
    err_msg = ''

    if state == 'present':
        success, changed, err_msg, results = pre_create(
            client, subnet_id, allocation_id, eip_address, if_exist_do_not_create,
            wait, wait_timeout, client_token, check_mode=check_mode)
    else:
        success, changed, err_msg, results = remove(
            client, nat_gateway_id, wait, wait_timeout, release_eip,
            check_mode=check_mode)

    if not success:
        module.fail_json(msg=err_msg, success=success, changed=changed)
    else:
        module.exit_json(msg=err_msg, success=success, changed=changed, **results)
def main():
    """Entry point for the aws_acm module.

    With state=present, imports the given certificate into ACM (creating a
    new entry, updating a differing existing one, or doing nothing if the
    existing certificate already matches). With state=absent, deletes every
    matching certificate.
    """
    # NOTE: the argument-spec key for alternate parameter names is
    # 'aliases' (plural); the previous 'alias=' key was not recognised,
    # so 'arn:', 'domain:' and 'name:' silently did not work.
    argument_spec = dict(
        certificate=dict(),
        certificate_arn=dict(aliases=['arn']),
        certificate_chain=dict(),
        domain_name=dict(aliases=['domain']),
        name_tag=dict(aliases=['name']),
        private_key=dict(no_log=True),
        state=dict(default='present', choices=['present', 'absent'])
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True)
    acm = ACMServiceManager(module)

    # Check argument requirements
    if module.params['state'] == 'present':
        if not module.params['certificate']:
            module.fail_json(msg="Parameter 'certificate' must be specified if 'state' is specified as 'present'")
        elif module.params['certificate_arn']:
            module.fail_json(msg="Parameter 'certificate_arn' is only valid if parameter 'state' is specified as 'absent'")
        elif not module.params['name_tag']:
            module.fail_json(msg="Parameter 'name_tag' must be specified if parameter 'state' is specified as 'present'")
        elif not module.params['private_key']:
            module.fail_json(msg="Parameter 'private_key' must be specified if 'state' is specified as 'present'")
    else:  # absent
        # exactly one of these should be specified
        absent_args = ['certificate_arn', 'domain_name', 'name_tag']
        if sum([(module.params[a] is not None) for a in absent_args]) != 1:
            for a in absent_args:
                module.debug("%s is %s" % (a, module.params[a]))
            module.fail_json(msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', certificate_arn' or 'domain_name' must be specified")

    # A Name tag is both how we label new certificates and how we locate
    # existing ones.
    if module.params['name_tag']:
        tags = dict(Name=module.params['name_tag'])
    else:
        tags = None

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='acm',
                        region=region, endpoint=ec2_url, **aws_connect_kwargs)

    # fetch the list of certificates currently in ACM
    certificates = acm.get_certificates(
        client=client,
        module=module,
        domain_name=module.params['domain_name'],
        arn=module.params['certificate_arn'],
        only_tags=tags)

    module.debug("Found %d corresponding certificates in ACM" % len(certificates))

    if module.params['state'] == 'present':
        if len(certificates) > 1:
            msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params['name_tag']
            module.fail_json(msg=msg, certificates=certificates)
        elif len(certificates) == 1:
            # update the existing certificate
            module.debug("Existing certificate found in ACM")
            old_cert = certificates[0]  # existing cert in ACM
            if ('tags' not in old_cert) or ('Name' not in old_cert['tags']) \
                    or (old_cert['tags']['Name'] != module.params['name_tag']):
                # shouldn't happen
                module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert)

            if 'certificate' not in old_cert:
                # shouldn't happen
                module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert)

            # Are the existing certificate in ACM and the local certificate the same?
            same = True
            same &= chain_compare(module, old_cert['certificate'], module.params['certificate'])
            if module.params['certificate_chain']:
                # Need to test this
                # not sure if Amazon appends the cert itself to the chain when self-signed
                same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate_chain'])
            else:
                # When there is no chain with a cert
                # it seems Amazon returns the cert itself as the chain
                same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate'])

            if same:
                module.debug("Existing certificate in ACM is the same, doing nothing")
                domain = acm.get_domain_of_cert(client=client, module=module, arn=old_cert['certificate_arn'])
                module.exit_json(certificate=dict(domain_name=domain, arn=old_cert['certificate_arn']),
                                 changed=False)
            else:
                module.debug("Existing certificate in ACM is different, overwriting")

                # update cert in ACM
                arn = acm.import_certificate(client, module,
                                             certificate=module.params['certificate'],
                                             private_key=module.params['private_key'],
                                             certificate_chain=module.params['certificate_chain'],
                                             arn=old_cert['certificate_arn'],
                                             tags=tags)
                domain = acm.get_domain_of_cert(client=client, module=module, arn=arn)
                module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True)
        else:  # len(certificates) == 0
            module.debug("No certificate in ACM. Creating new one.")
            arn = acm.import_certificate(client=client,
                                         module=module,
                                         certificate=module.params['certificate'],
                                         private_key=module.params['private_key'],
                                         certificate_chain=module.params['certificate_chain'],
                                         tags=tags)
            domain = acm.get_domain_of_cert(client=client, module=module, arn=arn)
            module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True)

    else:  # state == absent
        for cert in certificates:
            acm.delete_certificate(client, module, cert['certificate_arn'])
        module.exit_json(arns=[cert['certificate_arn'] for cert in certificates],
                         changed=(len(certificates) > 0))