def send_request(self, url_path, http_method, body_params=None, path_params=None, query_params=None):
    """
    Issue a single REST request over the httpapi connection and normalize the result.

    :param url_path: URL template for the endpoint being called
    :param http_method: HTTP verb (GET, POST, ...)
    :param body_params: optional dict serialized to JSON as the request body
    :param path_params: values substituted into the URL template
    :param query_params: values appended as the query string
    :return: dict with SUCCESS flag, STATUS_CODE and parsed RESPONSE payload
    """
    url = construct_url_path(url_path, path_params, query_params)
    serialized_body = json.dumps(body_params) if body_params else None
    try:
        self._display(http_method, 'url', url)
        if serialized_body:
            self._display(http_method, 'data', serialized_body)
        response, response_buffer = self.connection.send(
            url, serialized_body, method=http_method, headers=BASE_HEADERS)
        response_text = self._get_response_value(response_buffer)
        self._display(http_method, 'response', response_text)
        return {
            ResponseParams.SUCCESS: True,
            ResponseParams.STATUS_CODE: response.getcode(),
            ResponseParams.RESPONSE: self._response_to_json(response_text)
        }
    except HTTPError as e:
        # Being invoked via JSON-RPC, this method does not serialize and pass HTTPError
        # correctly to the method caller. Thus, in order to handle non-200 responses,
        # we need to wrap them into a simple structure and pass explicitly.
        error_body = to_text(e.read())
        self._display(http_method, 'error', error_body)
        return {
            ResponseParams.SUCCESS: False,
            ResponseParams.STATUS_CODE: e.code,
            ResponseParams.RESPONSE: self._response_to_json(error_body)
        }
def send_request(self, method, params):
    """
    Responsible for actual sending of data to the connection httpapi base plugin.
    Does some formatting as well.
    :param params: A formatted dictionary that was returned by self.common_datagram_params()
    before being called here.
    :param method: The preferred API Request method (GET, ADD, POST, etc....)
    :type method: basestring
    :return: Dictionary of status, if it logged in or not.
    :raises FMGBaseException: when no valid session exists, the request is malformed,
        or the underlying transport fails.
    """
    try:
        # A session id is required for every call except the login call itself.
        if self.sid is None and params[0]["url"] != "sys/login/user":
            raise FMGBaseException(
                "An attempt was made to login with the SID None and URL != login url."
            )
    except IndexError:
        raise FMGBaseException(
            "An attempt was made at communicating with a FMG with "
            "no valid session and an incorrectly formatted request.")
    except FMGBaseException:
        # BUGFIX: re-raise the specific exception raised above unchanged. Previously the
        # broad `except Exception` below caught it and replaced its precise message with
        # the generic "unexpected error" text.
        raise
    except Exception:
        raise FMGBaseException(
            "An attempt was made at communicating with a FMG with "
            "no valid session and an unexpected error was discovered.")

    self._update_request_id()
    json_request = {
        "method": method,
        "params": params,
        "session": self.sid,
        "id": self.req_id,
        "verbose": 1
    }
    # Collapse double backslashes produced by json.dumps escaping.
    data = json.dumps(json_request, ensure_ascii=False).replace('\\\\', '\\')
    try:
        # Sending URL and Data in Unicode, per Ansible Specifications for Connection Plugins
        response, response_data = self.connection.send(
            path=to_text(self._url), data=to_text(data), headers=BASE_HEADERS)
        # Get Unicode Response - Must convert from StringIO to unicode first so we can
        # do a replace function below
        result = json.loads(to_text(response_data.getvalue()))
        self._update_self_from_response(result, self._url, data)
        return self._handle_response(result)
    except Exception as err:
        raise FMGBaseException(err)
def destroy_bucket(s3_client, module):
    """
    Delete an S3 bucket, optionally emptying it first.

    When ``force`` is set, every object version in the bucket is deleted before
    the bucket itself; otherwise deletion is attempted directly. Exits the
    module with ``changed`` reflecting whether anything was removed.
    """
    force = module.params.get("force")
    name = module.params.get("name")
    try:
        bucket_is_present = bucket_exists(s3_client, name)
    except EndpointConnectionError as e:
        module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed to check bucket presence")

    if not bucket_is_present:
        module.exit_json(changed=False)

    if force:
        # if there are contents then we need to delete them (including versions)
        # before we can delete the bucket
        try:
            for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
                batch = []
                for key, version in key_version_pairs:
                    # Omit VersionId when it is `None` so that unversioned objects
                    # are deleted using `DeleteObject` rather than
                    # `DeleteObjectVersion`, improving backwards compatibility with
                    # older IAM policies.
                    entry = {'Key': key}
                    if version:
                        entry['VersionId'] = version
                    batch.append(entry)
                if batch:
                    resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': batch})
                    errors = resp.get('Errors')
                    if errors:
                        module.fail_json(
                            msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format(
                                ', '.join([k['Key'] for k in errors])),
                            errors=errors,
                            response=resp)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed while deleting bucket")

    try:
        delete_bucket(s3_client, name)
        s3_client.get_waiter('bucket_not_exists').wait(
            Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60))
    except WaiterError as e:
        module.fail_json_aws(
            e, msg='An error occurred waiting for the bucket to be deleted.')
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed to delete bucket")

    module.exit_json(changed=True)
def decode_rules_as_hcl_string(rules_as_hcl):
    """
    Converts the given HCL (string) representation of rules into a list of rule domain models.
    :param rules_as_hcl: the HCL (string) representation of a collection of rules
    :return: the equivalent domain model to the given rules
    """
    parsed = hcl.loads(to_text(rules_as_hcl))
    return decode_rules_as_json(parsed)
def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False):
    """
    Build a boto3 S3 client tailored to the target endpoint.

    Supports Ceph RGW endpoints, fakes3 test endpoints and plain AWS, and
    applies SigV4 / dualstack configuration when the module parameters ask
    for it.
    """
    # Common parameters; endpoint / use_ssl are overridden per endpoint flavor below.
    params = dict(module=module,
                  conn_type='client',
                  resource='s3',
                  region=location,
                  endpoint=s3_url,
                  **aws_connect_kwargs)
    if s3_url and rgw:  # TODO - test this
        params['use_ssl'] = urlparse(s3_url).scheme == 'https'
    elif is_fakes3(s3_url):
        fakes3 = urlparse(s3_url)
        secure = fakes3.scheme == 'fakes3s'
        protocol = "https" if secure else "http"
        port = fakes3.port
        if port is None:
            port = 443 if secure else 80
        params['endpoint'] = "%s://%s:%s" % (protocol, fakes3.hostname, to_text(port))
        params['use_ssl'] = secure

    # KMS-encrypted puts and explicit SigV4 gets both need the v4 signer.
    if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms':
        params['config'] = botocore.client.Config(signature_version='s3v4')
    elif module.params['mode'] in ('get', 'getstr') and sig_4:
        params['config'] = botocore.client.Config(signature_version='s3v4')

    if module.params['dualstack']:
        dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
        if 'config' in params:
            params['config'] = params['config'].merge(dualconf)
        else:
            params['config'] = dualconf
    return boto3_conn(**params)
def send_request(self, request_method, path, payload=None):
    """
    Send a request over the httpapi connection with basic auth and return
    (status_code, parsed_body).

    :param request_method: HTTP verb (GET, POST, ...)
    :param path: URL path to request
    :param payload: optional dict serialized to JSON as the request body
    :return: tuple of (HTTP status code, decoded JSON body or error string)
    """
    # BUGFIX: send the serialized JSON string. Previously the raw `payload`
    # object was passed to connection.send() while `data` was computed and
    # never used.
    data = json.dumps(payload) if payload else '{}'
    try:
        self._display_request(request_method)
        response, response_data = self.connection.send(
            path, data, method=request_method, headers=BASE_HEADERS,
            force_basic_auth=True)
        value = self._get_response_value(response_data)
        return response.getcode(), self._response_to_json(value)
    except AnsibleConnectionFailure as e:
        # Connection failures carry the HTTP status only in their message text.
        if to_text('401') in to_text(e):
            return 401, 'Authentication failure'
        else:
            return 404, 'Object not found'
    except HTTPError as e:
        error = json.loads(e.read())
        return e.code, error
def _send_service_request(self, path, error_msg_prefix, data=None, **kwargs):
    """
    Send a request while temporarily disabling the connection's own HTTP error
    handling, converting any HTTPError into a readable ConnectionError.

    :param path: URL path to request
    :param error_msg_prefix: text prepended to the error message on failure
    :param data: request body passed through to connection.send
    :raises ConnectionError: with the server's error payload and HTTP code
    """
    try:
        self._ignore_http_errors = True
        return self.connection.send(path, data, **kwargs)
    except HTTPError as http_err:
        # HttpApi connection does not read the error response from HTTPError, so we
        # do it here and wrap it up in ConnectionError, so the actual error message
        # is displayed to the user.
        body = self._response_to_json(to_text(http_err.read()))
        message = '%s: %s' % (error_msg_prefix, body)
        raise ConnectionError(message, http_code=http_err.code)
    finally:
        self._ignore_http_errors = False
def send_request(self, **message_kwargs):
    """
    Responsible for actual sending of data to the connection httpapi base plugin.
    :param message_kwargs: A formatted dictionary containing request info: url, data, method
    :return: Status code and response data.
    """
    url = message_kwargs.get('url', '/')
    body = message_kwargs.get('data', '')
    verb = message_kwargs.get('method', 'GET')
    try:
        response, response_data = self.connection.send(url, body, method=verb)
        # Some response objects expose the code as `.status`, others only on headers.
        status = response.status if hasattr(response, 'status') else response.headers.status
        return status, to_text(response_data.getvalue())
    except Exception as err:
        # NOTE(review): rewrapping in a plain Exception appears intended so the error
        # serializes across the JSON-RPC boundary — confirm before changing.
        raise Exception(err)
def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
    """
    Build a boto3 S3 client for either a Ceph RGW endpoint, a fakes3 test
    endpoint, or plain AWS, depending on the supplied URL and flags.
    """
    # Common parameters; endpoint / use_ssl are overridden per endpoint flavor below.
    params = dict(module=module,
                  conn_type='client',
                  resource='s3',
                  region=location,
                  endpoint=s3_url,
                  **aws_connect_kwargs)
    if s3_url and ceph:  # TODO - test this
        params['use_ssl'] = urlparse(s3_url).scheme == 'https'
    elif is_fakes3(s3_url):
        fakes3 = urlparse(s3_url)
        secure = fakes3.scheme == 'fakes3s'
        protocol = "https" if secure else "http"
        port = fakes3.port
        if port is None:
            port = 443 if secure else 80
        params['endpoint'] = "%s://%s:%s" % (protocol, fakes3.hostname, to_text(port))
        params['use_ssl'] = secure
    return boto3_conn(**params)
def _get_response_value(self, response_data):
    """Return the buffered HTTP response body as text."""
    raw = response_data.getvalue()
    return to_text(raw)
def create_or_update_bucket(s3_client, module, location):
    """
    Ensure an S3 bucket exists and converge its versioning, requester-pays,
    policy, tagging and encryption settings to the module parameters.

    Each feature section follows the same pattern: read the current state,
    tolerate a 'NotImplemented' error (e.g. non-AWS endpoints) when the user
    did not ask for that feature, apply the change if needed, then wait for
    it to take effect. Exits the module via module.exit_json / fail_json_aws.
    """
    policy = module.params.get("policy")
    name = module.params.get("name")
    requester_pays = module.params.get("requester_pays")
    tags = module.params.get("tags")
    purge_tags = module.params.get("purge_tags")
    versioning = module.params.get("versioning")
    encryption = module.params.get("encryption")
    encryption_key_id = module.params.get("encryption_key_id")
    changed = False
    result = {}

    try:
        bucket_is_present = bucket_exists(s3_client, name)
    except EndpointConnectionError as e:
        module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed to check bucket presence")

    if not bucket_is_present:
        try:
            bucket_changed = create_bucket(s3_client, name, location)
            s3_client.get_waiter('bucket_exists').wait(Bucket=name)
            changed = changed or bucket_changed
        except WaiterError as e:
            module.fail_json_aws(
                e,
                msg='An error occurred waiting for the bucket to become available')
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed while creating bucket")

    # Versioning
    try:
        versioning_status = get_bucket_versioning(s3_client, name)
    except BotoCoreError as exp:
        module.fail_json_aws(exp, msg="Failed to get bucket versioning")
    except ClientError as exp:
        # 'NotImplemented' is tolerated unless the user explicitly requested
        # versioning management.
        if exp.response['Error'][
                'Code'] != 'NotImplemented' or versioning is not None:
            module.fail_json_aws(exp, msg="Failed to get bucket versioning")
    else:
        if versioning is not None:
            required_versioning = None
            if versioning and versioning_status.get('Status') != "Enabled":
                required_versioning = 'Enabled'
            elif not versioning and versioning_status.get(
                    'Status') == "Enabled":
                required_versioning = 'Suspended'

            if required_versioning:
                try:
                    put_bucket_versioning(s3_client, name, required_versioning)
                    changed = True
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(
                        e, msg="Failed to update bucket versioning")

                versioning_status = wait_versioning_is_applied(
                    module, s3_client, name, required_versioning)

        # This output format is there to ensure compatibility with previous versions of the module
        result['versioning'] = {
            'Versioning': versioning_status.get('Status', 'Disabled'),
            'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
        }

    # Requester pays
    try:
        requester_pays_status = get_bucket_request_payment(s3_client, name)
    except BotoCoreError as exp:
        module.fail_json_aws(exp, msg="Failed to get bucket request payment")
    except ClientError as exp:
        if exp.response['Error'][
                'Code'] != 'NotImplemented' or requester_pays is not None:
            module.fail_json_aws(exp,
                                 msg="Failed to get bucket request payment")
    else:
        if requester_pays:
            payer = 'Requester' if requester_pays else 'BucketOwner'
            if requester_pays_status != payer:
                put_bucket_request_payment(s3_client, name, payer)
                requester_pays_status = wait_payer_is_applied(
                    module, s3_client, name, payer, should_fail=False)
                if requester_pays_status is None:
                    # We have seen that it happens quite a lot of times that the put request was not taken into
                    # account, so we retry one more time
                    put_bucket_request_payment(s3_client, name, payer)
                    requester_pays_status = wait_payer_is_applied(
                        module, s3_client, name, payer, should_fail=True)
                changed = True

        result['requester_pays'] = requester_pays

    # Policy
    try:
        current_policy = get_bucket_policy(s3_client, name)
    except BotoCoreError as exp:
        module.fail_json_aws(exp, msg="Failed to get bucket policy")
    except ClientError as exp:
        if exp.response['Error'][
                'Code'] != 'NotImplemented' or policy is not None:
            module.fail_json_aws(exp, msg="Failed to get bucket policy")
    else:
        if policy is not None:
            if isinstance(policy, string_types):
                policy = json.loads(policy)

            if not policy and current_policy:
                # Empty desired policy + existing policy -> delete it.
                try:
                    delete_bucket_policy(s3_client, name)
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e,
                                         msg="Failed to delete bucket policy")
                current_policy = wait_policy_is_applied(
                    module, s3_client, name, policy)
                changed = True
            elif compare_policies(current_policy, policy):
                try:
                    put_bucket_policy(s3_client, name, policy)
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e,
                                         msg="Failed to update bucket policy")
                current_policy = wait_policy_is_applied(module,
                                                        s3_client,
                                                        name,
                                                        policy,
                                                        should_fail=False)
                if current_policy is None:
                    # As for request payment, it happens quite a lot of times that the put request was not taken into
                    # account, so we retry one more time
                    put_bucket_policy(s3_client, name, policy)
                    current_policy = wait_policy_is_applied(module,
                                                            s3_client,
                                                            name,
                                                            policy,
                                                            should_fail=True)
                changed = True

        result['policy'] = current_policy

    # Tags
    try:
        current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
    except BotoCoreError as exp:
        module.fail_json_aws(exp, msg="Failed to get bucket tags")
    except ClientError as exp:
        if exp.response['Error'][
                'Code'] != 'NotImplemented' or tags is not None:
            module.fail_json_aws(exp, msg="Failed to get bucket tags")
    else:
        if tags is not None:
            # Tags are always returned as text
            tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
            if not purge_tags:
                # Ensure existing tags that aren't updated by desired tags remain
                current_copy = current_tags_dict.copy()
                current_copy.update(tags)
                tags = current_copy
            if current_tags_dict != tags:
                if tags:
                    try:
                        put_bucket_tagging(s3_client, name, tags)
                    except (BotoCoreError, ClientError) as e:
                        module.fail_json_aws(
                            e, msg="Failed to update bucket tags")
                else:
                    if purge_tags:
                        try:
                            delete_bucket_tagging(s3_client, name)
                        except (BotoCoreError, ClientError) as e:
                            module.fail_json_aws(
                                e, msg="Failed to delete bucket tags")
                current_tags_dict = wait_tags_are_applied(
                    module, s3_client, name, tags)
                changed = True

        result['tags'] = current_tags_dict

    # Encryption
    # get_bucket_encryption only exists on recent botocore versions; absence of
    # the attribute is used as the capability check.
    if hasattr(s3_client, "get_bucket_encryption"):
        try:
            current_encryption = get_bucket_encryption(s3_client, name)
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to get bucket encryption")
    elif encryption is not None:
        module.fail_json(
            msg="Using bucket encryption requires botocore version >= 1.7.41")

    if encryption is not None:
        current_encryption_algorithm = current_encryption.get(
            'SSEAlgorithm') if current_encryption else None
        current_encryption_key = current_encryption.get(
            'KMSMasterKeyID') if current_encryption else None
        if encryption == 'none' and current_encryption_algorithm is not None:
            try:
                delete_bucket_encryption(s3_client, name)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e,
                                     msg="Failed to delete bucket encryption")
            current_encryption = wait_encryption_is_applied(
                module, s3_client, name, None)
            changed = True
        elif encryption != 'none' and (
                encryption != current_encryption_algorithm) or (
                    encryption == 'aws:kms'
                    and current_encryption_key != encryption_key_id):
            expected_encryption = {'SSEAlgorithm': encryption}
            if encryption == 'aws:kms' and encryption_key_id is not None:
                expected_encryption.update(
                    {'KMSMasterKeyID': encryption_key_id})
            try:
                put_bucket_encryption(s3_client, name, expected_encryption)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Failed to set bucket encryption")
            current_encryption = wait_encryption_is_applied(
                module, s3_client, name, expected_encryption)
            changed = True

        result['encryption'] = current_encryption

    module.exit_json(changed=changed, name=name, **result)
def download_s3str(module, s3, bucket, obj, version=None, validate=True):
    """
    Fetch an S3 object and exit the module with its contents as a string.

    Raises Sigv4Required when the endpoint demands AWS Signature Version 4,
    so the caller can retry with a SigV4-configured client.
    """
    if module.check_mode:
        module.exit_json(msg="GET operation skipped - running in check mode",
                         changed=True)
    get_kwargs = {'Bucket': bucket, 'Key': obj}
    if version:
        get_kwargs['VersionId'] = version
    try:
        contents = to_native(s3.get_object(**get_kwargs)["Body"].read())
        module.exit_json(msg="GET operation complete",
                         contents=contents,
                         changed=True)
    except botocore.exceptions.ClientError as e:
        # Surface SigV4-only endpoints to the caller instead of failing outright.
        if e.response['Error']['Code'] == 'InvalidArgument' and \
                'require AWS Signature Version 4' in to_text(e):
            raise Sigv4Required()
        module.fail_json_aws(
            e,
            msg="Failed while getting contents of object %s as a string." % obj)
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(
            e,
            msg="Failed while getting contents of object %s as a string." % obj)
def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
    """
    Download an S3 object to a local path, retrying transient failures.

    First probes the key with get_object (raising Sigv4Required when the
    endpoint demands SigV4), then attempts the download up to retries + 1
    times before failing the module.
    """
    if module.check_mode:
        module.exit_json(msg="GET operation skipped - running in check mode",
                         changed=True)

    # Probe the object first so missing keys / SigV4 requirements are
    # reported before any download attempt.
    probe_kwargs = {'Bucket': bucket, 'Key': obj}
    if version:
        probe_kwargs['VersionId'] = version
    try:
        key = s3.get_object(**probe_kwargs)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'InvalidArgument' and \
                'require AWS Signature Version 4' in to_text(e):
            raise Sigv4Required()
        elif e.response['Error']['Code'] not in ("403", "404"):
            # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
            # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
            module.fail_json_aws(e, msg="Could not find the key %s." % obj)
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Could not find the key %s." % obj)

    optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {}
    # retries is the number of loops; range needs one more to get that count of attempts.
    for attempt in range(retries + 1):
        try:
            s3.download_file(bucket, obj, dest, **optional_kwargs)
            module.exit_json(msg="GET operation complete", changed=True)
        except (botocore.exceptions.ClientError,
                botocore.exceptions.BotoCoreError) as e:
            # Only fail on the final attempt; earlier failures may be transient timeouts.
            if attempt >= retries:
                module.fail_json_aws(e,
                                     msg="Failed while downloading %s." % obj)
        except SSLError as e:  # will ClientError catch SSLError?
            if attempt >= retries:
                module.fail_json_aws(e, msg="s3 download failed")