def replace_aws_resources(event, context, target_bucket, files, aws_resources):
    """Replace deploy-time placeholders in website files and upload them to S3.

    Each file in *files* has its REPLACE_* tokens substituted with the values
    from *aws_resources*, then is uploaded to *target_bucket* under its path
    relative to /tmp/website-contents.

    Args:
        event, context: Lambda/CloudFormation invocation objects, only used to
            send a FAILED custom-resource response on error.
        target_bucket: name of the S3 website bucket.
        files: iterable of local file paths to process and upload.
        aws_resources: dict holding the concrete resource identifiers.
    """
    print(f'Setting up AWS resources to the admin website')
    # Placeholder token in the built website -> key in aws_resources.
    token_map = {
        "REPLACE_AWS_REGION": 'aws_region',
        "REPLACE_USER_POOL_ID": 'user_pool_id',
        "REPLACE_APP_CLIENT_ID": 'app_client_id',
        "REPLACE_IDENTITY_POOL_ID": 'identity_pool_id',
        "REPLACE_PINPOINT_APP_ID": 'pinpoint_app_id',
        "REPLACE_APPSYNC_ENDPOINT": 'appsync_endpoint',
    }
    try:
        for website_file in files:
            with open(website_file) as f:
                content = f.read()
            for token, resource_key in token_map.items():
                content = content.replace(token, aws_resources[resource_key])
            encoded_string = content.encode("utf-8")
            website_key = os.path.relpath(website_file, '/tmp/website-contents')
            # relpath yields a leading '../' for files outside the base dir;
            # strip it so the S3 key stays clean.
            if website_key.startswith('../'):
                file_key = website_key[len('../'):]
            else:
                file_key = website_key
            # guess_type always returns a (type, encoding) tuple — it is never
            # None — so the original unreachable None-check/raise was removed.
            mime_type = mimetypes.guess_type(website_file)[0]
            if mime_type is None:
                mime_type = 'binary/octet-stream'
            s3.Bucket(target_bucket).put_object(Key=file_key,
                                                Body=encoded_string,
                                                ContentType=mime_type)
            print(f'{file_key} uploaded to {target_bucket}')
        print(
            f'AWS Resources set and deployed successfully to {target_bucket} bucket'
        )
    except ClientError as ex:
        print(f'Target Bucket {target_bucket} with error: {ex}')
        cfnResponse.send(event, context, cfnResponse.FAILED, {},
                         "CustomResourcePhysicalID")
def deploy_website_to_target_bucket(event, context, target_bucket, files):
    """Upload the website files as-is to the S3 website bucket.

    Args:
        event, context: Lambda/CloudFormation invocation objects, only used to
            send a FAILED custom-resource response on error.
        target_bucket: name of the S3 website bucket.
        files: iterable of local file paths (under /tmp/website-contents).
    """
    print(f'Starting admin website deployment to {target_bucket} bucket')
    try:
        for website_file in files:
            with open(website_file) as f:
                content = f.read()
            encoded_string = content.encode("utf-8")
            website_key = os.path.relpath(website_file, '/tmp/website-contents')
            # relpath yields a leading '../' for files outside the base dir;
            # strip it so the S3 key stays clean.
            if website_key.startswith('../'):
                file_key = website_key[len('../'):]
            else:
                file_key = website_key
            print('Key being uploaded to S3: ' + file_key)
            # guess_type always returns a (type, encoding) tuple — it is never
            # None — so the original unreachable None-check/raise was removed.
            mime_type = mimetypes.guess_type(website_file)[0]
            if mime_type is None:
                mime_type = 'binary/octet-stream'
            s3.Bucket(target_bucket).put_object(Key=file_key,
                                                Body=encoded_string,
                                                ContentType=mime_type)
            print(f'{file_key} uploaded to {target_bucket}')
        print(f'Admin website deployed successfully to {target_bucket} bucket')
    except ClientError as ex:
        print(f'Target Bucket {target_bucket} with error: {ex}')
        cfnResponse.send(event, context, cfnResponse.FAILED, {},
                         "CustomResourcePhysicalID")
def get_website_content_from_origin_bucket(event, context, origin_bucket, origin_prefix):
    """Download and extract the website archive into the Lambda /tmp directory.

    Downloads <origin_prefix>website-contents.zip from *origin_bucket*,
    extracts it under /tmp/, deletes the archive, then partitions the
    extracted files.

    Returns:
        (files, files_to_replace): files to deploy verbatim, and the
        main*.js bundles that need placeholder replacement. .ico files are
        skipped entirely. Returns None (implicitly) after sending a FAILED
        response if the S3 download fails.
    """
    print(f'Getting website files from {origin_bucket} bucket')
    try:
        key = 'website-contents.zip'
        full_key = origin_prefix + key
        tmp_dir = '/tmp/'
        local_file_name = tmp_dir + key
        s3.Bucket(origin_bucket).download_file(full_key, local_file_name)
        print(f'File {key} downloaded to {local_file_name}')
        print(f'Extracting file {key} to {tmp_dir}')
        with zipfile.ZipFile(local_file_name, 'r') as zip_ref:
            zip_ref.extractall(tmp_dir)
        print(f'Deleting {local_file_name}')
        os.remove(local_file_name)
        files = []
        files_to_replace = []
        for root, _dirs, names in os.walk(tmp_dir):
            for file in names:
                # Favicons are excluded from deployment entirely.
                if '.ico' in file:
                    continue
                # BUG FIX: the original test was all(x in file for x in 'js'),
                # which only checks that the characters 'j' and 's' appear
                # somewhere in the name, so '.json' and '.js.map' matched too.
                # The intent is the main JavaScript bundle(s).
                if file.startswith('main') and file.endswith('.js'):
                    files_to_replace.append(os.path.join(root, file))
                else:
                    files.append(os.path.join(root, file))
        return files, files_to_replace
    except ClientError as ex:
        print(f'Origin Bucket {origin_bucket} with error: {ex}')
        cfnResponse.send(event, context, cfnResponse.FAILED, {},
                         "CustomResourcePhysicalID")
def handler(event, context):
    """Custom Resource entry point: dispatch on the CloudFormation RequestType.

    Create and Delete are delegated to handle_create / handle_delete; Update
    is acknowledged with SUCCESS but intentionally not implemented.
    """
    print(f'request: {json.dumps(event, indent=4)}')
    requests = event['ResourceProperties']['Requests'][0]
    # Collect everything the create/delete handlers need in one place.
    stack_parameters = {
        'aws_region': requests['awsRegion'],
        'lambda_name': requests['lambdaName'],
        'ssm_parameter_name': requests['ssmParamName'],
        'role_name': requests['lambdaRoleName'],
        'policy_name': requests['lambdaPolicyName'],
        'account_id': client_sts.get_caller_identity()['Account'],
        'app_sync_endpoint': requests['appSyncEndpoint'],
    }
    request_type = event['RequestType']
    if request_type == 'Create':
        print('Creating the Stack...')
        handle_create(event=event, context=context,
                      stack_parameters=stack_parameters)
    elif request_type == 'Delete':
        print('Deleting the Stack...')
        handle_delete(event=event, context=context,
                      stack_parameters=stack_parameters)
    else:
        print('Updating Stack. <No implementation>')
        cfnResponse.send(event, context, cfnResponse.SUCCESS, {},
                         "LambdaEdgeCustomResourcePhysicalID")
def handle_delete(event, context, stack_parameters):
    """Delete the resources created by handle_create during stack deletion.

    Detaches and deletes the IAM role and policy, then deletes the SSM
    parameter. Each step runs only if the previous one returned HTTP 200;
    the first non-200 response sends a single FAILED custom-resource
    response. (The original nested five levels of if/else; this flattens
    them with identical semantics. The unused aws_region / lambda_name
    locals were removed.)
    """
    try:
        ssm_parameter_name = stack_parameters['ssm_parameter_name']
        account_id = stack_parameters['account_id']
        role_name = stack_parameters['role_name']
        policy_name = stack_parameters['policy_name']
        policy_arn = f'arn:aws:iam::{account_id}:policy/{policy_name}'
        # Ordered teardown steps; deferred via lambdas so a failed step
        # short-circuits the rest, exactly like the original nesting.
        steps = (
            lambda: iam_client.detach_role_policy(
                RoleName=role_name, PolicyArn=policy_arn),
            lambda: iam_client.delete_role(RoleName=role_name),
            lambda: iam_client.delete_policy(PolicyArn=policy_arn),
            lambda: ssm_client.delete_parameter(Name=ssm_parameter_name),
        )
        for step in steps:
            response = step()
            if response['ResponseMetadata']['HTTPStatusCode'] != 200:
                cfnResponse.send(event, context, cfnResponse.FAILED, {},
                                 "LambdaEdgeCustomResourcePhysicalID")
                return
        cfnResponse.send(event, context, cfnResponse.SUCCESS, {},
                         "LambdaEdgeCustomResourcePhysicalID")
    except ClientError as ex:
        print(f'Error deploying Lambda Edge in us-east-1 with error: {ex}')
        cfnResponse.send(event, context, cfnResponse.FAILED, {},
                         "LambdaEdgeCustomResourcePhysicalID")
def handle_create(event, context, stack_parameters):
    """Create the Lambda@Edge function and all of its supporting resources.

    Creates, in order: an SSM parameter holding the deployment region, an
    IAM execution role + managed policy, and the Lambda function itself
    (zipped index.js with its placeholders substituted), then publishes a
    version and reports its ARN back to CloudFormation as 'lambdaArn'.
    """
    try:
        aws_region = stack_parameters['aws_region']
        lambda_name = stack_parameters['lambda_name']
        ssm_parameter_name = stack_parameters['ssm_parameter_name']
        account_id = stack_parameters['account_id']
        role_name = stack_parameters['role_name']
        policy_name = stack_parameters['policy_name']
        app_sync_endpoint = stack_parameters['app_sync_endpoint']
        # Record the deployment region in SSM; the policy below grants the
        # edge function read access to it.
        ssm_client.put_parameter(
            Name=ssm_parameter_name,
            Value=aws_region,
            Type='String'
        )
        # Lambda@Edge requires both lambda and edgelambda service principals.
        trust_policy = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {
                        "Service": [
                            "edgelambda.amazonaws.com",
                            "lambda.amazonaws.com"
                        ]
                    },
                    "Action": "sts:AssumeRole"
                }
            ]
        }
        # Least-privilege policy: read the SSM parameter, write its own logs.
        managed_policy = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Sid": "AllowSSMParameter",
                    "Effect": "Allow",
                    "Action": "ssm:GetParameter*",
                    "Resource": f'arn:aws:ssm:us-east-1:{account_id}:parameter/{ssm_parameter_name}*'
                },
                {
                    "Effect": "Allow",
                    "Action": "logs:CreateLogGroup",
                    "Resource": f'arn:aws:logs:us-east-1:{account_id}:*'
                },
                {
                    "Effect": "Allow",
                    "Action": [
                        "logs:CreateLogStream",
                        "logs:PutLogEvents"
                    ],
                    "Resource": [
                        f'arn:aws:logs:us-east-1:{account_id}:log-group:/aws/lambda/{lambda_name}:*'
                    ]
                }
            ]
        }
        iam_client.create_role(
            RoleName=role_name,
            Path='/service-role/',
            AssumeRolePolicyDocument=json.dumps(trust_policy),
            Description='Execution role for Lambda edge function'
        )
        iam_client.create_policy(
            PolicyName=policy_name,
            PolicyDocument=json.dumps(managed_policy)
        )
        iam_client.attach_role_policy(
            PolicyArn=f'arn:aws:iam::{account_id}:policy/{policy_name}',
            RoleName=role_name
        )
        # IAM is eventually consistent — give the new role time to propagate
        # before create_function references it.
        time.sleep(10)
        function_code_replaced = function_code.replace(
            "REPLACE_REGION_SSM", ssm_parameter_name)
        function_code_replaced = function_code_replaced.replace(
            "REPLACE_APPSYNC_ENDPOINT", app_sync_endpoint)
        # FIX: build the zip inside a context manager so the archive is
        # always closed, even if writestr raises (original closed manually).
        with zipfile.ZipFile('/tmp/function.zip', mode='w',
                             compression=zipfile.ZIP_DEFLATED) as zf:
            info = zipfile.ZipInfo('index.js')
            # rw-rw-r-- permissions for the entry inside the archive.
            info.external_attr = 0o664 << 16
            zf.writestr(info, function_code_replaced)
        with open('/tmp/function.zip', 'rb') as f:
            code = f.read()
        lambda_client.create_function(
            FunctionName=lambda_name,
            # NOTE(review): nodejs12.x is a deprecated runtime; upgrading
            # changes deployed behavior, so it is flagged rather than changed.
            Runtime="nodejs12.x",
            Role=f'arn:aws:iam::{account_id}:role/service-role/{role_name}',
            Handler="index.handler",
            Code={'ZipFile': code}
        )
        response_lambda = lambda_client.publish_version(
            FunctionName=lambda_name
        )
        version_arn = response_lambda['FunctionArn']
        print(f'Lambda {version_arn} created properly in us-east-1 region')
        lambda_arn = {'lambdaArn': version_arn}
        cfnResponse.send(event, context, cfnResponse.SUCCESS, lambda_arn,
                         "LambdaEdgeCustomResourcePhysicalID")
    except ClientError as ex:
        print(f'Error deploying Lambda Edge in us-east-1 with error: {ex}')
        cfnResponse.send(event, context, cfnResponse.FAILED, {},
                         "LambdaEdgeCustomResourcePhysicalID")
def handler(event, context):
    """Custom Resource entry point for the admin website deployment.

    On Create: pulls the website archive from the origin bucket, deploys the
    static files, then uploads the main JS bundles with their placeholders
    replaced. On Delete: no-op (bucket contents are intentionally kept).
    On Update: no-op. Every path reports SUCCESS to CloudFormation.
    """
    print(f'request: {json.dumps(event, indent=4)}')
    requests = event['ResourceProperties']['Requests'][0]
    origin_bucket = requests['originBucket']
    origin_prefix = requests['originPrefix']
    website_bucket = requests['websiteBucket']
    print(f'Bucket Origin: {origin_bucket}')
    print(f'Bucket Prefix: {origin_prefix}')
    print(f'Bucket Target: {website_bucket}')
    request_type = event['RequestType']
    if request_type == 'Create':
        print('Creating the Stack...')
        # Concrete resource identifiers that replace the build placeholders.
        aws_resources = {
            'aws_region': os.environ['REGION'],
            'user_pool_id': requests['userPoolId'],
            'app_client_id': requests['appClientId'],
            'identity_pool_id': requests['identityPoolId'],
            'pinpoint_app_id': requests['pinpointAppId'],
            'appsync_endpoint': requests['appSyncEndpoint'],
        }
        content, content_to_replace = get_website_content_from_origin_bucket(
            event=event,
            context=context,
            origin_bucket=origin_bucket,
            origin_prefix=origin_prefix)
        deploy_website_to_target_bucket(
            event=event,
            context=context,
            target_bucket=website_bucket,
            files=content)
        replace_aws_resources(
            event=event,
            context=context,
            target_bucket=website_bucket,
            files=content_to_replace,
            aws_resources=aws_resources)
        cfnResponse.send(event, context, cfnResponse.SUCCESS, {},
                         "CustomResourcePhysicalID")
    elif request_type == 'Delete':
        print('Deleting Stack. <No implementation>')
        cfnResponse.send(event, context, cfnResponse.SUCCESS, {},
                         "CustomResourcePhysicalID")
        '''
        # In case you want to clean up the website bucket during deletion. Default behavior is to
        # keep the s3 bucket and its contents.
        try:
            print('Deleting the Stack...')
            bucket = s3.Bucket(website_bucket)
            if is_bucket_empty(bucket):
                print(f'Bucket {website_bucket} is empty. No need to clean up')
            else:
                bucket.objects.all().delete()
                print(f'Bucket {website_bucket} was cleaned up with success')
            cfnResponse.send(event, context, cfnResponse.SUCCESS, {},
                             "CustomResourcePhysicalID")
        except ClientError as ex:
            print(f'Target Bucket {website_bucket} with error: {ex}')
            cfnResponse.send(event, context, cfnResponse.FAILED, {},
                             "CustomResourcePhysicalID")
        '''
    else:
        print('Updating Stack. <No implementation>')
        cfnResponse.send(event, context, cfnResponse.SUCCESS, {},
                         "CustomResourcePhysicalID")