def handler(event, context):
    timer = threading.Timer(
        (context.get_remaining_time_in_millis() / 1000.00) - 0.5,
        timeout, args=[event, context])
    timer.start()
    print('Received event: %s' % json.dumps(event))
    status = cfnresponse.SUCCESS
    reason = None
    physical_id = None
    try:
        if event['RequestType'] != 'Delete':
            data = s3_client.get_object(
                Bucket=event['ResourceProperties']['Bucket'],
                Key=event['ResourceProperties']['Key'])['Body'].read()
            try:
                slots = json.loads(data)
            except Exception as e:
                logging.error('Exception: %s' % e, exc_info=True)
                raise Exception('Slot type json is malformed')
            if type(slots) != list:
                raise Exception('JSON must be a list of one or more Slots')
            for s in slots:
                create_custom_slot_type(s)
            physical_id = ','.join([s['name'] for s in slots])
        else:
            for s in event['PhysicalResourceId'].split(','):
                delete_custom_slot_type(s)
    except Exception as e:
        logging.error('Exception: %s' % e, exc_info=True)
        status = cfnresponse.FAILED
        reason = str(e)
    finally:
        timer.cancel()
        cfnresponse.send(event, context, status, {}, physical_id, reason)
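# The handlers in this collection assume AWS's published cfnresponse helper
# module. A minimal sketch of its send() helper, consistent with how these
# handlers call it (some snippets use a customized variant that adds extras
# such as json_dump_format):
import json
import urllib3

SUCCESS = 'SUCCESS'
FAILED = 'FAILED'
http = urllib3.PoolManager()

def send(event, context, response_status, response_data,
         physical_resource_id=None, reason=None):
    # CloudFormation hands the custom resource a pre-signed S3 URL; the
    # handler reports success or failure by PUTting a JSON document to it.
    body = json.dumps({
        'Status': response_status,
        'Reason': reason or ('See the details in CloudWatch Log Stream: '
                             + context.log_stream_name),
        'PhysicalResourceId': physical_resource_id or context.log_stream_name,
        'StackId': event['StackId'],
        'RequestId': event['RequestId'],
        'LogicalResourceId': event['LogicalResourceId'],
        'Data': response_data,
    })
    http.request('PUT', event['ResponseURL'],
                 headers={'content-type': '',
                          'content-length': str(len(body))},
                 body=body)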
def handler(event, context):
    timer = threading.Timer(
        (context.get_remaining_time_in_millis() / 1000.00) - 0.5,
        timeout, args=[event, context])
    timer.start()
    print('Received event: %s' % json.dumps(event))
    status = cfnresponse.SUCCESS
    reason = None
    physical_id = None
    try:
        data = s3_client.get_object(
            Bucket=event['ResourceProperties']['Bucket'],
            Key=event['ResourceProperties']['Key'])['Body'].read()
        try:
            bot = json.loads(data)
        except Exception as e:
            logging.error('Exception: %s' % e, exc_info=True)
            raise Exception('Bot json is malformed')
        if event['RequestType'] != 'Delete':
            create_bot(bot)
            physical_id = bot['name']
        else:
            delete_bot(event['PhysicalResourceId'])
    except Exception as e:
        logging.error('Exception: %s' % e, exc_info=True)
        status = cfnresponse.FAILED
        reason = str(e)
    finally:
        timer.cancel()
        cfnresponse.send(event, context, status, {}, physical_id, reason)
def handler(event, context):
    timer = threading.Timer(
        (context.get_remaining_time_in_millis() / 1000.00) - 0.5,
        timeout, args=[event, context])
    timer.start()
    print('Received event: %s' % json.dumps(event))
    status = cfnresponse.SUCCESS
    reason = None
    physical_id = None
    try:
        service_name = event['ResourceProperties']['ServiceName']
        custom_suffix = event['ResourceProperties'].get('CustomSuffix')
        if event['RequestType'] != 'Delete':
            physical_id = create_role(service_name, custom_suffix)
        else:
            physical_id = event['PhysicalResourceId']
            delete_role(physical_id)
    except Exception as e:
        logging.error('Exception: %s' % e, exc_info=True)
        status = cfnresponse.FAILED
        reason = str(e)
    finally:
        timer.cancel()
        cfnresponse.send(event, context, status, {'Arn': physical_id},
                         physical_id, reason)
def handler(event, context):
    logger.info('event: {}'.format(cfnresponse.json_dump_format(event)))
    request_type = event.get('RequestType')
    resource_properties = event.get('ResourceProperties')
    response_status = cfnresponse.SUCCESS
    response = {}
    response_id = event.get('RequestId')
    reason = ''
    error = ''
    if request_type == 'Delete':
        try:
            buckets = get_buckets_from_properties(resource_properties)
            delete_buckets(buckets)
            logger.info('delete_buckets completed')
            reason = 'Delete'
        except Exception as e:
            error = 'failed to delete buckets: {}'.format(e)
    if error:
        logger.error(error)
        response_status = cfnresponse.FAILED
        reason = error
    cfnresponse.send(
        event, context, response_status, response, response_id, reason
    )
def main(event, context):
    request_type = event.get('RequestType')
    if not request_type:
        send(event, context, FAILED, reason='RequestType must be provided')
    elif request_type == 'Delete':
        send(event, context, SUCCESS, reason='Nothing to do upon delete')
    else:
        do_attach_detach_elbs(event, context)
def send_status(self, PASS_OR_FAIL):
    send(
        self.event,
        self.context,
        PASS_OR_FAIL,
        reason=self.reason,
        response_data=self.response_data
    )
def handler(event, context):
    ec2 = boto3.client('ec2')
    resp = ec2.describe_availability_zones()
    az_list = [az.get('ZoneName') for az in resp.get('AvailabilityZones')
               if az.get('State') == 'available']
    data = {'AZ0': az_list[0], 'AZ1': az_list[1]}
    cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data=data)
    return resp
def handler(event, context): """ CloudFormation Custom Resource Lambda Handler """ import cfnresponse logger.info('event: {}'.format(cfnresponse.json_dump_format(event))) request_type = event.get('RequestType') resource_properties = event.get('ResourceProperties') response_status = cfnresponse.SUCCESS response = {} response_id = event.get('RequestId') reason = request_type error = '' name_prefix = resource_properties.get('NamePrefix') should_delete = resource_properties.get('ShouldDelete', True) bot_definition = read_bot_definition_file(BOT_DEFINITION_FILENAME) bot_definition_prefix = add_prefix(bot_definition, name_prefix) if (request_type in ['Create', 'Update']): try: response_import = import_bot(bot_definition=bot_definition_prefix) response['BotName'] = response_import['bot']['name'] except Exception as e: error = 'failed to {} bot: {}'.format(request_type, e) pass if (request_type == 'Delete' and should_delete != 'false'): try: bot_definition = read_bot_definition_file() bot_name = name_prefix + bot_definition['bot']['name'] delete_bot(bot_name) except Exception as e: error = 'failed to delete bot: {}'.format(e) pass if error: logger.error(error) response_status = cfnresponse.FAILED reason = error if bool(context): cfnresponse.send( event, context, response_status, response, response_id, reason )
def lambda_handler(event, context):
    logger.info("Received event: " + json.dumps(event, indent=2))
    attachment = zone_attach(event, context)
    if event['RequestType'] == 'Delete':
        attachment.delete()
        return
    if event['RequestType'] == 'Create':
        attachment.create()
        return
    if event['RequestType'] == 'Update':
        attachment.update()
        return
    if context:
        send(event, context, FAILED,
             reason="Unknown Request Type %s" % event['RequestType'])
def handler(event, context, sleep=5):
    # We don't want to delete versions when CloudFormation says so, because
    # we want to keep them forever. If this delete comes from a stack delete
    # operation, deleting the lambda will delete all related versions.
    if event['RequestType'] == 'Delete':
        send(event, context, SUCCESS)
        return

    output = publish_version(
        function_name=event['ResourceProperties']['FunctionName'])

    # Wait a bit until the version becomes available.
    # FUTURE: Loop until available
    time.sleep(sleep)

    send(event, context, SUCCESS, response_data={'Version': output['Version']})
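# A possible shape for the "FUTURE: Loop until available" note above: poll
# the published version instead of sleeping a fixed interval. wait_for_version
# is a hypothetical helper; it assumes the version reports its State via
# get_function_configuration.
import time
import boto3

def wait_for_version(function_name, version, delay=2, max_attempts=30):
    client = boto3.client('lambda')
    for _ in range(max_attempts):
        config = client.get_function_configuration(
            FunctionName=function_name, Qualifier=version)
        if config.get('State') == 'Active':
            return config
        time.sleep(delay)
    raise RuntimeError('version {} never became active'.format(version))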
def handler(event, context): print("Received request:", json.dumps(event, indent=4)) action = event["RequestType"] stack = event["ResourceProperties"]["StackName"] resources = int(event["ResourceProperties"]["ResourceCount"]) try: log(stack, action, 1) if action == "Create": log(stack, "ResourceCount", resources) cfnresponse.send(event, context, cfnresponse.SUCCESS, {}, "{} metrics".format(stack)) except Exception as e: cfnresponse.send(event, context, cfnresponse.FAILED, { "Data": str(e), }, "{} metrics".format(stack))
def handler(event, context): logger.info("Received event: %s" % json.dumps(event)) source_bucket = event['ResourceProperties']['SourceBucket'] source_prefix = event['ResourceProperties'].get('SourcePrefix') or '' bucket = event['ResourceProperties']['Bucket'] prefix = event['ResourceProperties'].get('Prefix') or '' result = cfnresponse.SUCCESS try: if event['RequestType'] == 'Create' or event['RequestType'] == 'Update': result = copy_objects(source_bucket, source_prefix, bucket, prefix) elif event['RequestType'] == 'Delete': result = delete_objects(bucket, prefix) except ClientError as e: logger.error('Error: %s', e) result = cfnresponse.FAILED cfnresponse.send(event, context, result, {})
def handler(event, context):
    client = boto3.client('logs')
    responseStatus = cfnresponse.SUCCESS
    responseData = {}
    if event['RequestType'] == 'Create' or event['RequestType'] == 'Update':
        groupName = '/aws/lambda/%s' % event['ResourceProperties']['FunctionName']
        try:
            client.create_log_group(logGroupName=groupName)
            responseData['LogGroupName'] = groupName
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == 'ResourceAlreadyExistsException':
                responseData['LogGroupName'] = groupName
            else:
                responseData['Error'] = e.response['Error']['Message']
                responseStatus = cfnresponse.FAILED
    cfnresponse.send(event, context, responseStatus, responseData)
def test_cfn_send_success(self, open_mock):
    open_mock.return_value = Mock()
    open_mock.return_value.msg = 'OK'
    open_mock.return_value.getcode.return_value = 200
    response = send(
        event=self._event(),
        context=Mock(log_stream_name='log_stream_name'),
        response_status='response_status',
        response_data='response_data',
    )
    self.assertTrue(response)
def handler(event, context):
    logger.info('got event {}'.format(event))
    if event['RequestType'] == 'Delete':
        send(event, context, SUCCESS)
        return
    response = {
        'StackId': event['StackId'],
        'RequestId': event['RequestId'],
        'LogicalResourceId': event['LogicalResourceId'],
        'Status': 'SUCCESS',
        'Data': {
            'OutputName1': 'Value1',
            'OutputName2': 'Value2',
        }
    }
    send(response, context, SUCCESS)
    return response
def delete_queues_handler(event, context):
    """lambda_handler is the entrypoint of this "function"."""
    logger.info("Request: %s", json.dumps(event))
    # In most other contexts that Lambda is used, the return value is
    # immediately available. However, CloudFormation doesn't use those standard
    # return values, and instead wants the agent to hit a URL to indicate
    # success or failure. If CloudFormation does not receive an explicit
    # notification, it can only rely on timeouts to detect failure, and that
    # can waste human time.
    #
    # We wrap all the logic in a try/except so that we always try to send
    # CloudFormation a status message.
    try:
        sqs_client = boto3.client('sqs')
        handler(event, context, sqs_client)
        send(event, context, SUCCESS)
    except Exception as e:
        logger.error("Exception: %r", e)
        # There is no point in trying to notify if we for some reason already
        # failed to notify. CloudFormation will eventually time out.
        if not isinstance(e, urllib2.URLError):
            send(event, context, FAILED)
        raise
def test_cfn_send_error(self, open_mock):
    class MockHTTPError(urllib2.HTTPError):
        def __init__(self, code=503):
            self.code = code

    open_mock.side_effect = MockHTTPError
    response = send(
        event=self._event(),
        context=Mock(log_stream_name='log_stream_name'),
        response_status='response_status',
        response_data='response_data',
    )
    self.assertFalse(response)
def handler(event, context): print("Received Event: " + json.dumps(event, indent=2)) if event['RequestType'] == 'Delete': # Implement Delete Operation send(event, context, SUCCESS) return if event['RequestType'] == 'Update': # Implement Update Operation pass if event['RequestType'] == 'Create': # Implement Create Operation pass # You'll be able to use Fn::GetAtt to query values returned as part # of the response_data dictionary. # "Fn::GetAtt": [ # "YouResource", # "SomeProperty" # ] send(event, context, SUCCESS, response_data={'SomeProperty': "12345"})
def lambda_handler(event, context):
    try:
        print(json.dumps(event))
        if event['RequestType'] == 'Delete':
            s3 = boto3.client('s3')
            # Delete KeyBucket contents
            if "KeyBucket" in event["ResourceProperties"].keys():
                print('Getting KeyBucket objects...')
                s3objects = s3.list_objects_v2(
                    Bucket=event["ResourceProperties"]["KeyBucket"])
                if 'Contents' in s3objects.keys():
                    print('Deleting KeyBucket objects %s...' % str(
                        [{'Key': key['Key']} for key in s3objects['Contents']]))
                    s3.delete_objects(
                        Bucket=event["ResourceProperties"]["KeyBucket"],
                        Delete={'Objects': [{'Key': key['Key']}
                                            for key in s3objects['Contents']]})
            # Delete Output bucket contents and versions
            if "OutputBucket" in event["ResourceProperties"].keys():
                print('Getting OutputBucket objects...')
                objects = []
                versions = s3.list_object_versions(
                    Bucket=event["ResourceProperties"]["OutputBucket"])
                while versions:
                    if 'Versions' in versions.keys():
                        for v in versions['Versions']:
                            objects.append({'Key': v['Key'],
                                            'VersionId': v['VersionId']})
                    if 'DeleteMarkers' in versions.keys():
                        for v in versions['DeleteMarkers']:
                            objects.append({'Key': v['Key'],
                                            'VersionId': v['VersionId']})
                    if versions['IsTruncated']:
                        versions = s3.list_object_versions(
                            Bucket=event["ResourceProperties"]["OutputBucket"],
                            VersionIdMarker=versions['NextVersionIdMarker'])
                    else:
                        versions = False
                if objects:
                    s3.delete_objects(
                        Bucket=event["ResourceProperties"]["OutputBucket"],
                        Delete={'Objects': objects})
    except Exception as e:
        print(e)
        traceback.print_exc()
    # Always report SUCCESS so a failed cleanup cannot block stack deletion.
    cfnresponse.send(event, context, cfnresponse.SUCCESS, {}, '')
def handler(event, context):
    print('Received event: %s' % json.dumps(event))
    status = cfnresponse.SUCCESS
    physical_resource_id = 'PVCleanup'
    data = {}
    reason = None
    try:
        if event['RequestType'] == 'Delete':
            print('Removing any orphaned EBS volumes...')
            tag_name = ('tag:kubernetes.io/cluster/%s'
                        % event['ResourceProperties']['ClusterId'])
            response = boto_throttle_backoff(
                ec2_client.describe_volumes,
                Filters=[{'Name': tag_name, 'Values': ['owned']}]
            )['Volumes']
            for volume in response:
                print('deleting volume %s' % volume['VolumeId'])
                boto_throttle_backoff(ec2_client.delete_volume,
                                      VolumeId=volume['VolumeId'])
    except Exception as e:
        logging.error('Exception: %s' % e, exc_info=True)
        reason = str(e)
        status = cfnresponse.FAILED
    finally:
        if event['RequestType'] == 'Delete':
            try:
                wait_message = ('waiting for events for request_id %s to '
                                'propagate to cloudwatch...'
                                % context.aws_request_id)
                while not logs_client.filter_log_events(
                        logGroupName=context.log_group_name,
                        logStreamNames=[context.log_stream_name],
                        filterPattern='"%s"' % wait_message)['events']:
                    print(wait_message)
                    time.sleep(5)
            except Exception as e:
                logging.error('Exception: %s' % e, exc_info=True)
                time.sleep(120)
        cfnresponse.send(event, context, status, data,
                         physical_resource_id, reason)
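# boto_throttle_backoff() is referenced above but not defined here; a sketch
# assuming it simply retries the boto3 call with exponential backoff whenever
# the API reports throttling:
import time
from botocore.exceptions import ClientError

THROTTLE_CODES = ('Throttling', 'ThrottlingException', 'RequestLimitExceeded')

def boto_throttle_backoff(boto_method, max_retries=8, **kwargs):
    delay = 1
    for _ in range(max_retries):
        try:
            return boto_method(**kwargs)
        except ClientError as e:
            if e.response['Error']['Code'] not in THROTTLE_CODES:
                raise
            time.sleep(delay)
            delay *= 2
    return boto_method(**kwargs)  # final attempt; let any error propagate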
def handler(event, context):
    logger.info('event: {}'.format(cfnresponse.json_dump_format(event)))
    request_type = event.get('RequestType')
    resource_properties = event.get('ResourceProperties')
    response = {}
    response_status = cfnresponse.SUCCESS
    request_id = ''
    reason = ''
    if request_type in ['Create', 'Update']:
        try:
            response = start_build(resource_properties)
            logger.info(
                'start_build response: {}'.format(
                    cfnresponse.json_dump_format(response)
                )
            )
            response_status = cfnresponse.SUCCESS
            request_id = response['ResponseMetadata']['RequestId']
            reason = 'Create'
        except Exception as e:
            error = 'failed to start build: {}'.format(e)
            logger.error(error)
            response_status = cfnresponse.FAILED
            reason = error
    cfnresponse.send(
        event, context, response_status, response, request_id, reason
    )
def handler(event, context):
    if event['RequestType'] == 'Delete':
        cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
        return  # nothing to clean up; published versions live with the function
    function_name = event['ResourceProperties']['FunctionName']
    try:
        code_sha256 = client.get_function(
            FunctionName=function_name)['Configuration']['CodeSha256']
    except Exception as e:
        print(e)
        cfnresponse.send(event, context, cfnresponse.FAILED, {})
    else:
        try:
            resp = client.publish_version(
                FunctionName=function_name, CodeSha256=code_sha256)
        except Exception as e:
            print(e)
            cfnresponse.send(event, context, cfnresponse.FAILED, {})
        else:
            response_data = {'Response': json.dumps(resp)}
            cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data)
def handler(event, context):
    logger = logging.getLogger("crypto_cfn")
    logger.setLevel(logging.DEBUG)
    rp = event["ResourceProperties"]
    name = rp["Name"]
    value = None
    try:
        if event["RequestType"] in ["Create", "Update"]:
            if event["RequestType"] == "Create" and parameter_exist(name):
                raise NameError(
                    "A Parameter named {} already exists".format(name))
            generate_password = get_property(rp, "GeneratePassword")
            value = get_property(rp, "Value")
            if value and generate_password in ['true', 'True', '1', True, 1]:
                raise ValueError(
                    "Property Value and GeneratePassword cannot be used "
                    "at the same time")
            if generate_password in ['true', 'True', '1', True, 1]:
                password_length = get_property(rp, "GeneratePasswordLength")
                allow_specials = get_property(
                    rp, "GeneratePasswordAllowSpecialCharacters")
                if not password_length:
                    raise ValueError(
                        "The Resource property GeneratePasswordLength is required")
                try:
                    password_length = int(password_length)
                except ValueError:
                    raise ValueError(
                        "The Resource property GeneratePasswordLength must "
                        "be an integer")
                charset = ascii_uppercase + ascii_lowercase + digits
                if allow_specials and allow_specials in [
                        'true', 'True', '1', True, 1]:
                    charset = charset + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
                value = ''.join(
                    choice(charset) for i in range(password_length))
            is_smtp_user = get_property(rp, "GenerateSMTPPassword")
            iam_secret = get_property(rp, "IAMSecretKey")
            if is_smtp_user:
                if not iam_secret:
                    raise ValueError(
                        "To create an SMTP password the IAM secret access "
                        "key is required")
                message = b"SendRawEmail"
                result = bytearray(b'\x02')
                result.extend(
                    newkey(iam_secret.encode('utf-8'), message,
                           digestmod=sha256).digest())
                value = b64encode(result).decode('ascii')
            if not value:
                raise ValueError("Either generate a password or set a value")
            response = boto3.client('ssm', config=config).put_parameter(
                Name=name,
                Description=rp["Description"],
                Value=value,
                Type="SecureString",
                KeyId=rp["KeyId"],
                Overwrite=True)
            logger.info("Successfully stored parameter {}".format(name))
            cfnresponse.send(event, context, cfnresponse.SUCCESS, response, name)
        elif event["RequestType"] in ["Delete"]:
            try:
                boto3.client('ssm', config=config).delete_parameter(
                    Name=event["PhysicalResourceId"])
            except ClientError as e:
                if e.response['Error']['Code'] not in ['ParameterNotFound']:
                    raise
            logger.info("Successfully deleted parameter: {}".format(name))
            cfnresponse.send(event, context, cfnresponse.SUCCESS, None, name)
    except Exception as ex:
        logger.error("Failed to %s parameter: %s", event["RequestType"], name)
        logger.debug("Stack trace %s", traceback.format_exc())
        if event["RequestType"] in ["Create", "Update", "Delete"]:
            cfnresponse.send(event, context, cfnresponse.FAILED, None, "0")
        else:
            cfnresponse.send(event, context, cfnresponse.SUCCESS, None, "0")
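# get_property() and parameter_exist() are assumed helpers in the snippet
# above; plausible minimal sketches:
import boto3

def get_property(resource_properties, name, default=None):
    # Resource properties arrive as a plain dict from CloudFormation.
    return resource_properties.get(name, default)

def parameter_exist(name):
    ssm = boto3.client('ssm')
    try:
        ssm.get_parameter(Name=name)
        return True
    except ssm.exceptions.ParameterNotFound:
        return False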
def handler(event, context): """ Bucket notifications configuration has a disgraceful API - sorry. The problem is that you can't individually deal with each configuration and you need to send a (potentially) enormous dictionary with all the configuration. This has lot's of implications when you are trying to implement a tool like gordon: - People might have already some notifications configured. - People might change them - which causes race conditions. The approach we have decided to follow is the following: 1) If there is no configuration attached to this bucket, we continue. 2) If there is any configuration attached, we check that the ID of each notification starts with "gordon-". If that's the case, we are "safe"... in the sense that whatever is the previous status we have the new "correct" configuration. If there is any notification with an id which doesn't starts with "gordon-" we fail miserably... because it is quite risky to start mixing events between two sources. We need to make this behaviour pretty clear in the documentation. Why the physical_resource_id is constant accross notification configurations of the same bucket is a workaround the same issue. We need to keep it constant so CloudFormation don't issue a delete on the old resource once the old one gets updated and get a new physical_resource_id because (for example) the lambda ID has changed. As result, CF will only trigger a delete when the bucket changes - which is expected. """ properties = event['ResourceProperties'] # It doesn't matter how big you put this on the doc... people wil # always put bucket's arn instead of name... and it would be a shame # to fail because this stupid error. buckent_name = properties['Bucket'].replace('arn:aws:s3:::', '') physical_resource_id = '{}-bucket-notification-configuration'.format(buckent_name) client = boto3.client('s3') existing_notifications = client.get_bucket_notification_configuration( Bucket=buckent_name ) # Check if there is any notification-id which doesn't start with gordon- # If so... fail. for _type in AVAILABLE_CONFIGURATIONS: for notification in existing_notifications.get(_type, []): if not notification.get('Id', '').startswith('gordon-'): send( event, context, FAILED, physical_resource_id=physical_resource_id, reason=("Bucket {} contains a notification called {} " "which was not created by gordon, hence the risk " "of trying it to add/modify/delete new notifications. " "Please check the documentation in order to understand " "why gordon refuses to proceed.").format( buckent_name, notification.get('Id', '') ) ) return # For Delete requests, we need to simply send an empty dictionary. # Again - this have bad implications if the user has tried to configure # notification manually, because we are going to override their # configuration. There is no much else we can do. 
configuration = {} if event['RequestType'] != 'Delete': arn_name_map = { 'LambdaFunctionConfigurations': 'LambdaFunctionArn', 'TopicConfigurations': 'TopicArn', 'QueueConfigurations': 'QueueArn', } for _type in AVAILABLE_CONFIGURATIONS: configuration[_type] = [] for notification in properties.get(_type, []): data = { 'Id': notification['Id'], arn_name_map.get(_type): notification['DestinationArn'], 'Events': notification['Events'], } if notification['KeyFilters']: data['Filter'] = { 'Key': { 'FilterRules': notification['KeyFilters'] } } configuration[_type].append(data) client.put_bucket_notification_configuration( Bucket=buckent_name, NotificationConfiguration=configuration ) send(event, context, SUCCESS, physical_resource_id=physical_resource_id)
def timeout(event, context):
    logging.error(
        'Execution is about to time out, sending failure response to CloudFormation'
    )
    cfnresponse.send(event, context, cfnresponse.FAILED, {}, None)
def handler_update(event, context):
    cfnresponse.send(event, context, cfnresponse.FAILED, {},
                     event["PhysicalResourceId"])
def handler(event, context):
    # Let AWS CloudFormation know its request succeeded
    if 'RequestType' in event:
        responseData = {}
        cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData,
                         "CustomResourcePhysicalID")

    dataexchange = boto3.client(service_name='dataexchange', region_name=region)
    s3 = boto3.client(service_name='s3', region_name=region)

    # If the request is from a CloudFormation custom resource, get the
    # RevisionId for the first revision
    if 'ResourceProperties' in event:
        data_set_id = event['ResourceProperties']['data_set_id']
        revision_ids = [event['ResourceProperties']['RevisionIds']]
        print("Initial revision retrieval")
        print(event)
    else:
        data_set_id = event['resources'][0]
        revision_ids = event['detail']['RevisionIds']
        print("Triggered revision retrieval")
        print(event)

    # Used to store the Ids of the Jobs exporting the assets to S3.
    job_ids = set()
    for revision_id in revision_ids:
        # Start Jobs to export all the assets to S3.
        # We export in batches of 100 as the StartJob API has a limit of 100.
        revision_assets = dataexchange.list_revision_assets(
            DataSetId=data_set_id, RevisionId=revision_id)
        assets_chunks = grouper(revision_assets['Assets'], 100)
        for assets_chunk in assets_chunks:
            # Create the Job which exports assets to S3.
            export_job = dataexchange.create_job(
                Type='EXPORT_ASSETS_TO_S3',
                Details={
                    'ExportAssetsToS3': {
                        'DataSetId': data_set_id,
                        'RevisionId': revision_id,
                        'AssetDestinations': [{
                            'AssetId': asset['Id'],
                            'Bucket': destination_bucket
                        } for asset in assets_chunk]
                    }
                })
            # Start the Job and save the JobId.
            dataexchange.start_job(JobId=export_job['Id'])
            job_ids.add(export_job['Id'])

    # Iterate until all remaining jobs have reached a terminal state,
    # or an error is found.
    completed_jobs = set()
    while job_ids != completed_jobs:
        for job_id in job_ids:
            if job_id in completed_jobs:
                continue
            get_job_response = dataexchange.get_job(JobId=job_id)
            if get_job_response['State'] == 'COMPLETED':
                print("Job {} completed".format(job_id))
                completed_jobs.add(job_id)
            if get_job_response['State'] == 'ERROR':
                job_errors = get_job_response['Errors']
                raise Exception('JobId: {} failed with errors:\n{}'.format(
                    job_id, job_errors))
            # Sleep to ensure we don't get throttled by the GetJob API.
            time.sleep(0.2)
    return {'statusCode': 200, 'body': json.dumps('All jobs completed.')}
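# grouper() is not defined in the snippet above; a minimal sketch that yields
# the assets in chunks of at most n (here 100, the StartJob batch limit):
def grouper(iterable, n):
    chunk = []
    for item in iterable:
        chunk.append(item)
        if len(chunk) == n:
            yield chunk
            chunk = []
    if chunk:
        yield chunk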
def lowest_open_priority(event, context):
    try:
        cfn_request_type = event[u'RequestType']
        print("Handling CFN request of type {}".format(cfn_request_type))
        cfn = True
    except KeyError:
        print("Handling non-CFN request")
        cfn = False

    # Check for delete of lambda backed custom resource by CFN, because that
    # is a valid reason for the absence of a Listener-Arn in the body
    if cfn and cfn_request_type == u'Delete':
        print("Responding OK to CFN delete request. And exiting...")
        cfnresponse.send(event, context, cfnresponse.SUCCESS)
        return

    try:
        if cfn:
            listener_arn = event[u'ResourceProperties'][u'Listener-Arn']
        else:
            request_body = json.loads(event[u'body'].encode('utf-8'))
            listener_arn = request_body['ResourceProperties']['Listener-Arn']
        print("Listener-Arn retrieved: '{}'".format(listener_arn))
    except KeyError:
        print("No 'Listener-Arn' was supplied. Returning 400")
        return {
            "statusCode": 400,
            "body": json.dumps(
                "Request parameter 'Listener-Arn' needs to be supplied")
        }

    elb = boto3.client('elbv2')

    def retrieve_rule_pages():
        resp = elb.describe_rules(
            ListenerArn=listener_arn,
            PageSize=10,
        )
        yield resp
        try:
            while 'NextMarker' in resp:
                resp = elb.describe_rules(
                    ListenerArn=listener_arn,
                    PageSize=10,
                    Marker=resp['NextMarker']
                )
                yield resp
        except KeyError:
            # last page does not contain a 'NextMarker', even though
            # http://boto3.readthedocs.io/en/latest/reference/services/elbv2.html
            # specifies an empty string
            pass

    def retrieve_rule_priorities():
        for page in retrieve_rule_pages():
            print("Page request: {}".format(page))
            for rule in page['Rules']:
                print("Found rule with priority {}".format(rule['Priority']))
                if rule['Priority'] != 'default':
                    yield int(rule['Priority'])

    lowest_priority = 1
    while lowest_priority in retrieve_rule_priorities():
        lowest_priority = lowest_priority + 1

    response_data = {
        'lowestOpenPriority': lowest_priority,
    }
    print("Responding with data {}".format(response_data))
    if cfn:
        cfnresponse.send(event, context, cfnresponse.SUCCESS,
                         response_data=response_data)
    else:
        return {
            "statusCode": 200,
            "body": json.dumps(response_data),
        }
def main(event, context):
    log.getLogger().setLevel(log.INFO)
    # This needs to change if there are to be multiple resources in the same stack
    physical_id = 'AdConnector'
    responseData = {}
    try:
        log.info('Input event: %s', event)
        if event['RequestType'] == 'Delete':
            responseData['Complete'] = "True"
            adConnectorID = event['PhysicalResourceId']
            client.delete_directory(DirectoryId=adConnectorID)
            cfnresponse.send(event, context, cfnresponse.SUCCESS,
                             responseData, adConnectorID)
        if event['RequestType'] == 'Create':
            IdentityAccountAdConnectorSecretArn = event['ResourceProperties'][
                'IdentityAccountAdConnectorSecretArn']
            try:
                get_secret_value_response = smClient.get_secret_value(
                    SecretId=IdentityAccountAdConnectorSecretArn)
            except ClientError as e:
                raise e
            else:
                if 'SecretString' in get_secret_value_response:
                    secret = get_secret_value_response['SecretString']
                    secretDict = json.loads(secret)
                    name = secretDict['DomainApex']
                    short_name = secretDict['DomainControllerShortName']
                    password = secretDict['password']
                    customerDnsIps0 = secretDict['DomainControllerDnsAddress0']
                    customerDnsIps1 = secretDict['DomainControllerDnsAddress1']
                    customerUserName = secretDict[
                        'AdConnectorServiceAccountUsername']
                    description = event['ResourceProperties']['Description']
                    size = event['ResourceProperties']['Size']
                    vpcId = event['ResourceProperties']['VpcId']
                    subnetIds = event['ResourceProperties']['SubnetIds']
                    response = client.connect_directory(
                        Name=name,
                        ShortName=short_name,
                        Password=password,
                        Description=description,
                        Size=size,
                        ConnectSettings={
                            'VpcId': vpcId,
                            'SubnetIds': subnetIds,
                            'CustomerDnsIps': [customerDnsIps0,
                                               customerDnsIps1],
                            'CustomerUserName': customerUserName
                        })
                    responseData['response'] = response
                    connectorId = responseData['response']['DirectoryId']
                    log.info(connectorId)
                    cfnresponse.send(event, context, cfnresponse.SUCCESS,
                                     responseData, connectorId)
    except Exception as e:
        log.exception(e)
        # cfnresponse's error message is always "see CloudWatch"
        cfnresponse.send(event, context, cfnresponse.FAILED, {}, physical_id)
def handler(event, context):
    responseData = {}
    S3Bucket = event['ResourceProperties']['Code']['S3Bucket']
    S3Key = event['ResourceProperties']['Code']['S3Key']
    FunctionName = event['ResourceProperties']['FunctionName']
    Runtime = event['ResourceProperties']['Runtime']
    Timeout = int(event['ResourceProperties']['Timeout'])
    MemorySize = int(event['ResourceProperties']['MemorySize'])
    Handler = event['ResourceProperties']['Handler']
    Role = event['ResourceProperties']['Role']
    AWSRegion = event['ResourceProperties']['AWSRegion']
    lambdaClient = boto3.client('lambda', region_name=AWSRegion)
    try:
        if event['RequestType'] == 'Create':
            response = lambdaClient.create_function(
                Publish=True,
                FunctionName=FunctionName,
                Code={'S3Bucket': S3Bucket, 'S3Key': S3Key},
                Timeout=Timeout,
                MemorySize=MemorySize,
                Runtime=Runtime,
                Role=Role,
                Handler=Handler,
            )
            responseData['Arn'] = response['FunctionArn']
            responseData['Version'] = response['Version']
            print("SUCCESS, ResponseData=" + str(responseData))
            cfnresponse.send(event, context, cfnresponse.SUCCESS,
                             responseData, response['FunctionArn'])
        elif event['RequestType'] == 'Update':
            response = lambdaClient.update_function_configuration(
                FunctionName=event['PhysicalResourceId'],
                Timeout=Timeout,
                MemorySize=MemorySize,
                Runtime=Runtime,
                Role=Role,
                Handler=Handler)
            response = lambdaClient.update_function_code(
                Publish=True,
                FunctionName=event['PhysicalResourceId'],
                S3Bucket=S3Bucket,
                S3Key=S3Key)
            responseData['Arn'] = event['PhysicalResourceId']
            responseData['Version'] = response['Version']
            print("Update SUCCESS, ResponseData=" + str(responseData))
            cfnresponse.send(event, context, cfnresponse.SUCCESS,
                             responseData, event['PhysicalResourceId'])
        elif event['RequestType'] == 'Delete':
            lambdaClient.delete_function(
                FunctionName=event['PhysicalResourceId'])
            print("Delete SUCCESS")
            cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData,
                             event.get('PhysicalResourceId', ''))
    except Exception as e:
        responseData['Error'] = str(e)
        # A Create that fails has no PhysicalResourceId in the event yet.
        cfnresponse.send(event, context, cfnresponse.FAILED, responseData,
                         event.get('PhysicalResourceId', ''))
        print("FAILED ERROR: " + responseData['Error'])
def lambda_handler(event, context):
    print(event)
    responseData = {}
    responseData['data'] = 'Success'
    roleName = event['ResourceProperties']['TransitAssumeRoleName']
    accountNumbers = event['ResourceProperties']['SubscriberAccounts']
    transitConfig = event['ResourceProperties']['TransitConfig']
    bucketName = event['ResourceProperties']['TransitVpnBucketName']
    bgpPool = event['ResourceProperties']['TransitBgpTunnelIpPool']
    vgwAsn = event['ResourceProperties']['TransitVgwAsn']
    paGroupInfo = event['ResourceProperties']['TransitPaGroupInfo']
    ipNetwork1 = event['ResourceProperties']['TransitVpcDmzAz1SubnetGateway']
    ipNetwork2 = event['ResourceProperties']['TransitVpcDmzAz2SubnetGateway']
    checkStackStatusLambda = event['ResourceProperties'][
        'CheckStackStatusLambda']
    configureTransitVpnLambda = event['ResourceProperties'][
        'ConfigureTransitVpnLambda']
    rebalancePaGroupsLambda = event['ResourceProperties'][
        'RebalancePaGroupsLambda']
    deleteTransitVpnConfigurationLambda = event['ResourceProperties'][
        'DeleteTransitVpnConfigurationLambda']
    mgmtAz1 = event['ResourceProperties']['MgmtAz1SubnetId']
    mgmtAz2 = event['ResourceProperties']['MgmtAz2SubnetId']

    ip1 = ipaddress.ip_network(ipNetwork1)
    ip2 = ipaddress.ip_network(ipNetwork2)
    ip1List = list(ip1)
    ip2List = list(ip2)
    event['ResourceProperties']['TransitVpcDmzAz1SubnetGateway'] = str(
        ip1List[1])
    event['ResourceProperties']['TransitVpcDmzAz2SubnetGateway'] = str(
        ip2List[1])
    responseData['TransitVpcDmzAz2SubnetGateway'] = str(ip2List[1])
    responseData['TransitVpcDmzAz1SubnetGateway'] = str(ip1List[1])

    if event['RequestType'] == 'Create':
        # Update Assume Role
        if accountNumbers:
            updateAssumeRole(roleName, accountNumbers)
        # Update DynamoDB TransitConfig Table
        updateTransitConfig(transitConfig, event['ResourceProperties'])
        # Update DynamoDB BgpTunnelIpPool Table
        updateBgpTunnelIpPoolTable(bgpPool)
        # Update DynamoDB VgwAsn Table
        updateVgwAsn(vgwAsn)
        # Update DynamoDB PaGroupInfo Table
        updatePaGroupInfo(paGroupInfo)
        # Return gateway ips for subnets
        cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData,
                         "CustomResourcePhysicalID")
    elif event['RequestType'] == 'Update':
        if accountNumbers:
            updateAssumeRole(roleName, accountNumbers)
            cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData,
                             "CustomResourcePhysicalID")
        else:
            cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData,
                             "CustomResourcePhysicalID")
        # Update DynamoDB TransitConfig Table
        updateTransitConfig(transitConfig, event['ResourceProperties'])
    elif event['RequestType'] == 'Delete':
        s3 = boto3.resource('s3')
        bucket = s3.Bucket(bucketName)
        bucket.objects.all().delete()
        bucket.delete()
        print("Successfully deleted S3 objects and the bucket: {}".format(
            bucketName))
        try:
            import time
            lambda_conn = boto3.client('lambda')
            lambda_conn.delete_function(FunctionName=checkStackStatusLambda)
            lambda_conn.delete_function(FunctionName=configureTransitVpnLambda)
            lambda_conn.delete_function(FunctionName=rebalancePaGroupsLambda)
            lambda_conn.delete_function(
                FunctionName=deleteTransitVpnConfigurationLambda)
            print("Deleted Lambdas launched in VPCs")
            ec2_conn = boto3.client('ec2')
            filters = [{'Name': 'subnet-id', 'Values': [mgmtAz1, mgmtAz2]}]
            interfaces = ec2_conn.describe_network_interfaces(
                Filters=filters)['NetworkInterfaces']
            if interfaces:
                for interface in interfaces:
                    print("Detaching Network Interface: {}".format(
                        interface['NetworkInterfaceId']))
                    ec2_conn.detach_network_interface(
                        AttachmentId=interface['Attachment']['AttachmentId'])
                    print("Detached Network Interface: {}".format(
                        interface['NetworkInterfaceId']))
            print("Sleeping for 5 seconds")
            time.sleep(5)
            if interfaces:
                for interface in interfaces:
                    print("Deleting Network Interface: {}".format(
                        interface['NetworkInterfaceId']))
                    ec2_conn.delete_network_interface(
                        NetworkInterfaceId=interface['NetworkInterfaceId'])
                    print("Deleted Network Interface: {}".format(
                        interface['NetworkInterfaceId']))
            print("Deleting Mgmt subnets: {},{}".format(mgmtAz1, mgmtAz2))
            # ec2_conn.delete_security_group(GroupId=trustedSg)
            ec2_conn.delete_subnet(SubnetId=mgmtAz1)
            ec2_conn.delete_subnet(SubnetId=mgmtAz2)
            # ec2_conn.delete_vpc(VpcId=vpcId)
            print("Deleted Mgmt subnets: {},{}".format(mgmtAz1, mgmtAz2))
            cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData,
                             "CustomResourcePhysicalID")
        except Exception as e:
            print("Error while deleting the Network Interfaces|TrustedSg|"
                  "MgmtSubnets|Vpc. Error: {}".format(str(e)))
            cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData,
                             "CustomResourcePhysicalID")
    cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData,
                     "CustomResourcePhysicalID")
def lambda_handler(event, context):
    try:
        physicalResourceId = str(uuid.uuid4())
        if 'PhysicalResourceId' in event:
            physicalResourceId = event['PhysicalResourceId']
        # only deleting the vault_pass from parameter store
        if event['RequestType'] == 'Delete':
            if not delete_password_from_param_store():
                return cfnresponse.send(
                    event, context, cfnresponse.FAILED,
                    "Failed to delete 'Vault_Pass' from parameter store, "
                    "see detailed error in logs", {}, physicalResourceId)
            delete_sessions_table()
            return cfnresponse.send(event, context, cfnresponse.SUCCESS,
                                    None, {}, physicalResourceId)
        if event['RequestType'] == 'Create':
            requestCPMName = event['ResourceProperties']['CPM']
            requestUsername = event['ResourceProperties']['Username']
            requestUnixSafeName = event['ResourceProperties']['SafeName']
            requestPvwaIp = event['ResourceProperties']['PVWAIP']
            requestPassword = event['ResourceProperties']['Password']
            requestKeyPairSafe = event['ResourceProperties']['KeyPairSafe']
            requestKeyPairName = event['ResourceProperties']['KeyPairName']
            requestAWSRegionName = event['ResourceProperties']['AWSRegionName']
            requestAWSAccountId = event['ResourceProperties']['AWSAccountId']

            isPasswordSaved = save_password_to_param_store(requestPassword)
            if not isPasswordSaved:  # if password failed to be saved
                return cfnresponse.send(
                    event, context, cfnresponse.FAILED,
                    "Failed to create Vault user's password in Parameter Store",
                    {}, physicalResourceId)

            pvwaSessionId = logon_pvwa(requestUsername, requestPassword,
                                       requestPvwaIp)
            if not pvwaSessionId:
                return cfnresponse.send(
                    event, context, cfnresponse.FAILED,
                    "Failed to connect to PVWA, see detailed error in logs",
                    {}, physicalResourceId)

            isSafeCreated = create_safe(requestUnixSafeName, requestCPMName,
                                        requestPvwaIp, pvwaSessionId, 1)
            if not isSafeCreated:
                return cfnresponse.send(
                    event, context, cfnresponse.FAILED,
                    "Failed to create the Safe '{0}', see detailed error "
                    "in logs".format(requestUnixSafeName),
                    {}, physicalResourceId)

            if not create_session_table():
                return cfnresponse.send(
                    event, context, cfnresponse.FAILED,
                    "Failed to create 'Sessions' table in DynamoDB, "
                    "see detailed error in logs", {}, physicalResourceId)

            # Creating KeyPair Safe
            isSafeCreated = create_safe(requestKeyPairSafe, "", requestPvwaIp,
                                        pvwaSessionId)
            if not isSafeCreated:
                return cfnresponse.send(
                    event, context, cfnresponse.FAILED,
                    "Failed to create the Key Pairs safe: {0}, see detailed "
                    "error in logs".format(requestUnixSafeName),
                    {}, physicalResourceId)

            # key pair is an optional parameter
            if not requestKeyPairName:
                print("Key Pair name parameter is empty, the solution will "
                      "not create a new Key Pair")
                return cfnresponse.send(event, context, cfnresponse.SUCCESS,
                                        None, {}, physicalResourceId)
            else:
                awsKeypair = create_new_key_pair_on_AWS(requestKeyPairName)
                if awsKeypair is False:
                    # Account already exists; no need to create it,
                    # can't insert it to the vault
                    return cfnresponse.send(
                        event, context, cfnresponse.FAILED,
                        "Failed to create Key Pair '{0}' in AWS".format(
                            requestKeyPairName), {}, physicalResourceId)
                if awsKeypair is True:
                    return cfnresponse.send(
                        event, context, cfnresponse.FAILED,
                        "Key Pair '{0}' already exists in AWS".format(
                            requestKeyPairName), {}, physicalResourceId)

            # Create the key pair account on the KeyPairs vault
            isAwsAccountCreated = create_key_pair_in_vault(
                pvwaSessionId, requestKeyPairName, awsKeypair, requestPvwaIp,
                requestKeyPairSafe, requestAWSAccountId, requestAWSRegionName)
            if not isAwsAccountCreated:
                return cfnresponse.send(
                    event, context, cfnresponse.FAILED,
                    "Failed to create Key Pair {0} in safe {1}, see detailed "
                    "error in logs".format(requestKeyPairName,
                                           requestKeyPairSafe),
                    {}, physicalResourceId)

            return cfnresponse.send(event, context, cfnresponse.SUCCESS, None,
                                    {}, physicalResourceId)
    except Exception as e:
        print("Exception occurred:{0}:".format(e))
        return cfnresponse.send(event, context, cfnresponse.FAILED,
                                "Exception occurred: {0}".format(e), {})
    finally:
        if 'pvwaSessionId' in locals():  # pvwaSessionId has been declared
            if pvwaSessionId:  # Log off the session after a successful logon
                logoff_pvwa(requestPvwaIp, pvwaSessionId)
def lambda_handler(event, context):
    # Try to get the properties required by the handler. Fail if initialization fails
    try:
        # If not a valid CloudFormation custom resource call
        if 'RequestType' not in event or not event['ResourceProperties']:
            return
        for required_property in ["SecretName", "SecretType", "SecretVersion",
                                  "TableCredstash", "KeyAlias"]:
            if not event['ResourceProperties'][required_property]:
                print("{0} is required".format(required_property))
                cfnresponse.send(event, context, cfnresponse.FAILED,
                                 "{0} is required".format(required_property), '')
                return
        secret_name = event['ResourceProperties']['SecretName']
        secret_type = event['ResourceProperties']['SecretType']
        secret_version = event['ResourceProperties']['SecretVersion']
        table = event['ResourceProperties']['TableCredstash']
        key_alias = event['ResourceProperties']['KeyAlias']
        region = context.invoked_function_arn.split(':')[3]
        print("stack {0} requested for secret {1}, version {2} of type {3}".format(
            event['RequestType'], secret_name, secret_version, secret_type))
        print("using table {0} in region {1} with key alias {2}".format(
            table, region, key_alias))
        response_status = cfnresponse.SUCCESS
        response_data = {}
        credential = Credential()
        if event['RequestType'] == 'Create':
            try:
                secret = credential.create(version=secret_version,
                                           region=region,
                                           keytype=secret_type,
                                           secret_name=secret_name,
                                           table=table,
                                           kms_alias=key_alias,
                                           digest="SHA256")
                cfnresponse.send(event, context, cfnresponse.SUCCESS,
                                 response_data=secret)
            except Exception as e:
                print(e)
                cfnresponse.send(event, context, cfnresponse.FAILED,
                                 'Could not create.', '')
            return
        elif event['RequestType'] == 'Update':
            try:
                secret = credential.create(version=secret_version,
                                           region=region,
                                           keytype=secret_type,
                                           secret_name=secret_name,
                                           table=table,
                                           kms_alias=key_alias,
                                           digest="SHA256")
                cfnresponse.send(event, context, cfnresponse.SUCCESS,
                                 response_data=secret)
            except Exception as e:
                print(e)
                cfnresponse.send(event, context, cfnresponse.FAILED,
                                 'Could not update.', '')
            return
        elif event['RequestType'] == 'Delete':
            cfnresponse.send(event, context, cfnresponse.SUCCESS)
            return
        else:
            cfnresponse.send(event, context, cfnresponse.FAILED,
                             'Inconsistent state', '')
            return
    except Exception as e:
        print(e)
        cfnresponse.send(event, context, cfnresponse.FAILED, 'Error', '')
def lambda_handler(event, context):
    print(event)
    print(event['ResourceProperties'])
    logger.debug('Event: {}'.format(event))
    logger.debug('Context: {}'.format(context))
    responseData = {}
    cfnClient = boto3.client('cloudformation')

    # CloudFormation custom resources send a RequestType of Delete, Update,
    # or Create to Lambda. We need to figure out which it is and act on the
    # specific request.

    # Immediately respond on Delete
    if event['RequestType'] == 'Delete':
        try:
            cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData,
                             'CustomResourcePhysicalID')
        except Exception as e:
            logger.error(e, exc_info=True)
            responseData = {'Error': str(e)}
            cfnresponse.send(event, context, cfnresponse.FAILED, responseData,
                             'CustomResourcePhysicalID')

    if event['RequestType'] == 'Update':
        try:
            cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData,
                             'CustomResourcePhysicalID')
        except Exception as e:
            logger.error(e, exc_info=True)
            responseData = {'Error': str(e)}
            cfnresponse.send(event, context, cfnresponse.FAILED, responseData,
                             'CustomResourcePhysicalID')

    if event['RequestType'] == 'Create':
        try:
            response = cfnClient.describe_stacks(StackName=event['StackId'])
            tags = response['Stacks'][0]['Tags']
            for tag in tags:
                if 'provisioningPrincipalArn' in tag['Key']:
                    provisioningPrincipalArn = tag['Value']
            provisioningPrincipal = provisioningPrincipalArn.split(
                "/")[2].split("@")[0]
            responseData = {
                'Success': 'Got provisioningPrincipal.',
                'Username': provisioningPrincipal
            }
            cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData,
                             'CustomResourcePhysicalID')
        except Exception as e:
            logger.error(e, exc_info=True)
            responseData = {'Error': str(e)}
            cfnresponse.send(event, context, cfnresponse.FAILED, responseData,
                             'CustomResourcePhysicalID')
def handler(event, context): """ """ logger = logging.getLogger("datadog") logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() logger.addHandler(ch) def create(type, properties): response = api.Monitor.create(type=type, **properties) cfnresponse.send(event, context, cfnresponse.SUCCESS, physical_resource_id=str(response['id'])) logger.info("Created a %s monitor", type) logger.debug("Response object: %s", response) def delete(id): if id == 'FAILURE': cfnresponse.send(event, context, cfnresponse.SUCCESS, physical_resource_id='FAILURE') return response = api.Monitor.delete(id) cfnresponse.send(event, context, cfnresponse.SUCCESS, physical_resource_id=str( response['deleted_monitor_id'])) logger.info("Deleted monitor: %s", id) logger.debug("Response object: %s", response) def update(id, properties): logger.debug("Old properties: %s", event['OldResourceProperties']) logger.debug("New properties: %s", properties) response = api.Monitor.update(id, **properties) cfnresponse.send(event, context, cfnresponse.SUCCESS, physical_resource_id=str(response['id'])) logger.info("Updated monitor: %s", id) logger.debug("Response object: %s", response) try: initialize(app_key=application_key, api_key=api_key) logger.debug("event: %s", event) event['ResourceProperties']["query"] = event['ResourceProperties'][ "query"].lower() if event['RequestType'] == 'Delete': delete(event['PhysicalResourceId']) elif event['RequestType'] == 'Create': types = { MetricAlert.resource_type: "metric alert", ServiceCheck.resource_type: "service check", EventAlert.resource_type: "event alert", Composite.resource_type: "composite", } create(types[event['ResourceType']], event['ResourceProperties']) elif event['RequestType'] == 'Update': update(event['PhysicalResourceId'], event["ResourceProperties"]) else: raise TypeError("Invalid CF event RequestType") except Exception as ex: logger.error("Exception %s", ex) logger.debug("Traceback: %s", traceback.format_exc()) if event['RequestType'] == 'Delete': cfnresponse.send(event, context, cfnresponse.SUCCESS) return cfnresponse.send(event, context, cfnresponse.FAILED, physical_resource_id="FAILURE") raise ex
def handler(event, context):
    logger = logging.getLogger("crypto_cfn")
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    logger.addHandler(ch)
    name = event["ResourceProperties"]["Name"]
    value = None
    try:
        if event["RequestType"] in ["Create", "Update"]:
            if event["RequestType"] == "Create" and parameter_exist(name):
                raise NameError(
                    "A Parameter named {} already exists".format(name))
            generate_password = event["ResourceProperties"].get(
                "GeneratePassword")
            value = event["ResourceProperties"].get("Value")
            if value and generate_password in ['true', 'True', '1', True, 1]:
                raise ValueError(
                    "Property Value and GeneratePassword cannot be used "
                    "at the same time")
            if generate_password in ['true', 'True', '1', True, 1]:
                password_length = event["ResourceProperties"].get(
                    "GeneratePasswordLength")
                allow_specials = event["ResourceProperties"].get(
                    "GeneratePasswordAllowSpecialCharacters")
                if not password_length:
                    raise ValueError(
                        "The Resource property GeneratePasswordLength is required")
                try:
                    password_length = int(password_length)
                except ValueError:
                    raise ValueError(
                        "The Resource property GeneratePasswordLength must "
                        "be an integer or castable to an integer")
                charset = ascii_uppercase + ascii_lowercase + digits
                if allow_specials and allow_specials in [
                        'true', 'True', '1', True, 1]:
                    charset = charset + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
                value = ''.join(
                    choice(charset) for i in range(password_length))
            if not value:
                raise ValueError("Either generate a password or set a value")
            response = boto3.client('ssm').put_parameter(
                Name=name,
                Description=event["ResourceProperties"]["Description"],
                Value=value,
                Type="SecureString",
                KeyId=event["ResourceProperties"]["KeyId"],
                Overwrite=True)
            logger.info("Successfully stored parameter {}".format(name))
            cfnresponse.send(event, context, cfnresponse.SUCCESS, response, name)
        else:
            boto3.client('ssm').delete_parameter(
                Name=event["PhysicalResourceId"])
            logger.info("Successfully deleted parameter: {}".format(name))
            cfnresponse.send(event, context, cfnresponse.SUCCESS, None, name)
    except Exception as ex:
        logger.error("Failed to %s parameter: %s", event["RequestType"], name)
        logger.debug("Stack trace %s", traceback.format_exc())
        if event["RequestType"] in ["Create", "Update"]:
            cfnresponse.send(event, context, cfnresponse.FAILED, None, "0")
        else:
            cfnresponse.send(event, context, cfnresponse.SUCCESS, None, "0")
def handler_delete(event, context):
    # Nothing to do... this is an informational lambda and no resource is created.
    cfnresponse.send(event, context, cfnresponse.SUCCESS, {},
                     event["PhysicalResourceId"])
def lambda_handler(event, context):
    if 'RequestType' in event.keys():
        if event['RequestType'] == "Create":
            vpcId = os.getenv('VPC_ID')
            response = ec2client.describe_security_groups(Filters=[
                {'Name': 'vpc-id', 'Values': [vpcId]},
                {'Name': 'group-name', 'Values': ['default']},
            ])
            securityGroupId = response['SecurityGroups'][0]['GroupId']
            print("Adding ingress rule for port 80 for securityGroupId={} "
                  "vpcId={}".format(securityGroupId, vpcId))
            data = ec2client.authorize_security_group_ingress(
                GroupId=securityGroupId,
                IpPermissions=[
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 80,
                        'ToPort': 80,
                        'IpRanges': [{'CidrIp': '0.0.0.0/0'}]
                    },
                ])
            OnDemandTargetCapacity = int(os.getenv('ONDEMANDTARGETCAPACITY'))
            SpotTargetCapacity = int(os.getenv('SPOTTARGETCAPACITY'))
            TotalTargetCapacity = int(os.getenv('TOTALTARGETCAPACITY'))
            launchTemplateVersion = '1'
            SubnetId = subnetIdsString
            EC2FleetId = createEC2Fleet(launchTemplateVersion, SubnetId,
                                        TotalTargetCapacity,
                                        OnDemandTargetCapacity,
                                        SpotTargetCapacity)
            updateInstanceStatusInDynamoDB(EC2FleetId)
        elif event['RequestType'] == "Delete":
            scan = InstanceTable.scan()
            with InstanceTable.batch_writer() as batch:
                for each in scan['Items']:
                    InstanceId = each['InstanceId']
                    print("Deleting the InstanceId={} from Dynamodb table={}".format(
                        InstanceId, InstancesTableName))
                    batch.delete_item(Key={'InstanceId': InstanceId})
                    time.sleep(5)
                    try:
                        print("Terminating the InstanceId={}".format(InstanceId))
                        response = ec2client.terminate_instances(
                            InstanceIds=[InstanceId])
                    except Exception as e:
                        print(e)
        else:
            print("CFN event RequestType={} is NOT handled currently".format(
                event['RequestType']))
        responseData = {}
        responseData['Data'] = '1'
        cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData,
                         "CustomResourcePhysicalID")
        return {
            'statusCode': 200,
            'body': json.dumps(
                "Completed processing of the event={}".format(event))
        }
    else:
        InstanceId = event['detail']['instance-id']
        print("Received EC2 Instance State-change Notification "
              "InstanceId={}...".format(InstanceId))
        handleNodeTermination(InstanceId)
        return {
            'statusCode': 200,
            'body': json.dumps(
                "Completed processing of the termination of the "
                "InstanceId={}".format(InstanceId))
        }
def lambda_handler(event, context):
    # Set an alarm for the remaining runtime minus a second
    try:
        signal.alarm(int(context.get_remaining_time_in_millis() / 1000) - 1)
        LOGGER.info('REQUEST RECEIVED: %s', event)
        LOGGER.info('REQUEST RECEIVED: %s', context)
        if event['RequestType'] == 'Create' or event['RequestType'] == 'Update':
            LOGGER.info('Creating or updating S3 Object')
            bucket_name = event['ResourceProperties']['BucketName']
            file_name = event['ResourceProperties']['FileName']
            content = event['ResourceProperties']['Content']
            create_zip = event['ResourceProperties']['CreateMode'] in [
                'zip', 'zip-literal']
            literal_file = event['ResourceProperties']['CreateMode'] == 'plain-literal'
            literal_zip = event['ResourceProperties']['CreateMode'] == 'zip-literal'
            md5_hash = hashlib.md5(content.encode('utf-8')).hexdigest()
            with open('/tmp/' + file_name, 'w') as lambda_file:
                lambda_file.write(content)
            s3 = boto3.resource('s3')
            if create_zip:
                if literal_zip:
                    output_filename = ".".join(
                        file_name.split(".")[:-1]) + '.zip'
                else:
                    output_filename = file_name + '_' + md5_hash + '.zip'
                zf = zipfile.ZipFile('/tmp/' + output_filename, mode='w')
                try:
                    zf.write('/tmp/' + file_name, file_name)
                finally:
                    zf.close()
                data = open('/tmp/' + output_filename, 'rb')
                s3.Bucket(bucket_name).put_object(Key=output_filename,
                                                  Body=data)
            else:
                if literal_file:
                    output_filename = file_name
                    s3.Bucket(bucket_name).put_object(Key=file_name,
                                                      Body=content)
                else:
                    extension = file_name.split(".")[-1]
                    output_filename = ".".join(file_name.split(
                        ".")[:-1]) + '_' + md5_hash + '.' + extension
                    s3.Bucket(bucket_name).put_object(Key=output_filename,
                                                      Body=content)
            cfnresponse.send(event, context, cfnresponse.SUCCESS,
                             {'Message': output_filename})
        elif event['RequestType'] == 'Delete':
            LOGGER.info('DELETE!')
            cfnresponse.send(event, context, cfnresponse.SUCCESS,
                             {'Message': 'Resource deletion successful!'})
        else:
            LOGGER.info('FAILED!')
            cfnresponse.send(
                event, context, cfnresponse.SUCCESS,
                {'Message': 'There is no such success like failure.'})
    except Exception as e:  # pylint: disable=W0702
        LOGGER.info(e)
        cfnresponse.send(event, context, cfnresponse.SUCCESS,
                         {'Message': 'There is no such success like failure.'})
def lambda_handler(event, context):
    message = ''
    response = cfnresponse.FAILED
    instance_status = ''

    # Get CloudFormation parameters
    cfn_stack_id = event.get('StackId')
    cfn_request_type = event.get('RequestType')
    cfn_physicalResourceId = (
        event.get('ResourceProperties', {}).get('PhysicalResourceId')
        or context.log_stream_name)

    if cfn_stack_id and cfn_request_type != 'Delete':
        try:
            # Wait for instance to become available before trying to connect
            while instance_status.lower() != 'available':
                # Exit if Lambda will timeout before next sleep ends
                if context.get_remaining_time_in_millis() < (30 * 1000):
                    message = 'Function will timeout. Exiting with failure.'
                    print('[ERROR] ', message)
                    cfnresponse.send(event, context, response,
                                     {'Message': message},
                                     cfn_physicalResourceId)
                    return {'statusCode': 200, 'body': json.dumps(message)}

                # Get instance availability status every 30 seconds
                time.sleep(30)
                rdsclient = boto3.client('rds')
                dbinstance = rdsclient.describe_db_instances(
                    DBInstanceIdentifier=DB_INSTANCE)
                instance_status = dbinstance['DBInstances'][0]['DBInstanceStatus']
                print('[INFO] DBInstance {} status: {}. '
                      'Time remaining: {} ms.'.format(
                          DB_NAME, instance_status,
                          context.get_remaining_time_in_millis()))

            sql_queries = [
                'DROP TABLE IF EXISTS rideTransactions;',
                '''
                CREATE TABLE rideTransactions (
                    id INT NOT NULL AUTO_INCREMENT
                    ,userId VARCHAR(64)
                    ,stationId INT
                    ,stationName VARCHAR(128)
                    ,duration INT
                    ,price DECIMAL(5,2)
                    ,createdDate TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                    ,PRIMARY KEY (id)
                );
                ''',
                'CREATE INDEX idxRideUserId ON rideTransactions(userId);',
                'CREATE INDEX idxRideStationId ON rideTransactions(stationId);'
            ]

            print('[INFO] Connecting...')
            conn_info = connection_info(DB_CREDS)
            conn = pymysql.connect(host=conn_info['host'],
                                   user=conn_info['username'],
                                   password=conn_info['password'],
                                   database=conn_info['dbname'],
                                   connect_timeout=30)
            with conn.cursor() as cur:
                for sql in sql_queries:
                    print('[INFO] Executing SQL: {}'.format(sql))
                    cur.execute(sql)
            conn.commit()
            conn.close()

            message = '[SUCCESS] Executed setup queries successfully.'
            response = cfnresponse.SUCCESS
        except Exception as e:
            print('[ERROR] ', e)
            message = '{}'.format(e)
    else:
        message = '[INFO] Deleting function.'
        response = cfnresponse.SUCCESS

    cfnresponse.send(event, context, response, {'Message': message},
                     cfn_physicalResourceId)
    return {'statusCode': 200, 'body': json.dumps(message)}
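# connection_info() is not defined above; a hypothetical sketch assuming
# DB_CREDS holds a JSON document (for example, a Secrets Manager secret
# string) with host/username/password/dbname keys:
import json

def connection_info(db_creds):
    # Accept either a pre-parsed dict or a raw JSON string.
    creds = json.loads(db_creds) if isinstance(db_creds, str) else db_creds
    return {
        'host': creds['host'],
        'username': creds['username'],
        'password': creds['password'],
        'dbname': creds['dbname'],
    }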
def handler(event, context): # This function handles two RequestTypes: CREATE and DELETE if event['RequestType'] == 'Delete': client = boto3.client('route53') # CREATE handler below puts the new Zone ID into PhysicalResourceId hostedZoneId = event['PhysicalResourceId'] zones = client.list_hosted_zones() # PhysicalResourceId has hostedZoneId as value only, no /hostedzone/ myZone = filter( lambda find_rec: find_rec['Id'] == '/hostedzone/' + hostedZoneId, zones['HostedZones']) # We expect only one match. Otherwise we just bail. if len(myZone) == 1: try: # At this point, we don't handle responses with more than 100 records, # where response['IsTruncated'] == True print 'Listing resource record sets..' response = client.list_resource_record_sets( HostedZoneId=hostedZoneId) if len(response['ResourceRecordSets']) > 2: # Empty zone will always have at least 2 records: SOA and NS # If there are more than 2, we have work to do print 'Iterating over records to delete..' for record in response['ResourceRecordSets']: if record['Type'] != 'SOA' and record['Type'] != 'NS': print 'Deleting record: ', record result = client.change_resource_record_sets( HostedZoneId=hostedZoneId, ChangeBatch={ "Changes": [{ "Action": "DELETE", "ResourceRecordSet": record }] }) print 'Deleting the zone itself..' response = client.delete_hosted_zone(Id=hostedZoneId) print 'Deleted the zone successfully.' responseData = {} responseData['HostedZoneId'] = hostedZoneId cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, event['PhysicalResourceId']) except Exception as err: print 'Error encountered: ', err, sys.exc_info()[0] responseData = {} cfnresponse.send(event, context, cfnresponse.FAILED, responseData, event['PhysicalResourceId']) else: print 'Zone ID lookup did not return exactly one result, bailing.' responseData = {} cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, event['PhysicalResourceId']) # elif event['RequestType'] == 'Create': client = boto3.client('route53') kwargs = {} z_name = event['ResourceProperties']['Name'] if z_name[-1] != '.': z_name = z_name + '.' kwargs['Name'] = z_name kwargs['CallerReference'] = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(8)) # If HostedZoneConfig and/or Region/VPC are specified, put them into request hzconf = event['ResourceProperties'].get('HostedZoneConfig', None) region = event['ResourceProperties'].get('Region', None) vpc = event['ResourceProperties'].get('VPC', None) hztags = event['ResourceProperties'].get('HostedZoneTags', None) if vpc: kwargs['VPC'] = vpc if hzconf: hzc1 = {} hzc1['Comment'] = hzconf.get('Comment', None) pz = hzconf.get('PrivateZone', None) if pz: # CloudFormation sends Bool variables "true" / "false" as String hzc1['PrivateZone'] = json.loads(pz) kwargs['HostedZoneConfig'] = hzc1 try: print 'Creating the Hosted Zone..' response = client.create_hosted_zone(**kwargs) # Id has /hostedzone/ as part of it hostedZoneId1 = response['HostedZone']['Id'] hostedZoneId = str.split(str(hostedZoneId1), '/')[2] print 'New Hosted Zone created:', hostedZoneId responseData = {} responseData['HostedZoneId'] = hostedZoneId # There is no DelegationSet returned when we created a Private Hosted Zone. 
ds = response.get('DelegationSet', None) if ds: # Return Name Servers if we have a DelegationSet responseData['NameServers'] = ds['NameServers'] else: # Return an "info message" instead of Name Servers if we created a Private Hosted Zone responseData['NameServers'] = [ 'Private zone created', 'No DelegationSet returned', '', '' ] if hztags: # create_hosted_zone doesn't set Tags; # if HostedZoneTags is specified, we need to set tags separately print('HostedZoneTags is supplied; applying tags to the new zone.') tag_kwargs = {} tag_kwargs['ResourceType'] = 'hostedzone' tag_kwargs['ResourceId'] = hostedZoneId tag_kwargs['AddTags'] = hztags response = client.change_tags_for_resource(**tag_kwargs) cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, hostedZoneId) except Exception as err: print('Error encountered: ', err, sys.exc_info()[0]) responseData = {} hostedZoneId = 'XXX' responseData['HostedZoneId'] = hostedZoneId cfnresponse.send(event, context, cfnresponse.FAILED, responseData, hostedZoneId) else: print('RequestType is not Create or Delete; bailing.') responseData = {} cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, event['PhysicalResourceId']) # # To prep this code for in-line CF (mind the \t in the last sed edit set): # cat HostedZoneWrapper.py | grep -v '#' | sed -e 's/\"/\\"/g' -e 's/^/\"/g' -e 's/$/\",/g' -e 's/ / /g' # (Need to convert spaces to tabs since CF only allows in-line Lambdas smaller than 4096 bytes)
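# The Delete branch above deliberately skips zones with more than 100 record sets
# (response['IsTruncated'] == True). If that limit ever needs lifting, boto3 ships a
# paginator for list_resource_record_sets; a sketch is below. The generator name is
# illustrative, not part of the original code.
import boto3

def iter_record_sets(hosted_zone_id):
    # Yield every record set in the zone, following pagination transparently.
    client = boto3.client('route53')
    paginator = client.get_paginator('list_resource_record_sets')
    for page in paginator.paginate(HostedZoneId=hosted_zone_id):
        for record in page['ResourceRecordSets']:
            yield record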
def lambda_handler(event, context): """ .. note:: This function is the entry point for the ``sched_event1`` Lambda function. This function performs the following actions: firewall_asg_update(event, context) firewall_init_config(event, context) network_load_balancer_update(event, context) | invokes ``check_and_send_message_to_queue()`` | desc: Checks the messages on the queue to ensure it is up to date | and picks up any changes, as the case may be. | invokes ``firewall_asg_update()`` | desc: monitors the firewall ASG and creates the ASG if it does not exist | invokes ``firewall_init_config()`` | desc: monitors firewalls in the INIT state and moves them to COMMIT | once the firewall auto-commit lifecycle hook succeeds | invokes ``network_load_balancer_update()`` | desc: updates firewall NAT rules based on info in the firewall table and | the NLB table :param event: Encodes all the input variables to the lambda function, when the function is invoked. Essentially AWS Lambda uses this parameter to pass in event data to the handler function. :type event: dict :param context: AWS Lambda uses this parameter to provide runtime information to your handler. :type context: LambdaContext :return: None """ global stackname global region global sg_mgmt global sg_untrust global sg_trust global sg_vpc global keyname global iamprofilebs global s3master global subnetmgmt global subnetuntrust global subnettrust global routetableidtrust global vpcid global imageID global ScalingPeriod global ScaleUpThreshold global ScaleDownThreshold global ScalingParameter global instanceType global gcontext global MinInstancesASG global MaximumInstancesASG global LambdaExecutionRole global ASGNotifierRolePolicy global ASGNotifierRole global LambdaS3Bucket global SubnetIDNATGW global SubnetIDLambda global logger global fw_azs global trust_def_gw global apikey global LambdaENISNSTopic gcontext = context logger.info('got event {}'.format(event)) eventresources = event['ResourceProperties'] debug = eventresources['Debug'] if debug == 'Yes': logger.setLevel(logging.INFO) logger.info('got eventresources {}'.format(eventresources)) stackname = eventresources['StackName'] region = eventresources['Region'] vpcid = eventresources['VpcId'] subnetmgmt = eventresources['SubnetIDMgmt'] subnetuntrust = eventresources['SubnetIDUntrust'] subnettrust = eventresources['SubnetIDTrust'] routetableidtrust = eventresources['RouteTableIDTrust'] sg_mgmt = eventresources['MgmtSecurityGroup'] sg_trust = eventresources['TrustSecurityGroup'] sg_untrust = eventresources['UntrustSecurityGroup'] sg_vpc = eventresources['VPCSecurityGroup'] keyname = eventresources['KeyName'] instanceType = eventresources['FWInstanceType'] MinInstancesASG = int(eventresources['MinInstancesASG']) MaximumInstancesASG = int(eventresources['MaximumInstancesASG']) ScaleUpThreshold = float(eventresources['ScaleUpThreshold']) ScaleDownThreshold = float(eventresources['ScaleDownThreshold']) ScalingParameter = eventresources['ScalingParameter'] ScalingPeriod = int(eventresources['ScalingPeriod']) imageID = eventresources['ImageID'] LambdaENISNSTopic = eventresources['LambdaENISNSTopic'] iamprofilebs = str(eventresources['FirewallBootstrapRole']) LambdaExecutionRole = str(eventresources['LambdaExecutionRole']) ASGNotifierRole = str(eventresources['ASGNotifierRole']) ASGNotifierRolePolicy = str(eventresources['ASGNotifierRolePolicy']) s3master = eventresources['BootstrapS3BucketName'] LambdaS3Bucket = eventresources['LambdaS3Bucket'] SubnetIDNATGW = eventresources['SubnetIDNATGW'] SubnetIDLambda = 
eventresources['SubnetIDLambda'] apikey = eventresources['apikey'] subnetuntrust = str(subnetuntrust) subnetuntrust = fix_subnets(subnetuntrust) subnetmgmt = str(subnetmgmt) subnetmgmt = fix_subnets(subnetmgmt) subnettrust = str(subnettrust) subnettrust = fix_subnets(subnettrust) SubnetIDNATGW = str(SubnetIDNATGW) SubnetIDNATGW = fix_subnets(SubnetIDNATGW) SubnetIDLambda = str(SubnetIDLambda) SubnetIDLambda = fix_subnets(SubnetIDLambda) routetableidtrust = str(routetableidtrust) routetableidtrust = fix_subnets(routetableidtrust) logger.info('StackName: ' + stackname) logger.info('Mgmt Security Group ID: ' + sg_mgmt) logger.info('KeyName is: ' + keyname) logger.info('S3 Master Bucket: ' + s3master) logger.info('iamprofilebs: ' + iamprofilebs) logger.info('Subnet Mgmt List: ' + subnetmgmt) logger.info('Subnet Untrust List: ' + subnetuntrust) logger.info('Subnet Trust List: ' + subnettrust) logger.info('Trust Route Table ID list: ' + routetableidtrust) logger.info('Deployed VpcId is: ' + vpcid) try: logger.info("Calling get_azs with {} list".format(subnettrust)) fw_azs = get_azs(subnettrust) trust_def_gw = [] for i in fw_azs: trust_subnet_id = choose_subnet(subnettrust, i) subnet = ec2.Subnet(trust_subnet_id) subnet_str, gw = get_subnet_and_gw(subnet.cidr_block) trust_def_gw.append(gw) logger.info("trust_def_gw:") print("[{0}]".format(', '.join(map(str, trust_def_gw)))) except Exception as e: logger.exception("Get az and trust default gw error: {}".format(e)) responseData = {} # TODO fix cfnresponse info try: if event['RequestType'] == 'Delete': try: if remove_asg(): responseData['data'] = 'SUCCESS' else: # Report SUCCESS to CloudFormation even on a failed delete so the stack is not left stuck; the data field records the real outcome responseData['data'] = 'FAILED' cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, "CustomResourcePhysicalID") except Exception as e: logger.error('Got ERROR in remove_asg .. check for left over resources {}'.format(e)) cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, "CustomResourcePhysicalID") elif event['RequestType'] == 'Create': try: if create_asg(): responseData['data'] = 'SUCCESS' cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, "CustomResourcePhysicalID") else: responseData['data'] = 'FAILED' cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, "CustomResourcePhysicalID") except Exception as e: logger.info("Got exception creating ASG: {}".format(e)) # Signal failure so a broken create does not hang until the stack times out cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "CustomResourcePhysicalID") elif event['RequestType'] == 'Update': # Nothing to do on update, but CloudFormation still expects a response cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, "CustomResourcePhysicalID") except Exception as e: logger.error('Got ERROR in create_asg Lambda handler {}'.format(e)) cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, "CustomResourcePhysicalID")
def cfn_failed(event, context): """ Short function for signaling CFN about fault """ resp_data = {} resp = cfnresponse.FAILED cfnresponse.send(event, context, resp, resp_data)
def handler(event, context): """Main Lambda Function""" log.debug("Received event: %s", json.dumps(event)) filters = flat_out_filter(event["ResourceProperties"]["Filter"]) filters_raw = event["ResourceProperties"]["Filter"] log.debug("Where filters are: %s", filters) if event["RequestType"] == "Delete": # Nothing to do here, let's signal CFN that we are done resp = cfnresponse.SUCCESS cfnresponse.send(event, context, resp, {}) return if event["ResourceType"] == "Custom::Subnet": log.debug("Subnet lookup") ec2 = boto3.client("ec2") search = ec2.describe_subnets(Filters=filters) if len(search["Subnets"]) == 1: subnet = search["Subnets"][0] resp = cfnresponse.SUCCESS cfnresponse.send(event, context, resp, subnet, subnet["SubnetId"]) else: cfn_failed(event, context) elif event["ResourceType"] == "Custom::VPC": log.debug("VPC lookup") ec2 = boto3.client("ec2") search = ec2.describe_vpcs(Filters=filters) if len(search["Vpcs"]) == 1: vpc = search["Vpcs"][0] resp = cfnresponse.SUCCESS cfnresponse.send(event, context, resp, vpc, vpc["VpcId"]) else: cfn_failed(event, context) elif event["ResourceType"] == "Custom::R53HostedZone": log.debug("R53 Hosted Zone lookup") r53 = boto3.client("route53") if "HostedZoneId" in filters_raw: zone_id = filters_raw["HostedZoneId"] elif "DNSName" in filters_raw: log.debug("DNSName filter: %s", filters_raw["DNSName"]) zone_list = r53.list_hosted_zones_by_name( DNSName=filters_raw["DNSName"]) if not zone_list: log.error("Error on R53 lookup") cfn_failed(event, context) return zones = [] for zone in zone_list["HostedZones"]: private = str(zone["Config"]["PrivateZone"]).lower() if private == filters_raw["PrivateZone"].lower(): zones.append(zone) if len(zones) == 1: zone_id = zones[0]["Id"].split("/")[2] else: log.error("Too many or too few matches") cfn_failed(event, context) return else: log.error("Unsupported R53 filters") cfn_failed(event, context) return zone = r53.get_hosted_zone(Id=zone_id) if zone: resp = cfnresponse.SUCCESS cfnresponse.send(event, context, resp, flat_out_dict(zone), zone_id) else: cfn_failed(event, context) else: log.error("Unsupported resource lookup") cfn_failed(event, context)
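# handler() above leans on flat_out_filter() and flat_out_dict(), which are not part of this
# listing. Minimal sketches follow, assuming the Filter property is a flat {Name: Value}
# mapping and that response data must be flattened to scalar values for cfnresponse; the
# real helpers may differ.
def flat_out_filter(filter_properties):
    # Convert {'vpc-id': 'vpc-123'} into the boto3 Filters shape: [{'Name': ..., 'Values': [...]}].
    return [{'Name': name, 'Values': [value]} for name, value in filter_properties.items()]

def flat_out_dict(nested, prefix=''):
    # Flatten nested dicts into 'Parent.Child' keys so every response value stays a scalar.
    flat = {}
    for key, value in nested.items():
        full_key = prefix + str(key)
        if isinstance(value, dict):
            flat.update(flat_out_dict(value, full_key + '.'))
        else:
            flat[full_key] = str(value)
    return flat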
def lambda_handler(event, context): s3 = boto3.resource('s3') # Script Template for Conversion Glue Job script_template = "" # Read Script Template script_template_file = os.environ['LAMBDA_TASK_ROOT'] + '/glueJobScript.template' with open(script_template_file, 'r') as template_file: script_template = template_file.read() # Create Column Mappings map_item_id = '("{}", "int", "{}", "int")'.format(event['ResourceProperties']['SourceColumnItemId'], event['ResourceProperties']['DestinationColumnItemId']) map_user_id = '("{}", "int", "{}", "int")'.format(event['ResourceProperties']['SourceColumnUserId'], event['ResourceProperties']['DestinationColumnUserId']) map_event_type = '("{}", "string", "{}", "string")'.format(event['ResourceProperties']['SourceColumnEventType'], event['ResourceProperties']['DestinationColumnEventType']) map_event_value = '("{}", "int", "{}", "int")'.format(event['ResourceProperties']['SourceColumnEventValue'], event['ResourceProperties']['DestinationColumnEventValue']) map_timestamp = '("{}", "int", "{}", "int")'.format(event['ResourceProperties']['SourceColumnTimestamp'], event['ResourceProperties']['DestinationColumnTimestamp']) # Set up Column Mappings based on PersonalizeDatasetName (User, Item, Interactions) if event['ResourceProperties']['PersonalizeDatasetName'] == "User": column_mappings = map_user_id elif event['ResourceProperties']['PersonalizeDatasetName'] == "Item": column_mappings = map_item_id else: # Map all columns if not User/Item column_mappings = '{},{},{},{},{}'.format(map_item_id, map_user_id, map_event_type, map_event_value, map_timestamp) # Replace Column Mappings in Template script_template = script_template.replace('[COLUMN_MAPPINGS]', column_mappings) # Replace Template Placeholder Values with ResourceProperties script_template = script_template.replace('[DATABASE_NAME]', event['ResourceProperties']['DatabaseName']) script_template = script_template.replace('[TABLE_NAME]', event['ResourceProperties']['TableName']) script_template = script_template.replace('[OUTPUT_PATH]', 's3://{}{}'.format(event['ResourceProperties']['DestinationBucketName'], event['ResourceProperties']['DestinationDataPrefix'])) # Output Location Details script_bucket = os.environ['CONVERSION_JOB_SCRIPT_BUCKET'] script_filename = 'conversionScript' try: if event['RequestType'] == 'Create': script_object = s3.Object(script_bucket, script_filename) script_object.put(Body=str.encode(script_template)) response_data = {"Message": "Resource creation successful!", "Script": 's3://{}/{}'.format(script_bucket, script_filename)} cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data) elif event['RequestType'] == 'Update': script_object = s3.Object(script_bucket, script_filename) script_object.put(Body=str.encode(script_template)) response_data = {"Message": "Resource update successful!", "Script": 's3://{}/{}'.format(script_bucket, script_filename)} cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data) elif event['RequestType'] == 'Delete': s3.Object(script_bucket, script_filename).delete() s3.Object(script_bucket, script_filename + '.temp').delete() response_data = {"Message": "Resource deletion successful!"} cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data) else: response_data = {"Message": "Unexpected event received from CloudFormation"} cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data) except Exception as error: print(error) response_data = {"Message": "Unexpected error occurred."} cfnresponse.send(event, context, cfnresponse.FAILED, response_data)
def handler(event, context): """ Bucket notifications configuration has a disgraceful API - sorry. The problem is that you can't individually deal with each configuration and you need to send a (potentially) enormous dictionary with all the configuration. This has lots of implications when you are trying to implement a tool like gordon: - People might already have some notifications configured. - People might change them - which causes race conditions. The approach we have decided to follow is the following: 1) If there is no configuration attached to this bucket, we continue. 2) If there is any configuration attached, we check that the ID of each notification starts with "gordon-". If that's the case, we are "safe"... in the sense that whatever the previous status was, we now have the "correct" configuration. If there is any notification with an id which doesn't start with "gordon-" we fail miserably... because it is quite risky to start mixing events between two sources. We need to make this behaviour pretty clear in the documentation. Keeping the physical_resource_id constant across notification configurations of the same bucket is a workaround for the same issue. We need to keep it constant so CloudFormation doesn't issue a delete on the old resource once it gets updated and gets a new physical_resource_id because (for example) the lambda ID has changed. As a result, CF will only trigger a delete when the bucket changes - which is expected. """ properties = event['ResourceProperties'] # It doesn't matter how prominently you put this in the doc... people will # always put the bucket's ARN instead of its name... and it would be a shame # to fail because of this simple error. bucket_name = properties['Bucket'].replace('arn:aws:s3:::', '') physical_resource_id = '{}-bucket-notification-configuration'.format( bucket_name) client = boto3.client('s3') existing_notifications = client.get_bucket_notification_configuration( Bucket=bucket_name) # Check if there is any notification-id which doesn't start with gordon- # If so... fail. for _type in AVAILABLE_CONFIGURATIONS: for notification in existing_notifications.get(_type, []): if not notification.get('Id', '').startswith('gordon-'): send( event, context, FAILED, physical_resource_id=physical_resource_id, reason=( "Bucket {} contains a notification called {} " "which was not created by gordon, hence the risk " "of trying to add/modify/delete notifications. " "Please check the documentation in order to understand " "why gordon refuses to proceed.").format( bucket_name, notification.get('Id', ''))) return # For Delete requests, we need to simply send an empty dictionary. # Again - this has bad implications if the user has tried to configure # notifications manually, because we are going to override their # configuration. There is not much else we can do. configuration = {} if event['RequestType'] != 'Delete': arn_name_map = { 'LambdaFunctionConfigurations': 'LambdaFunctionArn', 'TopicConfigurations': 'TopicArn', 'QueueConfigurations': 'QueueArn', } for _type in AVAILABLE_CONFIGURATIONS: configuration[_type] = [] for notification in properties.get(_type, []): data = { 'Id': notification['Id'], arn_name_map[_type]: notification['DestinationArn'], 'Events': notification['Events'], } if notification.get('KeyFilters'): data['Filter'] = { 'Key': { 'FilterRules': notification['KeyFilters'] } } configuration[_type].append(data) client.put_bucket_notification_configuration( Bucket=bucket_name, NotificationConfiguration=configuration) send(event, context, SUCCESS, physical_resource_id=physical_resource_id)
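# The gordon handler above references AVAILABLE_CONFIGURATIONS plus SUCCESS/FAILED/send()
# defined elsewhere in that project. The sketches below are inferred from the arn_name_map
# keys and the call sites, not taken from gordon itself; note that only newer copies of the
# stock cfnresponse module accept the reason keyword used here.
import cfnresponse

SUCCESS = cfnresponse.SUCCESS
FAILED = cfnresponse.FAILED

AVAILABLE_CONFIGURATIONS = (
    'LambdaFunctionConfigurations',
    'TopicConfigurations',
    'QueueConfigurations',
)

def send(event, context, status, data=None, physical_resource_id=None, reason=None):
    # Thin keyword-friendly wrapper around cfnresponse.send.
    cfnresponse.send(event, context, status, data or {}, physical_resource_id, reason=reason)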
def lambda_handler(event, context): if event['RequestType'] == 'Delete': cfnresponse.send(event, context, cfnresponse.SUCCESS, {}, None) return # make sure we send a failure to CloudFormation if the function is going to timeout timer = threading.Timer( (context.get_remaining_time_in_millis() / 1000.00) - 0.5, timeout, args=[event, context]) timer.start() status = cfnresponse.SUCCESS try: region = event['ResourceProperties']['region'] cacrypto_bucket = event['ResourceProperties']['cacrypto_bucket'] caCmkKey = event['ResourceProperties']['caCmkKey'] certEnrollLamnda = event['ResourceProperties']['certEnrollLamnda'] print(event) print(cacrypto_bucket) # Generate the CA key passphrase (128 bytes) with KMS kmsclient = boto3.client('kms') print("Generating key password with KMS (128 Bytes)") random_bytes = kmsclient.generate_random(NumberOfBytes=128) rnd_token = base64.b64encode(random_bytes['Plaintext']).decode(encoding="utf-8") print("Generated ca password") # Generate the key and cert with openssl p = subprocess.Popen( 'openssl genrsa -aes256 -out /tmp/ca.key.encrypted.pem -passout pass:' + rnd_token + ' 4096 && openssl req -new -extensions v3_ca -sha256 -key /tmp/ca.key.encrypted.pem -x509 -days 3650 -out /tmp/cacert.pem -subj "/CN=ipsec.' + region + '" -passin pass:' + rnd_token, shell=True) p.communicate() if p.returncode != 0: raise Exception('Error in execution of openssl script: ' + str(p.returncode)) else: print('Certificate and key generated. Subject CN=ipsec.' + region + ' Valid 10 years') # Upload the encrypted key and CA cert f = open("/tmp/ca.key.encrypted.pem", 'rb') s3 = boto3.client('s3', region_name=region) s3.put_object(Bucket=cacrypto_bucket, Key='ca.key.encrypted.pem', Body=f) print('Encrypted CA key uploaded in bucket ' + cacrypto_bucket) f = open("/tmp/cacert.pem", 'rb') s3.put_object(Bucket=cacrypto_bucket, Key='ca.cert.pem', Body=f) print('CA cert uploaded in bucket ' + cacrypto_bucket) os.remove('/tmp/cacert.pem') os.remove('/tmp/ca.key.encrypted.pem') # Encrypt the key passphrase with the CA CMK kms = boto3.client('kms', region_name=region) ency_token = base64.b64encode( kms.encrypt(KeyId=caCmkKey, Plaintext=rnd_token)['CiphertextBlob']).decode( encoding="utf-8") lmb = boto3.client('lambda', region_name=region) env = lmb.get_function_configuration( FunctionName=certEnrollLamnda)['Environment'] env['Variables']['CA_PWD'] = ency_token boto3.client('lambda', region_name=region).update_function_configuration( FunctionName=certEnrollLamnda, Environment=env) print('Lambda function ' + certEnrollLamnda + ' updated') # Restrict the CA key: remove the allowed kms:Encrypt action policy_response = kms.get_key_policy(KeyId=caCmkKey, PolicyName='default') kms.put_key_policy(KeyId=caCmkKey, PolicyName='default', Policy=policy_response['Policy'].replace( '"kms:Encrypt",', '')) print( 'Resource policy for CA CMK hardened - removed action kms:Encrypt') except Exception as e: logging.error('Exception: %s' % e, exc_info=True) status = cfnresponse.FAILED finally: timer.cancel() cfnresponse.send(event, context, status, {}, None)
def do_attach_detach_elbs(event, context): props = event.get('ResourceProperties', {}) # List of ELB names elbs = props.get('Elbs') # Name of autoscaling group A (or None) asg_a = props.get('AsgA') # Name of autoscaling group B (or None) asg_b = props.get('AsgB') # One of: AsgA, AsgB or Maintenance active_asg = props.get('ActiveAsg') # E.g. eu-west-1 aws_region = props.get('AwsRegion') if not elbs: send(event, context, FAILED, reason='Property Elbs must be defined') return if not asg_a and not asg_b: send(event, context, FAILED, reason='At least one of [AsgA, AsgB] must be defined') return if active_asg == 'AsgA' and not asg_a: send(event, context, FAILED, reason='AsgA was set active but ASG name was not specified') return if active_asg == 'AsgB' and not asg_b: send(event, context, FAILED, reason='AsgB was set active but ASG name was not specified') return if not aws_region: send(event, context, FAILED, reason='Property AwsRegion must be defined') return all_asg_names = [asg_name for asg_name in [asg_a, asg_b] if asg_name] active_asg_name = asg_a if active_asg == 'AsgA' else (asg_b if active_asg == 'AsgB' else None) inactive_asg_name = (asg_b or None) if active_asg_name == asg_a else (asg_a or None) logger.info('All ASGs: %s', all_asg_names) logger.info('Active ASG: %s', active_asg_name) autoscaling = boto3.client('autoscaling', region_name=aws_region) def detach_elbs_from_asg(asg_name): result = autoscaling.describe_load_balancers(AutoScalingGroupName=asg_name) matching_attached_elbs = [ elb['LoadBalancerName'] for elb in result['LoadBalancers'] if elb['State'] != 'Removing' and elb['LoadBalancerName'] in elbs ] if len(matching_attached_elbs) > 0: logger.info('Detaching ELBs: %s from ASG: %s', matching_attached_elbs, asg_name) autoscaling.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=matching_attached_elbs) try: if active_asg_name: # Attach ELBs to active ASG and detach from inactive ASG (if any) logger.info('Attaching ELBs: %s to ASG: %s', elbs, active_asg_name) autoscaling.attach_load_balancers(AutoScalingGroupName=active_asg_name, LoadBalancerNames=elbs) if inactive_asg_name: detach_elbs_from_asg(inactive_asg_name) else: # Detach ELBs from all ASGs (maintenance mode) for asg_name in all_asg_names: detach_elbs_from_asg(asg_name) send(event, context, SUCCESS, reason='Successfully attached/detached ELBs') except ClientError as err: logger.error('Client error occurred while attaching/detaching ELBs: %s', err) send(event, context, FAILED, reason='Failed to attach/detach ELBs: {0} ({1})'.format(elbs, aws_region))
def handler(event, context): print('Received event: %s' % json.dumps(event)) status = cfnresponse.SUCCESS physical_resource_id = None data = {} reason = None try: if event['RequestType'] == 'Create': token = ''.join(ch for ch in str(event['StackId'] + event['LogicalResourceId']) if ch.isalnum()) token = token[len(token)-32:] if len(event['ResourceProperties']['HostNames']) > 1: arn = acm_client.request_certificate( ValidationMethod='DNS', DomainName=event['ResourceProperties']['HostNames'][0], SubjectAlternativeNames=event['ResourceProperties']['HostNames'][1:], IdempotencyToken=token )['CertificateArn'] else: arn = acm_client.request_certificate( ValidationMethod='DNS', DomainName=event['ResourceProperties']['HostNames'][0], IdempotencyToken=token )['CertificateArn'] physical_resource_id = arn logging.info("certificate arn: %s" % arn) rs = {} while True: try: for d in acm_client.describe_certificate(CertificateArn=arn)['Certificate']['DomainValidationOptions']: rs[d['ResourceRecord']['Name']] = d['ResourceRecord']['Value'] break except KeyError: if (context.get_remaining_time_in_millis() / 1000.00) > 20.0: print('waiting for ResourceRecord to be available') time.sleep(15) else: logging.error('timed out waiting for ResourceRecord') status = cfnresponse.FAILED break rs = [{'Action': 'CREATE', 'ResourceRecordSet': {'Name': r, 'Type': 'CNAME', 'TTL': 600, 'ResourceRecords': [{'Value': rs[r]}]}} for r in rs.keys()] try: r53_client.change_resource_record_sets(HostedZoneId=event['ResourceProperties']['HostedZoneId'], ChangeBatch={'Changes': rs}) except Exception as e: if not str(e).endswith('but it already exists'): raise while 'PENDING_VALIDATION' in [v['ValidationStatus'] for v in acm_client.describe_certificate(CertificateArn=arn)['Certificate']['DomainValidationOptions']]: print('waiting for validation to complete') if (context.get_remaining_time_in_millis() / 1000.00) > 20.0: time.sleep(15) else: logging.error('validation timed out') status = cfnresponse.FAILED break for r in [v for v in acm_client.describe_certificate(CertificateArn=arn)['Certificate']['DomainValidationOptions']]: if r['ValidationStatus'] != 'SUCCESS': logging.debug(r) status = cfnresponse.FAILED reason = 'One or more domains failed to validate' logging.error(reason) data['Arn'] = arn elif event['RequestType'] == 'Update': reason = 'Exception: Stack updates are not supported' logging.error(reason) status = cfnresponse.FAILED physical_resource_id = event['PhysicalResourceId'] elif event['RequestType'] == 'Delete': physical_resource_id = event['PhysicalResourceId'] if not re.match(r'arn:[\w+=/,.@-]+:[\w+=/,.@-]+:[\w+=/,.@-]*:[0-9]+:[\w+=,.@-]+(/[\w+=,.@-]+)*', physical_resource_id): logging.info("PhysicalId is not an acm arn, assuming creation never happened and skipping delete") else: rs = {} for d in acm_client.describe_certificate(CertificateArn=physical_resource_id)['Certificate']['DomainValidationOptions']: rs[d['ResourceRecord']['Name']] = d['ResourceRecord']['Value'] rs = [{'Action': 'DELETE', 'ResourceRecordSet': {'Name': r, 'Type': 'CNAME', 'TTL': 600, 'ResourceRecords': [{'Value': rs[r]}]}} for r in rs.keys()] try: r53_client.change_resource_record_sets(HostedZoneId=event['ResourceProperties']['HostedZoneId'], ChangeBatch={'Changes': rs}) except r53_client.exceptions.InvalidChangeBatch: # The validation records are already gone pass time.sleep(30) try: acm_client.delete_certificate(CertificateArn=physical_resource_id) except acm_client.exceptions.ResourceInUseException: # Give attached resources a minute to release the certificate, then retry once time.sleep(60) acm_client.delete_certificate(CertificateArn=physical_resource_id) except Exception as e: logging.error('Exception: %s' % e, exc_info=True) reason = str(e) status = cfnresponse.FAILED finally: if event['RequestType'] == 'Delete': try: wait_message = 'waiting for events for request_id %s to propagate to cloudwatch...' % context.aws_request_id while not logs_client.filter_log_events( logGroupName=context.log_group_name, logStreamNames=[context.log_stream_name], filterPattern='"%s"' % wait_message )['events']: print(wait_message) time.sleep(5) except Exception as e: logging.error('Exception: %s' % e, exc_info=True) time.sleep(120) cfnresponse.send(event, context, status, data, physical_resource_id, reason)
def handler(event, context): if event['RequestType'] == 'Delete': send(event, context, SUCCESS) return time.sleep(int(event['ResourceProperties']['Time'])) send(event, context, SUCCESS)
def timeout(event, context): logging.error('Execution is about to time out, sending failure response to CloudFormation') cfnresponse.send(event, context, cfnresponse.FAILED, {}, None)
def handler(event, context): responseData = {} cognitoIDPClient = boto3.client('cognito-idp') print(str(event)) PoolName = 'il-auth-at-edge-userpool' ClientName = 'il-auth-at-edge-userpool-client' UserPoolId = '' ClientId = '' Domain = '' try: try: print("Try PhysicalResourceId") ResourceIdString = base64.b64decode( event['PhysicalResourceId']).decode() UserPoolId = ResourceIdString.split(':')[0] ClientId = ResourceIdString.split(':')[1] Domain = ResourceIdString.split(':')[2] print("Decoded values from PhysicalResourceId: {" + UserPoolId + ", " + ClientId + ", " + Domain + "}") except Exception: # Try to find a user pool and client that were created previously and # not encoded into the resource id. This is needed when updating a stack that was # created from an earlier version of this project, since user pools and clients # were not properly tracked print("PhysicalResourceId did not contain resource ids, try API") pools = cognitoIDPClient.list_user_pools(MaxResults=60) for pool in pools['UserPools']: if pool['Name'] == PoolName: UserPoolId = pool['Id'] break if UserPoolId: clients = cognitoIDPClient.list_user_pool_clients( UserPoolId=UserPoolId, MaxResults=60) for userpoolclient in clients['UserPoolClients']: if userpoolclient['ClientName'] == ClientName: ClientId = userpoolclient['ClientId'] break pool = cognitoIDPClient.describe_user_pool( UserPoolId=UserPoolId)['UserPool'] Domain = pool.get('Domain', '') print("Values from API: {" + UserPoolId + ", " + ClientId + ", " + Domain + "}") if UserPoolId and ClientId and Domain and event[ 'RequestType'] == 'Create': print( "We have an existing user pool with client and domain, do Update instead of " + event['RequestType']) event['RequestType'] = 'Update' customResourcePhysicalId = base64.b64encode( (UserPoolId + ':' + ClientId + ':' + Domain).encode()).decode() event['PhysicalResourceId'] = customResourcePhysicalId if event['RequestType'] == 'Create': print("Create") response = cognitoIDPClient.create_user_pool( AdminCreateUserConfig={ 'AllowAdminCreateUserOnly': True, 'UnusedAccountValidityDays': 7 }, PoolName=PoolName, AutoVerifiedAttributes=['email'], Schema=[{ 'Name': 'email', 'Required': True }]) CreatedUserPoolId = response['UserPool']['Id'] response = cognitoIDPClient.create_user_pool_client( UserPoolId=CreatedUserPoolId, ClientName=ClientName, ReadAttributes=[ 'address', 'birthdate', 'email', 'email_verified', 'family_name', 'gender', 'given_name', 'locale', 'middle_name', 'name', 'nickname', 'phone_number', 'phone_number_verified', 'picture', 'preferred_username', 'profile', 'updated_at', 'website', 'zoneinfo' ], WriteAttributes=[ 'address', 'birthdate', 'email', 'family_name', 'gender', 'given_name', 'locale', 'middle_name', 'name', 'nickname', 'phone_number', 'picture', 'preferred_username', 'profile', 'updated_at', 'website', 'zoneinfo' ], SupportedIdentityProviders=['COGNITO'], CallbackURLs=event['ResourceProperties']['CallbackUrls'].split( ','), LogoutURLs=event['ResourceProperties']['CallbackUrls'].split( ','), AllowedOAuthFlows=['implicit', 'code'], AllowedOAuthScopes=['aws.cognito.signin.user.admin', 'openid'], AllowedOAuthFlowsUserPoolClient=True) CreatedClientId = response['UserPoolClient']['ClientId'] response = cognitoIDPClient.create_user_pool_domain( Domain=str(CreatedClientId), UserPoolId=CreatedUserPoolId) CreatedDomain = CreatedClientId responseData['UserPoolId'] = CreatedUserPoolId responseData['ClientId'] = CreatedClientId customResourcePhysicalId = base64.b64encode( (CreatedUserPoolId + ':' + CreatedClientId + ':' + CreatedDomain).encode()).decode() 
print("Create SUCCESS, ResponseData=" + str(responseData)) cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, customResourcePhysicalId) elif event['RequestType'] == 'Update': print("Update") cognitoIDPClient.update_user_pool_client( UserPoolId=UserPoolId, ClientId=ClientId, ClientName=ClientName, ReadAttributes=[ 'address', 'birthdate', 'email', 'email_verified', 'family_name', 'gender', 'given_name', 'locale', 'middle_name', 'name', 'nickname', 'phone_number', 'phone_number_verified', 'picture', 'preferred_username', 'profile', 'updated_at', 'website', 'zoneinfo' ], WriteAttributes=[ 'address', 'birthdate', 'email', 'family_name', 'gender', 'given_name', 'locale', 'middle_name', 'name', 'nickname', 'phone_number', 'picture', 'preferred_username', 'profile', 'updated_at', 'website', 'zoneinfo' ], SupportedIdentityProviders=['COGNITO'], CallbackURLs=event['ResourceProperties']['CallbackUrls'].split( ','), LogoutURLs=event['ResourceProperties']['CallbackUrls'].split( ','), AllowedOAuthFlows=['implicit', 'code'], AllowedOAuthScopes=['aws.cognito.signin.user.admin', 'openid'], AllowedOAuthFlowsUserPoolClient=True) responseData['UserPoolId'] = UserPoolId responseData['ClientId'] = ClientId print("Update SUCCESS - responseData=" + str(responseData)) cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, event['PhysicalResourceId']) elif event['RequestType'] == 'Delete': print("Delete") cognitoIDPClient.delete_user_pool_domain(Domain=Domain, UserPoolId=UserPoolId) cognitoIDPClient.delete_user_pool_client(UserPoolId=UserPoolId, ClientId=ClientId) cognitoIDPClient.delete_user_pool(UserPoolId=UserPoolId) print("Delete SUCCESS - responseData=" + str(responseData)) cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, event['PhysicalResourceId']) except Exception as e: responseData['Error'] = str(e) print("FAILED, Exception: " + responseData['Error']) cfnresponse.send( event, context, cfnresponse.FAILED, responseData, event['PhysicalResourceId'] if 'PhysicalResourceId' in event.keys() else '')
def lambda_handler(event, context): client = boto3.client('waf-regional') token = client.get_change_token() print(token) # Note: this handler does not branch on event['RequestType']; every invocation creates a rule group # The ten rule IDs arrive as environment variables Rule1..Rule10 rule_ids = [get_mandatory_evar('Rule{}'.format(i)) for i in range(1, 11)] rulegroup = client.create_rule_group(Name='pac-rulegroup-test1', MetricName='cdn', ChangeToken=token['ChangeToken']) print(rulegroup) time.sleep(10) updatetoken = client.get_change_token() # Insert the rules at priorities 10, 20, ..., 100 updates = [{ 'Action': 'INSERT', 'ActivatedRule': { 'Priority': 10 * (i + 1), 'RuleId': rule_id, 'Action': { 'Type': 'BLOCK' }, 'OverrideAction': { 'Type': 'COUNT' }, 'Type': 'REGULAR' } } for i, rule_id in enumerate(rule_ids)] updateRuleGrp = client.update_rule_group( RuleGroupId=rulegroup['RuleGroup']['RuleGroupId'], Updates=updates, ChangeToken=updatetoken['ChangeToken']) ruleGrpId = {'ruleGrpId': rulegroup['RuleGroup']['RuleGroupId']} physicalResourceId = 'FWM-RULE-GRP-ID-' + context.log_stream_name cfnresponse.send(event, context, cfnresponse.SUCCESS, ruleGrpId, physicalResourceId=physicalResourceId) return updateRuleGrp
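# get_mandatory_evar() is called above but not defined in this listing. A plausible sketch,
# assuming it simply requires the named environment variable to be set; the real helper may
# behave differently.
import os

def get_mandatory_evar(name):
    # Fail fast with a clear message instead of a late KeyError inside the handler.
    value = os.environ.get(name)
    if not value:
        raise ValueError('Required environment variable {} is not set'.format(name))
    return value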
def create_endpoint(event, context): responseData = {} # Check if the account is part of an Organization. Only accounts within an Organization can receive an SCP try: response = org.describe_organization() logger.info("Account is member of an existing Organization.") try: getRootId = org.list_roots() rootId = getRootId['Roots'][0]['Id'] enableSCP = org.enable_policy_type( RootId=rootId, PolicyType='SERVICE_CONTROL_POLICY' ) print("SCP has been enabled") responseData['response'] = enableSCP responseData['statusMessage'] = 'SCP Enabled' cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData) return { 'body': 'Organization exists & SCP Policy Type is enabled.' } except org.exceptions.PolicyTypeAlreadyEnabledException: print("SCP policies are already enabled") responseData['response'] = "Success" responseData['statusMessage'] = 'SCP Enabled' cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData) return { 'body': 'Organization exists & SCP Policy Type is enabled.' } except org.exceptions.AWSOrganizationsNotInUseException: print("Not part of an Organization. Organization will be created.") # Create the Organization based on the current account createOrganization = org.create_organization( FeatureSet='ALL' ) print("Organization created.") print(createOrganization) # Enable SCP getRootId = org.list_roots() rootId = getRootId['Roots'][0]['Id'] enableSCP = org.enable_policy_type( RootId=rootId, PolicyType='SERVICE_CONTROL_POLICY' ) print("SCP has been enabled") responseData['response'] = enableSCP responseData['statusMessage'] = 'SCP Enabled' cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData) return { 'body': 'Organization exists & SCP Policy Type is enabled.' }
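# create_endpoint() uses module-level org and logger objects that are not shown in this
# listing. A minimal sketch of the assumed initialization:
import logging
import boto3

org = boto3.client('organizations')
logger = logging.getLogger()
logger.setLevel(logging.INFO)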
def handler(event, context): status = cfnresponse.SUCCESS level = logging.getLevelName(os.getenv('LogLevel')) log.setLevel(level) log.debug(event) s3_bucket = os.getenv('AuthBucket') cluster_name = os.getenv('ClusterName') number_of_students = int(os.getenv('NumStudents')) hosted_zone_name = os.getenv('HostedZoneName') openshift_client_base_mirror_url = os.getenv('OpenShiftMirrorURL') openshift_version = os.getenv('OpenShiftVersion') openshift_client_binary = os.getenv('OpenShiftClientBinary') openshift_install_binary = os.getenv('OpenShiftInstallBinary') create_cloud9_instance = decide_cloud9(os.getenv("CreateCloud9Instance")) file_extension = '.tar.gz' cluster_data = {"cluster_name": cluster_name, "openshift_version": openshift_version, "clusters_information": {} } stack_arr = {} if event.get('RequestType') != 'Delete': stack_arr = build_stack_arr(cluster_name, number_of_students, hosted_zone_name, create_cloud9_instance, s3_bucket, openshift_version) generate_webtemplate(s3_bucket, cluster_data, stack_arr) if sys.platform == 'darwin': openshift_install_os = '-mac-' else: openshift_install_os = '-linux-' openshift_client_package = openshift_client_binary + openshift_install_os + openshift_version + file_extension openshift_client_mirror_url = openshift_client_base_mirror_url + openshift_version + "/" download_path = '/tmp/' log.info("Cluster name: " + os.getenv('ClusterName')) # We are in the Deploy CloudFormation event if 'RequestType' in event.keys(): try: if event['RequestType'] == 'Delete': waiter_array = [] wait_for_state = "stack_delete_complete" log.info("Deleting all student stacks in {} deployment".format(cluster_name)) delete_contents_s3(s3_bucket=s3_bucket) for i in range(number_of_students): student_cluster_name = cluster_name + '-' + 'student' + str(i) delete_stack(student_cluster_name) waiter_array.append({ "stack_name": student_cluster_name, "stack_state": wait_for_state }) # TODO: If the stack is in a state other than 'DELETE_IN_PROGRESS' then the lambda will timeout waiting on # the 'stack_delete_complete' state wait_for_stack_state(waiter_array) elif event['RequestType'] == 'Update': log.info("Update sent, however, this is unsupported at this time.") pass else: cf_client = boto3.client('cloudformation') cf_params = parse_properties(event['ResourceProperties']) log.info("Delete and Update not detected, proceeding with Create") pull_secret = os.environ.get('PullSecret') ssh_key = os.environ.get('SSHKey') if openshift_version != "3": openshift_install_package = openshift_install_binary + openshift_install_os + openshift_version + file_extension log.info("Generating OCP installation files for cluster " + cluster_name) install_dependencies(openshift_client_mirror_url, openshift_install_package, openshift_install_binary, download_path) for stack in stack_arr: # The only status is either building or complete, skip if either is found if stack["status"]: continue build_array = [] log.debug("STACK: {}".format(stack)) student_cluster_name = stack["name"] building_key = os.path.join(student_cluster_name, "building") local_student_folder = download_path + student_cluster_name if openshift_version != "3": generate_ignition_files(openshift_install_binary, download_path, student_cluster_name, ssh_key, pull_secret, hosted_zone_name, student_num=stack["number"]) upload_ignition_files_to_s3(local_student_folder, s3_bucket) save_cfparams_json(cf_params=cf_params, s3_bucket=s3_bucket, student_cluster_name=student_cluster_name, create_cloud9_instance=create_cloud9_instance) 
cf_params["StackName"] = stack["name"] build_array.append(cf_params) log.debug(build_array) build_stacks(build_array) stack["status"] = "building" add_file_to_s3(s3_bucket=s3_bucket, body="building", key=building_key, content_type="text/plain", acl="private") generate_webtemplate(s3_bucket, cluster_data, stack_arr) log.info("Complete") except Exception: logging.error('Unhandled exception', exc_info=True) status = cfnresponse.FAILED finally: cfnresponse.send(event, context, status, {}, None) # We are in the Validate openshift clusters event else: try: if openshift_version != "3": install_dependencies(openshift_client_mirror_url, openshift_client_package, openshift_client_binary, download_path) failed_clusters = [] for stack in stack_arr: if stack["status"] == "complete": log.debug("Stack complete {}".format(stack["name"])) continue # If it's OpenShift 3, mark it as failed. # If it is not available, add it to the array and continue - is this needed? # Otherwise it is available and OCP 4, so test it. # If successful, continue; otherwise append to failed_clusters if openshift_version != "3": if not cluster_availabe(url=stack["api_url"]): failed_clusters.append(stack["name"]) continue if scale_ocp_replicas(s3_bucket, stack["name"], stack["status"]): stack["status"] = "complete" continue log.debug("Stack failed {}".format(stack["name"])) failed_clusters.append(stack["name"]) generate_webtemplate(s3_bucket, cluster_data, stack_arr) if len(failed_clusters) == 0: deactivate_event(cluster_name) else: log.debug("failed_clusters = {}".format(failed_clusters)) rebuild_stacks(cluster_name, failed_clusters, s3_bucket) log.info("Complete") except Exception: logging.error('Unhandled exception', exc_info=True)
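# decide_cloud9() is called by the handler above but not defined here. A plausible sketch,
# assuming it only normalizes the CreateCloud9Instance string parameter to a bool; the real
# helper may differ.
def decide_cloud9(raw_value):
    # CloudFormation template parameters arrive as strings like "true" / "false".
    return str(raw_value).lower() == 'true'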
def lambda_handler(event, context): message = '' response = cfnresponse.FAILED cluster_status = '' # Get CloudFormation parameters cfn_stack_id = event.get('StackId') cfn_request_type = event.get('RequestType') cfn_physicalResourceId = context.log_stream_name if event.get('ResourceProperties', {}).get('PhysicalResourceId') is None else event['ResourceProperties']['PhysicalResourceId'] if cfn_stack_id and cfn_request_type != 'Delete': try: # Wait for cluster to become available before trying to connect while cluster_status != 'Available': # Exit if Lambda will timeout before next sleep ends if context.get_remaining_time_in_millis() < (30 * 1000): message = 'Function will timeout. Exiting with failure.' print('ERROR: ', message) cfnresponse.send(event, context, response, {'Message': message}, cfn_physicalResourceId) return {'statusCode': 200, 'body': json.dumps(message)} # Get cluster availability status every 30 seconds time.sleep(30) rsclient = boto3.client('redshift') clusters = rsclient.describe_clusters( ClusterIdentifier=REDSHIFT_NAME) cluster_status = clusters['Clusters'][0]['ClusterAvailabilityStatus'] print('INFO: Cluster {} status: {}. Time remaining: {} ms.'.format(REDSHIFT_NAME, cluster_status, context.get_remaining_time_in_millis())) create_spectrum_schema_sql = '' create_status_history_table_sql = '' with open('sql/create_spectrum_schema.sql', 'r') as spectrum_sql_file: create_spectrum_schema_sql = spectrum_sql_file.read() create_spectrum_schema_sql = create_spectrum_schema_sql.replace('${GLUE_DB}', GLUE_DB).replace('${IAM_ROLE_ARN}', IAM_ROLE_ARN) with open('sql/create_status_history_table.sql', 'r') as table_sql_file: create_status_history_table_sql = table_sql_file.read() print('INFO: Connecting...') conn_info = connection_info(DB_CREDS) with psycopg2.connect(dbname=conn_info['dbname'], host=conn_info['host'], port=conn_info['port'], user=conn_info['username'], password=conn_info['password']) as conn: with conn.cursor() as cur: print('INFO: Executing SQL: {}'.format(create_spectrum_schema_sql)) cur.execute(create_spectrum_schema_sql) print('INFO: Executing SQL: {}'.format(create_status_history_table_sql)) cur.execute(create_status_history_table_sql) message = 'SUCCESS: Executed setup queries successfully.' response = cfnresponse.SUCCESS except Exception as e: print('ERROR: ', e) message = '{}'.format(e) else: message = 'INFO: Deleting function.' response = cfnresponse.SUCCESS cfnresponse.send(event, context, response, {'Message': message}, cfn_physicalResourceId) return {'statusCode': 200, 'body': json.dumps(message)}