def handler(event, context):
    """Main handler function, passes off it's work to crhelper's cfn_handler."""  # noqa: D401
    # update the logger with event info
    global logger
    logger = crhelper.log_config(event, loglevel="info")
    return crhelper.cfn_handler(event, context, create, update, delete, logger,
                                init_failed)
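All of these handlers delegate to create, update, and delete callbacks (plus module-level logger and init_failed globals) defined elsewhere in the same file. As a point of reference, here is a minimal sketch of that surrounding scaffolding, following the return convention visible in Example #6 and the init pattern in Example #8 below; the bodies are placeholders, not code from any of the listed projects.

import crhelper

# configure logging before the first event arrives and track init failures
logger = crhelper.log_config({"RequestId": "CONTAINER_INIT"})
init_failed = False


def create(event, context):
    # placeholder: provision the resource, then return its id and any outputs
    physical_resource_id = "example-resource-id"
    response_data = {}
    return physical_resource_id, response_data


def update(event, context):
    # placeholder: keep the existing physical id unless a replacement is required
    return event["PhysicalResourceId"], {}


def delete(event, context):
    # placeholder: tear the resource down; crhelper reports the outcome to CloudFormation
    return event["PhysicalResourceId"], {}


def handler(event, context):
    global logger
    logger = crhelper.log_config(event)
    return crhelper.cfn_handler(event, context, create, update, delete, logger, init_failed)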
Example #2
def handler(event, context):

    # update the logger with event info
    global logger
    logger = crhelper.log_config(event)
    return crhelper.cfn_handler(event, context, create, update, delete, logger,
                                init_failed)
Example #3
def handler(event, context):
    """
    Main handler function, passes off its work to crhelper's cfn_handler
    """
    # update the logger with event info
    global logger
    logger = crhelper.log_config(event)
    return crhelper.cfn_handler(event, context, create, update, delete, logger, init_failed)
Example #4
def handler(event, context):
    """
    Main handler function, passes off its work to crhelper's cfn_handler
    """
    print('CloudFormation event received: %s' % str(event))
    # update the logger with event info
    global logger
    logger = crhelper.log_config(event)
    return crhelper.cfn_handler(event, context, create, update, delete, logger, init_failed)
Example #5
def handler(event, context):
    """
    Main handler function, passes off its work to crhelper's cfn_handler
    """
    # update the logger with event info
    print("event: {}".format(event))
    global logger
    logger = crhelper.log_config(event)
    return crhelper.cfn_handler(event, context, create, update, delete, logger, init_failed)
Example #6
def delete(event, context):
    global logger
    logger = crhelper.log_config(event)

    print("event: {}".format(event))

    bucket = event['ResourceProperties']['BucketName']
    empty_delete_buckets(event, bucket)

    physical_resource_id = event['PhysicalResourceId']
    response_data = {}
    return physical_resource_id, response_data
Example #7
def elastictranscoder_resource_handler(event: CustomResourceRequest,
                                       context: LambdaContext) -> None:
    """AWS Lambda handler for ElasticTranscoder service to
    create/update and delete Video Pipelines from using CFN

    """
    try:
        logger = crhelper.log_config(event)
        init_failed = False
    except Exception as e:
        logger.error(e, exc_info=True)
        init_failed = True

    crhelper.cfn_handler(event, context, create_pipeline, update_pipeline,
                         delete_pipeline, logger, init_failed)
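Example #7 annotates its parameters with CustomResourceRequest and LambdaContext; those types come from the project's own typing helpers (or a typing library) and are not shown here. For orientation only, a sketch of the standard fields a CloudFormation custom resource event carries; this TypedDict is an illustration, not the project's actual definition.

from typing import Any, Dict
from typing import TypedDict  # Python 3.8+; use typing_extensions on older runtimes


class CustomResourceEventSketch(TypedDict, total=False):
    # standard fields CloudFormation includes in every custom resource request
    RequestType: str                        # "Create", "Update", or "Delete"
    RequestId: str
    ResponseURL: str
    StackId: str
    LogicalResourceId: str
    ResourceType: str
    ResourceProperties: Dict[str, Any]
    # present on Update and Delete requests
    PhysicalResourceId: str
    # present on Update requests only
    OldResourceProperties: Dict[str, Any]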
Example #8
import boto3
import crhelper

# initialise logger
logger = crhelper.log_config({"RequestId": "CONTAINER_INIT"})
logger.info("Logging configured")
# set global to track init failures
init_failed = False

try:
    # Place initialization code here
    logger.info("Container initialization completed")
except Exception as e:
    logger.error(e, exc_info=True)
    init_failed = e


def create(event, context):
    apps = event["ResourceProperties"]["AppsEMR"]
    s3Bucket = event["ResourceProperties"]["S3Bucket"]
    emrReleaseLabel = event["ResourceProperties"]["emrReleaseLabel"]
    prestoEngineRequested = "Presto"
    isPrestoAppRequested = False
    isSparkAppRequested = False
    formatted_applist = apps.split(",")
    applist = []
    for app in formatted_applist:
        applist.append({"Name": app.strip()})
        if app.strip() in ["Presto", "PrestoSQL"]:
            isPrestoAppRequested = True
            prestoEngineRequested = app.strip()
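Example #8 is cut off after the application list is assembled. Purely as an illustration of where such a list typically goes, here is a hedged sketch of launching an EMR cluster with it; the run_job_flow parameters and names below are assumptions, not the truncated code.

import boto3


def launch_emr_cluster(applist, emr_release_label, s3_bucket):
    # hypothetical helper: start an EMR cluster with the requested applications
    emr = boto3.client("emr")
    response = emr.run_job_flow(
        Name="custom-resource-cluster",                 # hypothetical cluster name
        ReleaseLabel=emr_release_label,                 # e.g. "emr-6.2.0"
        Applications=applist,                           # e.g. [{"Name": "Spark"}, {"Name": "Presto"}]
        LogUri="s3://{}/emr-logs/".format(s3_bucket),
        Instances={
            "MasterInstanceType": "m5.xlarge",
            "SlaveInstanceType": "m5.xlarge",
            "InstanceCount": 3,
            "KeepJobFlowAliveWhenNoSteps": True,
        },
        JobFlowRole="EMR_EC2_DefaultRole",
        ServiceRole="EMR_DefaultRole",
    )
    return response["JobFlowId"]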
"""
Get PrefixListID based on PrefixListName
"""

from boto3 import client
from botocore.exceptions import ClientError
import os

import crhelper

# initialise logger
logger = crhelper.log_config({"RequestId": "CONTAINER_INIT"})
logger.info('Logging configured')
# set global to track init failures
init_failed = False

try:
    # Place initialization code here
    logger.info("Container initialization completed")
except Exception as e:
    logger.error(e, exc_info=True)
    init_failed = e


def get_pl_id(pl_name, region):
    """
    Get PrefixListID for given PrefixListName
    """

    logger.info("Get PrefixListId for PrefixListName: %s in %s" % (pl_name, region)) 
    try:
def lambda_handler(event, context):
    """Main handler; passes its work to crhelper's cfn_handler."""
    global logger
    logger = crhelper.log_config(event)
    return crhelper.cfn_handler(event, context, create, update, delete, logger,
                                init_failed)
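The get_pl_id body above is cut off mid-try. As an assumption about how such a lookup can be done (not the original code), EC2's DescribePrefixLists API resolves a prefix list name to its ID:

from boto3 import client
from botocore.exceptions import ClientError


def lookup_prefix_list_id(pl_name, region):
    # hypothetical helper, not the truncated get_pl_id implementation
    ec2 = client("ec2", region_name=region)
    try:
        response = ec2.describe_prefix_lists(
            Filters=[{"Name": "prefix-list-name", "Values": [pl_name]}]
        )
    except ClientError as e:
        raise RuntimeError("DescribePrefixLists failed: {}".format(e))
    prefix_lists = response.get("PrefixLists", [])
    if not prefix_lists:
        raise ValueError("no prefix list named {0} in {1}".format(pl_name, region))
    return prefix_lists[0]["PrefixListId"]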
Example #12
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import re
import time

import boto3
from botocore.exceptions import ClientError

import crhelper

# initialise logger
logger = crhelper.log_config({"RequestId": "CONTAINER_INIT"}, loglevel="info")
logger.info("Logging configured")
# set global to track init failures
init_failed = False

try:
    # Place initialization code here
    logger.info("Container initialization completed")
    batch_client = boto3.client("batch")
except Exception as e:
    logger.error(e, exc_info=True)
    init_failed = e


def get_job_definition_name_by_arn(job_definition_arn):
    """
Example #13
def empty_delete_buckets(event, bucket_name):
    global logger
    logger = crhelper.log_config(event)
    logger.info("trying to delete the bucket {0}".format(bucket_name))
    # s3_client = SESSION.client('s3', region_name=region)
    print("bucketname: {}".format(bucket_name))
    s3_client = boto3.client('s3')
    try:
        # head_bucket raises ClientError if the bucket does not exist or is not accessible
        s3_client.head_bucket(Bucket=bucket_name)
    except ClientError as e:
        logger.error(e, exc_info=True)
        logger.error("bucket {0} does not exist".format(bucket_name))
        return
    # Check if versioning is enabled
    #try:
    response = s3_client.get_bucket_versioning(Bucket=bucket_name)
    #except Exception as e:
    #    logger.error(e, exc_info=True)
    #    logger.error("Error getting versioning info".format(bucket_name))
    #    return
    status = response.get('Status', '')
    print("s3 version status: {}".format(status))
    if status == 'Enabled':
        response = s3_client.put_bucket_versioning(
            Bucket=bucket_name,
            VersioningConfiguration={'Status': 'Suspended'})
    paginator = s3_client.get_paginator('list_object_versions')
    page_iterator = paginator.paginate(
        Bucket=bucket_name
    )
    for page in page_iterator:
        logger.info(page)
        if 'DeleteMarkers' in page:
            delete_markers = page['DeleteMarkers']
            if delete_markers is not None:
                for delete_marker in delete_markers:
                    key = delete_marker['Key']
                    versionId = delete_marker['VersionId']
                    s3_client.delete_object(Bucket=bucket_name, Key=key, VersionId=versionId)
        if 'Versions' in page and page['Versions'] is not None:
            versions = page['Versions']
            for version in versions:
                logger.info(version)
                key = version['Key']
                versionId = version['VersionId']
                s3_client.delete_object(Bucket=bucket_name, Key=key, VersionId=versionId)
    object_paginator = s3_client.get_paginator('list_objects_v2')
    page_iterator = object_paginator.paginate(
        Bucket=bucket_name
    )
    for page in page_iterator:
        if 'Contents' in page:
            for content in page['Contents']:
                key = content['Key']
                s3_client.delete_object(Bucket=bucket_name, Key=key)
    # UNCOMMENT THE LINE BELOW TO MAKE LAMBDA DELETE THE BUCKET.
    # THIS WILL CAUSE A FAILURE, SINCE CLOUDFORMATION ALSO TRIES TO DELETE THE BUCKET.
    # s3_client.delete_bucket(Bucket=bucket_name)
    # print("Successfully deleted the bucket {0}".format(bucket_name))
    logger.info("Successfully emptied the bucket {0}".format(bucket_name))