Example #1
 def deployment_stack_deleted(self, deployment_name, deployment_stack_id,
                              deployment_access_stack_id):
     self._output_message(
         '\n{} deployment stack {} and access stack {} have been deleted.'.
         format(deployment_name,
                util.get_stack_name_from_arn(deployment_stack_id),
                util.get_stack_name_from_arn(deployment_access_stack_id)))
Example #2
def __get_access_stack_parameters(context, deployment_name, deployment_stack_id = None, uploader = None):
    return {
        'ConfigurationBucket': uploader.bucket if uploader else None,
        'ConfigurationKey': uploader.key if uploader else None,
        'ProjectResourceHandler': context.config.project_resource_handler_id,
        'PlayerAccessTokenExchange': context.config.token_exchange_handler_id,
        'ProjectStack': util.get_stack_name_from_arn(context.config.project_stack_id),
        'DeploymentName': deployment_name,
        'DeploymentStack': util.get_stack_name_from_arn(deployment_stack_id) if deployment_stack_id else None,
        'DeploymentStackArn': deployment_stack_id
    }
Example #3
    def delete(self, stack_id, pending_resource_status=None):

        stack_name = util.get_stack_name_from_arn(stack_id)

        self.context.view.deleting_stack(stack_name, stack_id)

        self.__clean_undeltable_resources(
            stack_id, pending_resource_status=pending_resource_status)

        monitor = Monitor(self.context, stack_id, 'DELETE')

        cf = self.context.aws.client('cloudformation',
                                     region=util.get_region_from_arn(stack_id))

        try:
            res = cf.delete_stack(StackName=stack_id)
        except ClientError as e:
            raise HandledError(
                'Could not start delete of {} stack ({}).'.format(
                    stack_name, stack_id), e)

        monitor.wait()

        self.__clean_log_groups(
            stack_id, pending_resource_status=pending_resource_status)
Example #4
def __delete_custom_resource_lambdas(context, args):
    context.view.deleting_custom_resource_lambdas()
    stack_id = context.config.project_stack_id
    project_name = util.get_stack_name_from_arn(stack_id)
    region = util.get_region_from_arn(stack_id)
    lambda_client = context.aws.client('lambda', region=region)
    iam_client = context.aws.client('iam')
    delete_functions = []
    delete_roles = []
    prefixes = [
        "{}-{}-".format(project_name, prefix)
        for prefix in resource_type_info.LAMBDA_TAGS
    ]

    # Iterate through all Lambda functions and collect those whose names begin with any of the prefixes
    # associated with custom resource handlers
    for response in lambda_client.get_paginator('list_functions').paginate():
        for entry in response['Functions']:
            function_name = entry['FunctionName']
            if any(function_name.startswith(prefix) for prefix in prefixes):
                delete_functions.append(function_name)
                delete_roles.append(
                    aws_utils.get_role_name_from_role_arn(entry['Role']))

    # Delete the functions and roles related to custom resource handlers
    for function_name, role_name in zip(delete_functions, delete_roles):
        lambda_client.delete_function(FunctionName=function_name)
        iam_client.delete_role_policy(RoleName=role_name, PolicyName="Default")
        iam_client.delete_role(RoleName=role_name)

    context.view.deleting_lambdas_completed(len(delete_functions))
Example #5
def handler(event, context):
    print "Start FIFO Auto Scale"
    prefix = util.get_stack_name_from_arn(event[c.ENV_STACK_ID], False)
    request_type = event['RequestType']
    assigned_suffix = event['ResourceProperties'].get('Suffix', None)
    type = event['ResourceProperties'].get('QueueType', "fifo")
    initial_number_of_queues = int(event['ResourceProperties'].get(
        'IntialNumberOfQueues', 5))

    if assigned_suffix:
        prefix = "{0}{1}".format(prefix, assigned_suffix)

    sqs = Sqs({}, queue_prefix=prefix, type=type)
    if request_type == 'Delete':
        sqs.delete_all_queues(prefix)
    else:
        queues = sqs.get_queues()
        number_of_queues = len(queues)

        # 5 queues to start; each queue can support 300 send message calls per second (1500 messages per second total)
        if number_of_queues < initial_number_of_queues:
            for i in range(number_of_queues, initial_number_of_queues):
                sqs.add_fifo_queue(prefix)

    return custom_resource_response.success_response({}, "*")
Example #6
 def __add_to_sqs(self, files):
     prefix = util.get_stack_name_from_arn(
         os.environ[c.ENV_DEPLOYMENT_STACK_ARN])
     sqs = Sqs(self.context, "{0}{1}".format(prefix,
                                             c.KEY_SQS_AMOEBA_SUFFIX))
     sqs.set_queue_url(lowest_load_queue=False)
     sqs.send_generic_message(json.dumps({"paths": files}))
Example #7
def main(event, lambdacontext):
    starttime = time.time()    
    queue_url = event.get(c.KEY_SQS_QUEUE_URL, None)        
    print "Started consumer with queue url '{}'".format(queue_url)    
    context = event.get("context", {})        
    context[c.KEY_SQS_QUEUE_URL] = queue_url        
    context[c.KEY_LAMBDA_FUNCTION] = lambdacontext.function_name if hasattr(lambdacontext, 'function_name') else None
    context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr(lambdacontext, 'aws_request_id') else None
    context[c.KEY_IS_LAMBDA_ENV] = context[c.KEY_REQUEST_ID] is not None
      
    prefix = util.get_stack_name_from_arn(os.environ[c.ENV_DEPLOYMENT_STACK_ARN])    

    context[c.KEY_STACK_PREFIX] = prefix
    context[c.KEY_SQS] = Sqs(context, "{0}_".format(prefix))
    context[c.KEY_SQS_AMOEBA] = Sqs(context, "{0}{1}_".format(prefix, c.KEY_SQS_AMOEBA_SUFFIX))
    context[c.KEY_SQS_AMOEBA].set_queue_url(lowest_load_queue=True)    
    context[c.KEY_LAMBDA] = Lambda(context)
    context[c.KEY_CLOUDWATCH] = CloudWatch(context)
    
    context[c.KEY_THREAD_POOL] = ThreadPool(context, 8)               
    context[c.KEY_METRIC_BUCKET] = os.environ[c.RES_S3_STORAGE]            
    
    context[c.KEY_START_TIME] = starttime
    context[c.CW_ATTR_SAVE_DURATION] = context[c.KEY_CLOUDWATCH].avg_save_duration(util.get_cloudwatch_namespace(os.environ[c.ENV_DEPLOYMENT_STACK_ARN]))
    context[c.CW_ATTR_DELETE_DURATION] = context[c.KEY_CLOUDWATCH].avg_delete_duration(util.get_cloudwatch_namespace(os.environ[c.ENV_DEPLOYMENT_STACK_ARN]))    
          
    context[c.KEY_SUCCEEDED_MSG_IDS] = []
    process(context)    
    del context
    gc.collect()
    return {        
        'StatusCode': 200        
    }
Example #8
    def delete(self, stack_id, pending_resource_status = None):

        stack_name = util.get_stack_name_from_arn(stack_id)

        self.context.view.deleting_stack(stack_name, stack_id)

        self.__clean_undeltable_resources(stack_id, pending_resource_status = pending_resource_status)

        monitor = Monitor(self.context, stack_id, 'DELETE')

        cf = self.context.aws.client('cloudformation', region=util.get_region_from_arn(stack_id))
        
        failed_resources = []
        attempts = 0
        while attempts < 5:
            try:
                res = cf.delete_stack(StackName=stack_id, RetainResources=list(failed_resources))
            except ClientError as e:
                raise HandledError('Could not start delete of {} stack ({}).'.format(stack_name, stack_id), e)
            failed_resources = monitor.wait()
            if len(failed_resources) == 0:
                break
            attempts += 1

        self.__clean_log_groups(stack_id, pending_resource_status = pending_resource_status)
Example #9
    def update(self, stack_id, template_url, parameters={}, pending_resource_status={}, capabilities = []):

        stack_name = util.get_stack_name_from_arn(stack_id)

        self.context.view.updating_stack(stack_name, template_url, parameters)

        if pending_resource_status is not None:
            self.__clean_undeltable_resources(stack_id, pending_resource_status=pending_resource_status)

        monitor = Monitor(self.context, stack_id, 'UPDATE')

        cf = self.context.aws.client('cloudformation', region=util.get_region_from_arn(stack_id))

        parameter_list = [ { 'ParameterKey': k, 'ParameterValue': v } for k, v in parameters.iteritems() ]
        
        try:
            res = cf.update_stack(
                StackName = stack_id,
                TemplateURL = template_url,
                Capabilities = capabilities,
                Parameters = parameter_list
            )
        except ClientError as e:
            raise HandledError('Could not start update of {} stack ({}).'.format(stack_name, stack_id), e)

        monitor.wait()

        self.__clean_log_groups(stack_id, pending_resource_status = pending_resource_status)
Example #10
    def describe_resources(
        self,
        stack_id,
        recursive=True,
        optional=False
    ):

        region = util.get_region_from_arn(stack_id)
        cf = self.context.aws.client('cloudformation', region=region)

        self.context.view.describing_stack_resources(stack_id)

        try:
            res = cf.describe_stack_resources(StackName=stack_id)
        except ClientError as e:
            if optional and e.response['Error']['Code'] == 'ValidationError':
                return {}
            message = e.message
            if e.response['Error']['Code'] == 'ValidationError':
                message += ' Make sure the AWS credentials you are using have access to the project\'s resources.'
            raise HandledError('Could not get stack {} resource data. {}'.format(
                util.get_stack_name_from_arn(stack_id), message))

        resource_descriptions = {}

        for entry in res['StackResources']:
            resource_descriptions[entry['LogicalResourceId']] = entry
            if recursive and entry['ResourceType'] == 'AWS::CloudFormation::Stack':
                physical_resource_id = entry.get('PhysicalResourceId', None)
                if physical_resource_id is not None:
                    logical_resource_id = entry['LogicalResourceId']
                    nested_map = self.describe_resources(physical_resource_id)
                    for k,v in nested_map.iteritems():
                        resource_descriptions[entry['LogicalResourceId'] + '.' + k] = v
            elif entry['ResourceType'] == 'Custom::CognitoUserPool':    # User Pools require extra information (client id/secret)
                resource_descriptions[entry['LogicalResourceId']]['UserPoolClients'] = []
                idp = self.context.aws.client('cognito-idp', region=region)
                pool_id = entry.get('PhysicalResourceId', None)
                # Look up client IDs if the pool ID is valid. Valid pool IDs must contain an underscore.
                # CloudFormation initializes the physical ID to a UUID without an underscore before the resource is created.
                # If the pool creation doesn't happen or it fails, the physical ID isn't updated to a valid value.
                if pool_id is not None and pool_id.find('_') >= 0:
                    try:
                        client_list = idp.list_user_pool_clients(UserPoolId=pool_id, MaxResults=60)['UserPoolClients']
                    except ClientError as e:
                        client_list = {}
                        if e.response['Error']['Code'] == 'ResourceNotFoundException':
                            continue
                    collected_details = {}
                    for client in client_list:
                        client_name = client['ClientName']
                        client_id = client['ClientId']
                        client_description = idp.describe_user_pool_client(UserPoolId=pool_id, ClientId=client_id)['UserPoolClient']
                        collected_details[client_name] = {
                            'ClientId': client_id
                        }
                    resource_descriptions[entry['LogicalResourceId']]['UserPoolClients'] = collected_details

        return resource_descriptions
Example #11
def delete_stack(context, args):

    if context.config.project_stack_id is None:
        raise HandledError("Project stack does not exist.")

    if context.config.deployment_names:
        raise HandledError(
            'The project has {} deployment stack(s): {}. You must delete these stacks before you can delete the project stack.'
            .format(
                len(context.config.deployment_names), ', '.join(
                    deployment_name
                    for deployment_name in context.config.deployment_names)))

    if context.stack.id_exists(context.config.project_stack_id):

        logs_bucket_id = context.stack.get_physical_resource_id(
            context.config.project_stack_id,
            'Logs',
            optional=True,
            expected_type='AWS::S3::Bucket')

        pending_resource_status = __get_pending_resource_status(context,
                                                                deleting=True)
        context.stack.confirm_stack_operation(context.config.project_stack_id,
                                              'project stack', args,
                                              pending_resource_status)

        context.stack.delete(context.config.project_stack_id,
                             pending_resource_status=pending_resource_status)

        if logs_bucket_id:

            s3 = context.aws.client('s3')

            # Check to see if the bucket still exists; old versions of project-template.json
            # don't have DeletionPolicy="Retain" on this bucket.
            try:
                s3.head_bucket(Bucket=logs_bucket_id)
                bucket_still_exists = True
            except:
                bucket_still_exists = False

            if bucket_still_exists:
                stack_name = util.get_stack_name_from_arn(
                    context.config.project_stack_id)
                util.delete_bucket_contents(context, stack_name, "Logs",
                                            logs_bucket_id)
                context.view.deleting_bucket(logs_bucket_id)
                s3.delete_bucket(Bucket=logs_bucket_id)

    else:

        context.view.clearing_project_stack_id(context.config.project_stack_id)

    context.config.clear_project_stack_id()
Example #12
def main(event, lambdacontext):
    global context
    global timestamp
    global aws_sqs
    start = time.time()
    ok_response =  {        
        'StatusCode': 200,            
    }
    refreshtime = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)
    if context is None or aws_sqs is None or refreshtime > timestamp:        
        context=dict({})    
        stack_id = os.environ[c.ENV_DEPLOYMENT_STACK_ARN]
        context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr(lambdacontext, 'aws_request_id') else None
        db = DynamoDb(context)
        prefix = util.get_stack_name_from_arn(stack_id)
        aws_sqs = Sqs(context, queue_prefix="{}_".format(prefix))
        aws_sqs.set_queue_url(True)         
        timestamp = datetime.datetime.utcnow() 
    else:
        context[c.KEY_SQS_QUEUE_URL] = aws_sqs.queue_url

    data =  event.get(c.API_PARAM_PAYLOAD, {})[c.API_PARAM_DATA]
    source_IP = event.get(c.API_PARAM_SOURCE_IP, None)     
    sensitivity_type = event.get(c.SQS_PARAM_SENSITIVITY_TYPE, sensitivity.SENSITIVITY_TYPE.NONE)           
    compression_mode = event.get(c.SQS_PARAM_COMPRESSION_TYPE, compression.COMPRESSION_MODE.NONE)           
    payload_type = event.get(c.SQS_PARAM_PAYLOAD_TYPE, payload.PAYLOAD_TYPE.CSV)    
    compression_mode = CompressionClassFactory.instance(compression_mode)    
    sensitivity_type = SensitivityClassFactory.instance(sensitivity_type)
    payload_type = PayloadClassFactory.instance(context, payload_type, compression_mode, sensitivity_type, source_IP)
      
    print "[{}]Using SQS queue URL '{}'".format(context[c.KEY_REQUEST_ID],aws_sqs.queue_url) 
    if os.environ[c.ENV_VERBOSE]== "True":
        print "The post request contains a paylod of\n{}".format(data)
    if data is None:   
        print "Terminating, there is no data."
        return ok_response
        
    total_metrics = "all"    
    try:
        data_size = len(data) + sqs.message_overhead_size(sensitivity_type, compression_mode, payload_type)      
        message_chunks, total_metrics = payload_type.chunk(data)   
    
        for message in message_chunks:                    
            print "Sending a sqs message with {} bytes".format(len(message))            
            aws_sqs.send_message(message, sensitivity_type, compression_mode, payload_type)    
    except Exception as e:        
        traceback.print_exc()                
        raise errors.ClientError(e.message)     

    print "The job sent {} metric(s) to the FIFO queue '{}'".format(total_metrics, aws_sqs.queue_url)    
    print "The job took {} seconds.".format(time.time() -start)
    return ok_response
Example #13
    def stack_changes(self, stack_id, stack_description, change_map):
        
        if stack_id:
            full_stack_description = '{} stack ({})'.format(stack_description, util.get_stack_name_from_arn(stack_id))
        else:
            full_stack_description = '{} stack'.format(stack_description)

        self._output_message('\nThis operation will perform the following pending actions on the {} resources in AWS:'.format(full_stack_description))

        if change_map:
            self.__view_resource_list(change_map)
        else:
            self._output_message('\n(no changes detected)')

        print ''
Example #14
def handler(event, context):
    print "Start FIFO Auto Scale"
    prefix = util.get_stack_name_from_arn(event[c.ENV_STACK_ID], False)
    sqs = Sqs({}, queue_prefix=prefix)
    request_type = event['RequestType']
    print request_type, prefix
    if request_type == 'Delete':
        sqs.delete_all_queues(prefix)
    else:
        queues = sqs.get_queues()
        print queues
        number_of_queues = len(queues)

        # 5 queues to start; each queue can support 300 send message calls per second (1500 messages per second total)
        for i in range(number_of_queues, 5):
            sqs.add_fifo_queue(prefix)

    return custom_resource_response.success_response({}, "*")
Example #15
    def __init__(self, context, stack_id, operation):

        self.context = context
        self.stack_id = stack_id
        self.stack_name = util.get_stack_name_from_arn(stack_id)
        self.operation = operation
        self.events_seen = {}
        self.success_status = operation + '_COMPLETE'
        self.finished_status = [
            self.success_status, operation + '_FAILED',
            operation + '_ROLLBACK_COMPLETE', operation + '_ROLLBACK_FAILED',
            context.stack.STATUS_ROLLBACK_COMPLETE,
            context.stack.STATUS_ROLLBACK_FAILED
        ]
        self.client = self.context.aws.client('cloudformation',
                                              region=util.get_region_from_arn(
                                                  self.stack_id))
        self.client.verbose = False
        self.start_nested_stack_status = [
            context.stack.STATUS_UPDATE_IN_PROGRESS,
            context.stack.STATUS_CREATE_IN_PROGRESS,
            context.stack.STATUS_DELETE_IN_PROGRESS
        ]
        self.end_nested_stack_status = [
            context.stack.STATUS_UPDATE_COMPLETE,
            context.stack.STATUS_UPDATE_FAILED,
            context.stack.STATUS_CREATE_COMPLETE,
            context.stack.STATUS_CREATE_FAILED,
            context.stack.STATUS_DELETE_COMPLETE,
            context.stack.STATUS_DELETE_FAILED,
            context.stack.STATUS_ROLLBACK_COMPLETE,
            context.stack.STATUS_ROLLBACK_FAILED
        ]
        self.monitored_stacks = [stack_id]

        if operation != 'CREATE':
            self.__load_existing_events()
Example #16
def main(event, lambdacontext):
    context = dict({})
    stack_id = os.environ[c.ENV_DEPLOYMENT_STACK_ARN]
    context[c.KEY_LAMBDA_FUNCTION] = lambdacontext.function_name if hasattr(
        lambdacontext, 'function_name') else None
    context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr(
        lambdacontext, 'aws_request_id') else None
    is_lambda = context[c.KEY_REQUEST_ID] is not None
    db = DynamoDb(context)
    if not is_lambda:
        import lambda_fifo_message_consumer as consumer

    prefix = util.get_stack_name_from_arn(stack_id)
    sqs = Sqs(context, "{0}_".format(prefix))
    awslambda = Lambda(context)

    if sqs.is_all_under_load:
        sqs.add_fifo_queue(prefix)

    queues = sqs.get_queues()
    for queue_url in queues:
        payload = {c.KEY_SQS_QUEUE_URL: queue_url, "context": context}
        print "Starting {} with queue url '{}'".format(
            "lambda" if is_lambda else "thread", queue_url)
        if is_lambda:
            invoke(context, awslambda, payload)
        else:
            payload[c.ENV_STACK_ID] = event['StackId']
            consumer.main(
                payload,
                type('obj', (object, ),
                     {'function_name': context[c.KEY_LAMBDA_FUNCTION]}))

    print "{} {} lambdas have started".format(len(queues),
                                              context[c.KEY_LAMBDA_FUNCTION])
    return custom_resource_response.success_response({}, "*")
Example #17
 def retrieving_mappings(self, deployment_name, deployment_stack_id):
     self._output_message(
         'Loading mappings for deployment {} from stack {}.'.format(
             deployment_name,
             util.get_stack_name_from_arn(deployment_stack_id)))
Example #18
def create_stack(context, args):

    # Has the project been initialized?
    if not context.config.project_initialized:
        raise HandledError('The project has not been initialized.')

    # Does a deployment with that name already exist?
    if context.config.deployment_stack_exists(args.deployment):
        raise HandledError('The project already has a {} deployment.'.format(args.deployment))

    # Does deployment-template.json include a resource group from a gem that isn't enabled for the project?
    for resource_group_name in context.resource_groups.keys():
        __check_resource_group_gem_status(context, resource_group_name)

    # Is the project settings file writable?
    context.config.validate_writable(context.config.local_project_settings_path)

    # Is the deployment name valid?
    util.validate_stack_name(args.deployment)

    # If there is no project default deployment, make this the project default deployment
    if context.config.project_default_deployment is None:
        args.make_project_default = True

    # If there is no release deployment, make this the release deployment
    if context.config.release_deployment is None:
        args.make_release_deployment = True

    # Need to handle situations where the deployment and/or access stack were
    # not successfully created on previous attempts.

    pending_deployment_stack_id = context.config.get_pending_deployment_stack_id(args.deployment)
    pending_deployment_access_stack_id = context.config.get_pending_deployment_access_stack_id(args.deployment)

    pending_deployment_stack_status = context.stack.get_stack_status(pending_deployment_stack_id)
    pending_deployment_access_stack_status = context.stack.get_stack_status(pending_deployment_access_stack_id)

    # Does a stack with the name already exist? It's ok if a previous attempt
    # at creation left a stack with this name behind; we'll deal with that later.
    deployment_stack_name = args.stack_name or context.config.get_default_deployment_stack_name(args.deployment)
    deployment_region = util.get_region_from_arn(context.config.project_stack_id)
    if pending_deployment_stack_id is None or deployment_stack_name != util.get_stack_name_from_arn(pending_deployment_stack_id):
        if context.stack.name_exists(deployment_stack_name, deployment_region):
            raise HandledError('An AWS Cloud Formation stack with the name {} already exists in region {}. Use the --stack-name option to provide a different name.'.format(deployment_stack_name, deployment_region))

    # Resource group (and other) file write checks
    create_and_validate_writable_list(context)

    # Is it ok to use AWS?

    pending_resource_status = __get_pending_combined_resource_status(context, args.deployment)

    capabilities = context.stack.confirm_stack_operation(
        None, # stack id
        'deployment {}'.format(args.deployment),
        args,
        pending_resource_status,
        ignore_resource_types = [ 'Custom::EmptyDeployment' ]
    )

    # We have the following scenarios to deal with:
    #
    # 1) This is the first attempt to create the deployment, or previous attempts didn't
    #    get as far as creating any stacks.
    #
    # 2) The previous attempt failed to create or update the deployment stack, which was
    # left in a ROLLBACK_COMPLETE, UPDATE_ROLLBACK_FAILED, or ROLLBACK_FAILED state. This
    # stack must be deleted and a new one created.
    #
    # 3) The previous attempt created the deployment stack but failed to create the access
    # stack, leaving it in the ROLLBACK_COMPLETE state. In this case we update the deployment
    # stack (to make sure it reflects any changes that may have been made), delete the access
    # stack and attempt to create a new one.
    #
    # 4) Both the deployment and access stacks were created successfully, but the pending
    # stack id properties in the config were not replaced with the non-pending properties
    # (this could happen if someone kills the client during the access stack creation
    # process, which then runs to a successful completion). In this case we update both
    # stacks to make sure they reflect any changes, then replace the "pending" stack id
    # properties.

    project_uploader = ProjectUploader(context)
    deployment_uploader = project_uploader.get_deployment_uploader(args.deployment)

    template_url = before_update(context, deployment_uploader)

    deployment_stack_parameters = __get_deployment_stack_parameters(context, args.deployment, uploader = deployment_uploader)

    # Wait a bit for S3 to help ensure that the templates can be read by CloudFormation
    time.sleep(constant.STACK_UPDATE_DELAY_TIME)

    try:

        if pending_deployment_stack_status not in [None, context.stack.STATUS_ROLLBACK_COMPLETE, context.stack.STATUS_DELETE_COMPLETE, context.stack.STATUS_UPDATE_ROLLBACK_FAILED, context.stack.STATUS_ROLLBACK_FAILED]:

            # case 3 or 4 - deployment stack was previously created successfully, update it

            context.stack.update(
                pending_deployment_stack_id, 
                template_url, 
                deployment_stack_parameters,
                capabilities = capabilities
            )
            deployment_stack_id = pending_deployment_stack_id

        else:

            if pending_deployment_stack_status in [context.stack.STATUS_ROLLBACK_COMPLETE, context.stack.STATUS_ROLLBACK_FAILED, context.stack.STATUS_UPDATE_ROLLBACK_FAILED]:

                # case 2 - deployment stack failed to create previously, delete it

                context.stack.delete(pending_deployment_stack_id)

            # case 1 or 2 - deployment stack wasn't created previously or was just
            # deleted, attempt to create it

            deployment_stack_id = context.stack.create_using_url(
                deployment_stack_name,
                template_url,
                deployment_region,
                deployment_stack_parameters,
                created_callback=lambda id: context.config.set_pending_deployment_stack_id(args.deployment, id),
                capabilities = capabilities)

        # Now create or update the access stack...

        context.view.processing_template('{} deployment'.format(args.deployment))

        access_template_url = deployment_uploader.upload_content(
            constant.DEPLOYMENT_ACCESS_TEMPLATE_FILENAME, 
            json.dumps(context.config.deployment_access_template_aggregator.effective_template, indent=4, sort_keys=True),
            'processed deployment access template')

        access_stack_parameters = __get_access_stack_parameters(
            context, 
            args.deployment, 
            deployment_stack_id = deployment_stack_id, 
            uploader = deployment_uploader
        )
        
        if pending_deployment_access_stack_status not in [None, context.stack.STATUS_ROLLBACK_COMPLETE, context.stack.STATUS_DELETE_COMPLETE]:

            # case 4 - access stack was previously created successfully but the pending
            # stack id properties were not replaced. Update the stack.

            context.stack.update(
                pending_deployment_access_stack_id, 
                access_template_url, 
                access_stack_parameters,
                capabilities = capabilities
            )

            deployment_access_stack_id = pending_deployment_access_stack_id

        else:

            if pending_deployment_access_stack_status == context.stack.STATUS_ROLLBACK_COMPLETE:

                # case 3 - access stack failed to create previously, delete it

                context.stack.delete(pending_deployment_access_stack_id)

            # case 1 or 3 - access stack wasn't created before, or was just deleted. Attempt
            # to create.

            deployment_access_stack_name = deployment_stack_name + '-Access'

            deployment_access_stack_id = context.stack.create_using_url(
                deployment_access_stack_name,
                access_template_url,
                deployment_region,
                parameters = access_stack_parameters,
                created_callback=lambda id: context.config.set_pending_deployment_access_stack_id(args.deployment, id),
                capabilities = capabilities)

    except:
        context.config.force_gui_refresh()
        raise

    context.config.force_gui_refresh()

    context.config.finalize_deployment_stack_ids(args.deployment)

    context.view.deployment_stack_created(args.deployment, deployment_stack_id, deployment_access_stack_id)

    # Should the new deployment become the project default deployment or the release deployment?

    if args.make_project_default:
        context.config.set_project_default_deployment(args.deployment)
        mappings.update(context, util.Args())
        context.view.default_deployment(context.config.user_default_deployment, context.config.project_default_deployment)

    if args.make_release_deployment:
        context.config.set_release_deployment(args.deployment)
        temp_args = util.Args()
        temp_args.release = True
        mappings.update(context, temp_args)
        context.view.release_deployment(context.config.release_deployment)
    
    after_update(context, deployment_uploader)
Example #19
def launch(event, lambdacontext):
    util.debug_print("Start Amoeba Launcher")
    context = dict({})
    context[c.KEY_START_TIME] = time.time()
    context[c.KEY_LAMBDA_FUNCTION] = lambdacontext.function_name if hasattr(
        lambdacontext, 'function_name') else None
    context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr(
        lambdacontext, 'aws_request_id') else None
    prefix = util.get_stack_name_from_arn(
        os.environ[c.ENV_DEPLOYMENT_STACK_ARN])
    prefix = "{0}{1}".format(prefix, c.KEY_SQS_AMOEBA_SUFFIX)
    db = DynamoDb(context)
    sqs = Sqs(context, prefix, "sqs")
    sqs.set_queue_url(lowest_load_queue=False)

    if sqs.is_all_under_load:
        sqs.add_fifo_queue(prefix)

    elapsed = util.elapsed(context)
    timeout = context[c.KEY_MAX_LAMBDA_TIME] * c.RATIO_OF_MAX_LAMBDA_TIME
    map = {}
    queues_checked = 0
    number_of_queues = sqs.number_of_queues
    sqs_delete_tokens = {}
    while elapsed < timeout and queues_checked < number_of_queues:
        messages = sqs.read_queue()
        length = len(messages)
        if sqs.queue_url not in sqs_delete_tokens:
            sqs_delete_tokens[sqs.queue_url] = []

        if length > 0:
            for x in range(0, length):
                message = messages[x]
                body = json.loads(message["Body"])
                paths = body["paths"]
                msg_token = "{}{}{}".format(message['MessageId'],
                                            context[c.KEY_SEPERATOR_CSV],
                                            message['ReceiptHandle'])
                sqs_delete_tokens[sqs.queue_url].append(msg_token)
                for i in range(0, len(paths)):
                    path = paths[i]
                    parts = path.split(context[c.KEY_SEPERATOR_PARTITION])
                    filename = parts.pop()
                    directory = context[c.KEY_SEPERATOR_PARTITION].join(parts)
                    if directory not in map:
                        map[directory] = {"paths": [], "size": 0}
                    # Lambda payload limit for the Event invocation type is 131072 bytes
                    sizeof = len(path) + map[directory]["size"]
                    is_invoked = map[directory].get("invoked", False)
                    if sizeof >= c.MAXIMUM_ASYNC_PAYLOAD_SIZE and not is_invoked:
                        invoke_lambda(context, directory,
                                      map[directory]["paths"])
                        map[directory] = {
                            "paths": [],
                            "size": 0,
                            "invoked": True
                        }
                    else:
                        map[directory]["paths"].append(path)
                        map[directory]["size"] = sizeof

        else:
            queues_checked += 1
            sqs.set_queue_url(lowest_load_queue=False)

        elapsed = util.elapsed(context)

    # Invoke an amoeba generator for each S3 leaf node
    for directory, settings in iteritems(map):
        is_invoked = settings.get("invoked", False)
        # Amoebas are not designed to have multiple amoebas working against one directory.
        # If the amoeba has already been invoked due to payload size, we requeue the remaining paths.
        if is_invoked:
            sqs.send_generic_message(json.dumps({"paths": settings["paths"]}))
        else:
            invoke_lambda(context, directory, settings["paths"])

    context[c.KEY_THREAD_POOL] = ThreadPool(context, 8)
    # Delete SQS messages that have been processed
    for key, value in iteritems(sqs_delete_tokens):
        sqs.delete_message_batch(value, key)

    return custom_resource_response.success_response({"StatusCode": 200}, "*")
Example #20
 def describing_stack(self, stack_id):
     if self.__verbose:
         self._output_message('Retrieving description of stack {}.'.format(
             util.get_stack_name_from_arn(stack_id)))
Example #21
 def retrieving_mappings(self, deployment_name, deployment_stack_id, role):
     self._output_message(
         "Loading mappings for deployment '{}' with role '{}' from stack '{}'."
         .format(deployment_name, role,
                 util.get_stack_name_from_arn(deployment_stack_id)))
Example #22
 def __remove_bucket_contents(self, stack_id, logical_resource_id):
     physical_bucket_id = self.get_physical_resource_id(
         stack_id, logical_resource_id)
     stack_name = util.get_stack_name_from_arn(stack_id)
     util.delete_bucket_contents(self.context, stack_name,
                                 logical_resource_id, physical_bucket_id)
Example #23
 def get_project_stack_name(self):
     return util.get_stack_name_from_arn(self.project_stack_id)
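
Every example above leans on the same two helpers. A CloudFormation stack ARN has the form arn:aws:cloudformation:<region>:<account-id>:stack/<stack-name>/<uuid>, so both the stack name and the region can be recovered by splitting the ARN. The sketch below illustrates helpers with that behavior; it is an assumption based on the ARN layout, not the project's actual util module (for instance, the extra boolean argument passed in Examples #5 and #14 is not modeled).

# Minimal sketch of ARN helpers equivalent in spirit to util.get_stack_name_from_arn
# and util.get_region_from_arn (assumed behavior, not the project's actual code).

def get_stack_name_from_arn(stack_arn):
    # A stack ARN looks like:
    #   arn:aws:cloudformation:us-west-2:123456789012:stack/MyStack/1a2b3c4d-...
    # The stack name is the segment between the first and second '/'.
    return stack_arn.split('/')[1]

def get_region_from_arn(arn):
    # The region is the fourth ':'-separated field of any ARN.
    return arn.split(':')[3]

# Usage:
#   arn = 'arn:aws:cloudformation:us-west-2:123456789012:stack/CloudGemSamples/1a2b3c4d'
#   get_stack_name_from_arn(arn)  -> 'CloudGemSamples'
#   get_region_from_arn(arn)      -> 'us-west-2'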