def handler(event, context):
    print("Start FIFO Auto Scale")
    prefix = util.get_stack_name_from_arn(event[c.ENV_STACK_ID], False)
    request_type = event['RequestType']
    assigned_suffix = event['ResourceProperties'].get('Suffix', None)
    queue_type = event['ResourceProperties'].get('QueueType', "fifo")
    # 'IntialNumberOfQueues' (sic) is kept as spelled; it must match the key
    # the resource properties actually carry.
    initial_number_of_queues = int(event['ResourceProperties'].get(
        'IntialNumberOfQueues', 5))

    if assigned_suffix:
        prefix = "{0}{1}".format(prefix, assigned_suffix)

    sqs = Sqs({}, queue_prefix=prefix, type=queue_type)
    if request_type == 'Delete':
        sqs.delete_all_queues(prefix)
    else:
        queues = sqs.get_queues()
        number_of_queues = len(queues)

        # Start with 5 queues; each FIFO queue supports 300 send-message
        # calls per second, for a total of 1,500 messages per second.
        if number_of_queues < initial_number_of_queues:
            for _ in range(number_of_queues, initial_number_of_queues):
                sqs.add_fifo_queue(prefix)

    return custom_resource_response.success_response({}, "*")
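A minimal local smoke test for the handler above, assuming the same module-level imports these examples rely on; the event is a trimmed, hypothetical version of a CloudFormation custom-resource event, and the ARN and property values are made up for illustration.

if __name__ == '__main__':
    # Hypothetical stack ARN and property values for illustration only.
    fake_event = {
        c.ENV_STACK_ID: 'arn:aws:cloudformation:us-east-1:123456789012'
                        ':stack/MyStack/abc-123',
        'RequestType': 'Create',
        'ResourceProperties': {
            'Suffix': '_metrics',
            'QueueType': 'fifo',
            'IntialNumberOfQueues': '5',  # key spelled as the handler reads it
        },
    }
    handler(fake_event, None)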
Example #2
def handler(event, context):
    print("Start FIFO Auto Scale")
    prefix = util.get_stack_name_from_arn(event[c.ENV_STACK_ID], False)
    sqs = Sqs({}, queue_prefix=prefix)
    request_type = event['RequestType']
    print(request_type, prefix)
    if request_type == 'Delete':
        sqs.delete_all_queues(prefix)
    else:
        queues = sqs.get_queues()
        print(queues)
        number_of_queues = len(queues)

        # Start with 5 queues; each FIFO queue supports 300 send-message
        # calls per second, for a total of 1,500 messages per second.
        for _ in range(number_of_queues, 5):
            sqs.add_fifo_queue(prefix)

    return custom_resource_response.success_response({}, "*")
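The Sqs wrapper is not part of this listing, so its internals are assumptions; a minimal sketch of what a delete_all_queues helper could look like with plain boto3:

import boto3

def delete_all_queues_sketch(prefix):
    # Assumed behavior: remove every queue whose name begins with the
    # stack prefix used above.
    client = boto3.client('sqs')
    response = client.list_queues(QueueNamePrefix=prefix)
    for queue_url in response.get('QueueUrls', []):
        client.delete_queue(QueueUrl=queue_url)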
def main(event, lambdacontext):
    context = {}
    stack_id = os.environ[c.ENV_DEPLOYMENT_STACK_ARN]
    context[c.KEY_LAMBDA_FUNCTION] = lambdacontext.function_name if hasattr(
        lambdacontext, 'function_name') else None
    context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr(
        lambdacontext, 'aws_request_id') else None
    is_lambda = context[c.KEY_REQUEST_ID] is not None
    # The DynamoDb handle is not used directly below; constructing it
    # appears to prime the shared context.
    db = DynamoDb(context)
    if not is_lambda:
        import lambda_fifo_message_consumer as consumer

    prefix = util.get_stack_name_from_arn(stack_id)
    sqs = Sqs(context, "{0}_".format(prefix))
    awslambda = Lambda(context)

    if sqs.is_all_under_load:
        sqs.add_fifo_queue(prefix)

    queues = sqs.get_queues()
    for queue_url in queues:
        payload = {c.KEY_SQS_QUEUE_URL: queue_url, "context": context}
        print "Starting {} with queue url '{}'".format(
            "lambda" if is_lambda else "thread", queue_url)
        if is_lambda:
            invoke(context, awslambda, payload)
        else:
            payload[c.ENV_STACK_ID] = event['StackId']
            # Build a stand-in for the Lambda context object so the consumer
            # can run in-process when not executing on Lambda.
            consumer.main(
                payload,
                type('obj', (object, ),
                     {'function_name': context[c.KEY_LAMBDA_FUNCTION]}))

    print "{} {} lambdas have started".format(len(queues),
                                              context[c.KEY_LAMBDA_FUNCTION])
    return custom_resource_response.success_response({}, "*")
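The invoke helper and the Lambda wrapper used above are also outside this listing; a minimal sketch of the fire-and-forget fan-out they imply, using plain boto3 (the function-name lookup is an assumption):

import json
import boto3

def invoke_sketch(context, payload):
    # 'Event' invocations return immediately (HTTP 202) rather than
    # waiting for the consumer lambda to finish.
    client = boto3.client('lambda')
    client.invoke(
        FunctionName=context[c.KEY_LAMBDA_FUNCTION],
        InvocationType='Event',
        Payload=json.dumps(payload))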
Example #4
def launch(event, lambdacontext):
    util.debug_print("Start Amoeba Launcher")
    context = {}
    context[c.KEY_START_TIME] = time.time()
    context[c.KEY_LAMBDA_FUNCTION] = lambdacontext.function_name if hasattr(
        lambdacontext, 'function_name') else None
    context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr(
        lambdacontext, 'aws_request_id') else None
    prefix = util.get_stack_name_from_arn(
        os.environ[c.ENV_DEPLOYMENT_STACK_ARN])
    prefix = "{0}{1}".format(prefix, c.KEY_SQS_AMOEBA_SUFFIX)
    db = DynamoDb(context)
    sqs = Sqs(context, prefix, "sqs")
    sqs.set_queue_url(lowest_load_queue=False)

    if sqs.is_all_under_load:
        sqs.add_fifo_queue(prefix)

    elapsed = util.elapsed(context)
    timeout = context[c.KEY_MAX_LAMBDA_TIME] * c.RATIO_OF_MAX_LAMBDA_TIME
    directory_map = {}
    queues_checked = 0
    number_of_queues = sqs.number_of_queues
    sqs_delete_tokens = {}
    while elapsed < timeout and queues_checked < number_of_queues:
        messages = sqs.read_queue()
        length = len(messages)
        if sqs.queue_url not in sqs_delete_tokens:
            sqs_delete_tokens[sqs.queue_url] = []

        if length > 0:
            for message in messages:
                body = json.loads(message["Body"])
                paths = body["paths"]
                msg_token = "{}{}{}".format(message['MessageId'],
                                            context[c.KEY_SEPERATOR_CSV],
                                            message['ReceiptHandle'])
                sqs_delete_tokens[sqs.queue_url].append(msg_token)
                for path in paths:
                    parts = path.split(context[c.KEY_SEPERATOR_PARTITION])
                    parts.pop()  # drop the filename; group by parent directory
                    directory = context[c.KEY_SEPERATOR_PARTITION].join(parts)
                    if directory not in directory_map:
                        directory_map[directory] = {"paths": [], "size": 0}
                    # The Lambda payload limit for the 'Event' invocation type
                    # is 131072 bytes.
                    sizeof = len(path) + directory_map[directory]["size"]
                    is_invoked = directory_map[directory].get("invoked", False)
                    if sizeof >= c.MAXIMUM_ASYNC_PAYLOAD_SIZE and not is_invoked:
                        invoke_lambda(context, directory,
                                      directory_map[directory]["paths"])
                        # Carry the current path into the fresh batch so it is
                        # not dropped when the accumulator resets.
                        directory_map[directory] = {
                            "paths": [path],
                            "size": len(path),
                            "invoked": True
                        }
                    else:
                        directory_map[directory]["paths"].append(path)
                        directory_map[directory]["size"] = sizeof

        else:
            queues_checked += 1
            sqs.set_queue_url(lowest_load_queue=False)

        elapsed = util.elapsed(context)

    # Invoke an amoeba generator for each S3 leaf node.
    for directory, settings in iteritems(directory_map):
        is_invoked = settings.get("invoked", False)
        # Amoebas are not designed to have multiple instances working against
        # one directory. If the amoeba has already been invoked due to payload
        # size, the remaining paths are requeued instead.
        if is_invoked:
            sqs.send_generic_message(json.dumps({"paths": settings["paths"]}))
        else:
            invoke_lambda(context, directory, settings["paths"])

    context[c.KEY_THREAD_POOL] = ThreadPool(context, 8)
    # Delete the SQS messages that have been processed.
    for queue_url, tokens in iteritems(sqs_delete_tokens):
        sqs.delete_message_batch(tokens, queue_url)

    return custom_resource_response.success_response({"StatusCode": 200}, "*")
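The delete_message_batch wrapper is likewise not shown; a minimal sketch, assuming the '<MessageId><separator><ReceiptHandle>' token format built in launch() (the comma default stands in for context[c.KEY_SEPERATOR_CSV]) and the SQS limit of 10 entries per batch call:

import boto3

def delete_message_batch_sketch(tokens, queue_url, separator=','):
    # Split each combined token back into its MessageId (used as the
    # batch entry id) and its ReceiptHandle.
    client = boto3.client('sqs')
    entries = []
    for token in tokens:
        message_id, receipt_handle = token.split(separator, 1)
        entries.append({'Id': message_id, 'ReceiptHandle': receipt_handle})
    # SQS accepts at most 10 entries per delete_message_batch call.
    for start in range(0, len(entries), 10):
        client.delete_message_batch(QueueUrl=queue_url,
                                    Entries=entries[start:start + 10])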