def main(event, lambdacontext):
    """Consumer Lambda entry point: build a processing context and drain one SQS queue.

    :param event: dict; expects the queue URL under c.KEY_SQS_QUEUE_URL and an
        optional seed "context" dict.
    :param lambdacontext: AWS Lambda context object, or a stand-in object when
        invoked locally (hence the hasattr guards below).
    :returns: {'StatusCode': 200}
    """
    starttime = time.time()
    queue_url = event.get(c.KEY_SQS_QUEUE_URL, None)
    print "Started consumer with queue url '{}'".format(queue_url)
    context = event.get("context", {})
    context[c.KEY_SQS_QUEUE_URL] = queue_url
    # Fall back to None when not running inside a real Lambda environment.
    context[c.KEY_LAMBDA_FUNCTION] = lambdacontext.function_name if hasattr(lambdacontext, 'function_name') else None
    context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr(lambdacontext, 'aws_request_id') else None
    # A request id is only present when invoked by AWS Lambda proper.
    context[c.KEY_IS_LAMBDA_ENV] = context[c.KEY_REQUEST_ID] is not None
    prefix = util.get_stack_name_from_arn(os.environ[c.ENV_DEPLOYMENT_STACK_ARN])
    context[c.KEY_STACK_PREFIX] = prefix
    # Two queue clients: the main metric queue and the Amoeba side queue.
    context[c.KEY_SQS] = Sqs(context, "{0}_".format(prefix))
    context[c.KEY_SQS_AMOEBA] = Sqs(context, "{0}{1}_".format(prefix, c.KEY_SQS_AMOEBA_SUFFIX))
    context[c.KEY_SQS_AMOEBA].set_queue_url(lowest_load_queue=True)
    context[c.KEY_LAMBDA] = Lambda(context)
    context[c.KEY_CLOUDWATCH] = CloudWatch(context)
    context[c.KEY_THREAD_POOL] = ThreadPool(context, 8)
    context[c.KEY_METRIC_BUCKET] = os.environ[c.RES_S3_STORAGE]
    context[c.KEY_START_TIME] = starttime
    # Historical average save/delete durations from CloudWatch; presumably used
    # by process() to budget its work against the Lambda time limit -- TODO confirm.
    context[c.CW_ATTR_SAVE_DURATION] = context[c.KEY_CLOUDWATCH].avg_save_duration(util.get_cloudwatch_namespace(os.environ[c.ENV_DEPLOYMENT_STACK_ARN]))
    context[c.CW_ATTR_DELETE_DURATION] = context[c.KEY_CLOUDWATCH].avg_delete_duration(util.get_cloudwatch_namespace(os.environ[c.ENV_DEPLOYMENT_STACK_ARN]))
    context[c.KEY_SUCCEEDED_MSG_IDS] = []
    process(context)
    # Drop the context and force a collection so the reused Lambda container
    # does not accumulate memory between invocations.
    del context
    gc.collect()
    return {
        'StatusCode': 200
    }
def handler(event, context): print "Start FIFO Auto Scale" prefix = util.get_stack_name_from_arn(event[c.ENV_STACK_ID], False) request_type = event['RequestType'] assigned_suffix = event['ResourceProperties'].get('Suffix', None) type = event['ResourceProperties'].get('QueueType', "fifo") initial_number_of_queues = int(event['ResourceProperties'].get( 'IntialNumberOfQueues', 5)) if assigned_suffix: prefix = "{0}{1}".format(prefix, assigned_suffix) sqs = Sqs({}, queue_prefix=prefix, type=type) if request_type == 'Delete': sqs.delete_all_queues(prefix) else: queues = sqs.get_queues() number_of_queues = len(queues) #5 queues to start, each queue can support 300 send message calls per second. total: 1500 messages per second if number_of_queues < initial_number_of_queues: for i in range(number_of_queues, initial_number_of_queues): sqs.add_fifo_queue(prefix) return custom_resource_response.success_response({}, "*")
def __add_to_sqs(self, files):
    """Send the given file paths to the Amoeba FIFO queue as one JSON message."""
    stack_arn = os.environ[c.ENV_DEPLOYMENT_STACK_ARN]
    queue_prefix = "{0}{1}".format(util.get_stack_name_from_arn(stack_arn), c.KEY_SQS_AMOEBA_SUFFIX)
    amoeba_queue = Sqs(self.context, queue_prefix)
    amoeba_queue.set_queue_url(lowest_load_queue=False)
    amoeba_queue.send_generic_message(json.dumps({"paths": files}))
def main(event, lambdacontext): context = dict({}) stack_id = os.environ[c.ENV_DEPLOYMENT_STACK_ARN] context[c.KEY_LAMBDA_FUNCTION] = lambdacontext.function_name if hasattr( lambdacontext, 'function_name') else None context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr( lambdacontext, 'aws_request_id') else None is_lambda = context[c.KEY_REQUEST_ID] is not None db = DynamoDb(context) if not is_lambda: import lambda_fifo_message_consumer as consumer prefix = util.get_stack_name_from_arn(stack_id) sqs = Sqs(context, "{0}_".format(prefix)) awslambda = Lambda(context) if sqs.is_all_under_load: sqs.add_fifo_queue(prefix) queues = sqs.get_queues() for queue_url in queues: payload = {c.KEY_SQS_QUEUE_URL: queue_url, "context": context} print "Starting {} with queue url '{}'".format( "lambda" if is_lambda else "thread", queue_url) if is_lambda: invoke(context, awslambda, payload) else: payload[c.ENV_STACK_ID] = event['StackId'] consumer.main( payload, type('obj', (object, ), {'function_name': context[c.KEY_LAMBDA_FUNCTION]})) print "{} {} lambdas have started".format(len(queues), context[c.KEY_LAMBDA_FUNCTION]) return custom_resource_response.success_response({}, "*")
def main(event, lambdacontext): global context global timestamp global aws_sqs start = time.time() ok_response = { 'StatusCode': 200, } refreshtime = datetime.datetime.utcnow() - datetime.timedelta(minutes=1) if context is None or aws_sqs is None or refreshtime > timestamp: context=dict({}) stack_id = os.environ[c.ENV_DEPLOYMENT_STACK_ARN] context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr(lambdacontext, 'aws_request_id') else None db = DynamoDb(context) prefix = util.get_stack_name_from_arn(stack_id) aws_sqs = Sqs(context, queue_prefix="{}_".format(prefix)) aws_sqs.set_queue_url(True) timestamp = datetime.datetime.utcnow() else: context[c.KEY_SQS_QUEUE_URL] = aws_sqs.queue_url data = event.get(c.API_PARAM_PAYLOAD, {})[c.API_PARAM_DATA] source_IP = event.get(c.API_PARAM_SOURCE_IP, None) sensitivity_type = event.get(c.SQS_PARAM_SENSITIVITY_TYPE, sensitivity.SENSITIVITY_TYPE.NONE) compression_mode = event.get(c.SQS_PARAM_COMPRESSION_TYPE, compression.COMPRESSION_MODE.NONE) payload_type = event.get(c.SQS_PARAM_PAYLOAD_TYPE, payload.PAYLOAD_TYPE.CSV) compression_mode = CompressionClassFactory.instance(compression_mode) sensitivity_type = SensitivityClassFactory.instance(sensitivity_type) payload_type = PayloadClassFactory.instance(context, payload_type, compression_mode, sensitivity_type, source_IP) print "[{}]Using SQS queue URL '{}'".format(context[c.KEY_REQUEST_ID],aws_sqs.queue_url) if os.environ[c.ENV_VERBOSE]== "True": print "The post request contains a paylod of\n{}".format(data) if data is None: print "Terminating, there is no data." 
return ok_response total_metrics = "all" try: data_size = len(data) + sqs.message_overhead_size(sensitivity_type, compression_mode, payload_type) message_chunks, total_metrics = payload_type.chunk(data) for message in message_chunks: print "Sending a sqs message with {} bytes".format(len(message)) aws_sqs.send_message(message, sensitivity_type, compression_mode, payload_type) except Exception as e: traceback.print_exc() raise errors.ClientError(e.message) print "The job sent {} metric(s) to the FIFO queue '{}'".format(total_metrics, aws_sqs.queue_url) print "The job took {} seconds.".format(time.time() -start) return ok_response
def handler(event, context): print "Start FIFO Auto Scale" prefix = util.get_stack_name_from_arn(event[c.ENV_STACK_ID], False) sqs = Sqs({}, queue_prefix=prefix) request_type = event['RequestType'] print request_type, prefix if request_type == 'Delete': sqs.delete_all_queues(prefix) else: queues = sqs.get_queues() print queues number_of_queues = len(queues) #5 queues to start, each queue can support 300 send message calls per second. total: 1500 messages per second for i in range(number_of_queues, 5): sqs.add_fifo_queue(prefix) return custom_resource_response.success_response({}, "*")
except Exception, e: #print "Error when looking up data in ha-config: %s" % e raise # this salt master updates info about itself into dynamodb try: modified = datetime.datetime.now(utc_tz) local_master = MasterManager(region) local_master.create_or_update_master(instanceid=instanceid, modified=modified, ipaddr=ipaddr, status=u'LAUNCH' ) except Exception, e: raise # Need sqs instances try: sqs_master = Sqs(boto.sqs.connect_to_region(region), master_queue) sqs_minion = Sqs(boto.sqs.connect_to_region(region), minion_queue) except Exception, e: raise # This is the endless loop of goodness while True: # Handling autoscaling sns messages for the salt master group try: master_queue_length = sqs_master.get_queue_length() if master_queue_length > 0: for i in range(master_queue_length): message = sqs_master.get_a_message() if not message: continue #print "MASTER: type of message is: %s" % type(message)
def launch(event, lambdacontext):
    """Amoeba launcher: read path batches from the Amoeba FIFO queues, group the
    paths by parent directory, and invoke one generator Lambda per directory.

    :param event: triggering event (unused here).
    :param lambdacontext: AWS Lambda context object or a local stand-in.
    :returns: a custom_resource_response success response with StatusCode 200.
    """
    util.debug_print("Start Amoeba Launcher")
    context = dict({})
    context[c.KEY_START_TIME] = time.time()
    # Fall back to None when not running inside a real Lambda environment.
    context[c.KEY_LAMBDA_FUNCTION] = lambdacontext.function_name if hasattr(lambdacontext, 'function_name') else None
    context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr(lambdacontext, 'aws_request_id') else None
    prefix = util.get_stack_name_from_arn(os.environ[c.ENV_DEPLOYMENT_STACK_ARN])
    prefix = "{0}{1}".format(prefix, c.KEY_SQS_AMOEBA_SUFFIX)
    db = DynamoDb(context)
    sqs = Sqs(context, prefix, "sqs")
    sqs.set_queue_url(lowest_load_queue=False)
    if sqs.is_all_under_load:
        sqs.add_fifo_queue(prefix)
    elapsed = util.elapsed(context)
    # Stop reading before the Lambda execution limit is reached.
    timeout = context[c.KEY_MAX_LAMBDA_TIME] * c.RATIO_OF_MAX_LAMBDA_TIME
    # NOTE(review): `map` shadows the builtin; maps directory -> accumulated
    # {"paths": [...], "size": int, "invoked": bool} state.
    map = {}
    queues_checked = 0
    number_of_queues = sqs.number_of_queues
    # Maps queue URL -> list of "MessageId<sep>ReceiptHandle" delete tokens.
    sqs_delete_tokens = {}
    while elapsed < timeout and queues_checked < number_of_queues:
        messages = sqs.read_queue()
        length = len(messages)
        if sqs.queue_url not in sqs_delete_tokens:
            sqs_delete_tokens[sqs.queue_url] = []
        if length > 0:
            for x in range(0, length):
                message = messages[x]
                body = json.loads(message["Body"])
                paths = body["paths"]
                # Remember the token so the message can be batch-deleted later.
                msg_token = "{}{}{}".format(message['MessageId'], context[c.KEY_SEPERATOR_CSV], message['ReceiptHandle'])
                sqs_delete_tokens[sqs.queue_url].append(msg_token)
                for i in range(0, len(paths)):
                    path = paths[i]
                    parts = path.split(context[c.KEY_SEPERATOR_PARTITION])
                    # NOTE(review): filename is popped but never used; the pop
                    # reduces `parts` to the directory components.
                    filename = parts.pop()
                    directory = context[c.KEY_SEPERATOR_PARTITION].join(parts)
                    if directory not in map:
                        map[directory] = {"paths": [], "size": 0}
                    #lambda payload limit for Event invocation type 131072
                    sizeof = len(path) + map[directory]["size"]
                    is_invoked = map[directory].get("invoked", False)
                    if sizeof >= c.MAXIMUM_ASYNC_PAYLOAD_SIZE and not is_invoked:
                        # Payload would exceed the async limit: flush what we
                        # have now and mark the directory as already invoked.
                        invoke_lambda(context, directory, map[directory]["paths"])
                        map[directory] = {
                            "paths": [],
                            "size": 0,
                            "invoked": True
                        }
                    else:
                        map[directory]["paths"].append(path)
                        map[directory]["size"] = sizeof
        else:
            # Empty queue: count it as checked and rotate to the next queue.
            queues_checked += 1
            sqs.set_queue_url(lowest_load_queue=False)
        # NOTE(review): collapsed source makes the indentation of this line
        # ambiguous; placed at loop level so the timeout is re-evaluated every
        # iteration -- confirm against the original layout.
        elapsed = util.elapsed(context)
    #Invoke a amoeba generator for each S3 leaf node
    for directory, settings in iteritems(map):
        is_invoked = settings.get("invoked", False)
        #Amoeba's are not designed to have multiple amoebas working against one directory
        #If the Amoeba has already been invoked due to payload size then we requeue the remaining paths
        if is_invoked:
            sqs.send_generic_message(json.dumps({"paths": settings["paths"]}))
        else:
            invoke_lambda(context, directory, settings["paths"])
    context[c.KEY_THREAD_POOL] = ThreadPool(context, 8)
    #Delete SQS messages that have been processed
    for key, value in iteritems(sqs_delete_tokens):
        sqs.delete_message_batch(value, key)
    return custom_resource_response.success_response({"StatusCode": 200}, "*")