Example #1
def lambda_handler(*args, **kwargs):
    event = args[0]
    context = args[1]
    aws_lambda_logging.setup(level=os.environ.get('LOGLEVEL', 'INFO'),
                             aws_request_id=context.aws_request_id,
                             boto_level='CRITICAL')
    received_raw_kinesis_records = event['Records']
    for raw_kinesis_records in chunks(
            iter_deaggregate_records(received_raw_kinesis_records),
            batch_size):
        kinesis_records: list = []
        for raw_kinesis_record in raw_kinesis_records:
            kinesis_record = KinesisRecord(raw_kinesis_record)
            if kinesis_record.is_any_of(event_types):
                kinesis_records.append(kinesis_record)
        if kinesis_records:
            log.info({
                "Action": "Processing",
                "Events": [
                    kinesis_record.get_type()
                    for kinesis_record in kinesis_records
                ]
            })
            results = func(kinesis_records, context)
            if results:
                log.info({"Results": results})
Example #2
def lambda_handler(event, context):
    aws_lambda_logging.setup(level=log_level,
                             aws_request_id=context.aws_request_id)
    dataframes = []
    # download files locally
    for filename in event:
        data_file = download_data(filename)
        # read each file and store as Pandas dataframe
        with gzip.open(data_file, 'rb') as ndjson_file:
            records = map(json.loads, ndjson_file)
            df = pd.DataFrame.from_records(json_normalize(records))
            dataframes.append(df)

    # process the data to get air quality readings
    parameter_readings = process_data(dataframes)

    # write to file
    results_filename = "{}.json.gz".format(context.aws_request_id)
    parameter_readings.to_json(os.path.join('/tmp', results_filename),
                               compression='gzip')

    # upload to target S3 bucket
    upload_intermediate_results(results_filename)

    # return temp file and number of rows processed.
    return {
        "message": "Mapper phase complete.",
        "processed_file": 'lambda-etl-refarch/temp/{}'.format(results_filename),
        "rows": len(parameter_readings)
    }
Example #3
def handler(event, context):
    """Main function handler.

    Args:
        event: AWS event
        context:  Lambda context

    """
    # Set up logging
    aws_lambda_logging.setup(level=LOG_LEVEL, boto_level="CRITICAL")
    logger.debug(event)

    # Set up S3 client
    s3 = boto3.client("s3", region_name=MAIL_BUCKET_REGION)

    attachment_prefixes_serialized = event["Records"][0]["Sns"][
        "MessageAttributes"]["attachments"]["Value"]
    attachment_prefixes = json.loads(attachment_prefixes_serialized)
    for prefix in attachment_prefixes:
        try:
            file_name = prefix.split("/")[-1]
            file_name = f"/tmp/{file_name}"
            with open(file_name, "wb") as fp:
                s3.download_fileobj(MAIL_BUCKET, prefix, fp)
        except Exception as e:
            logging.error(f"{repr(e)}")
            raise e

        key_map, value_map, block_map = get_kv_map(file_name)

        # Get Key Value relationship
        kvs = get_kv_relationship(key_map, value_map, block_map)
        o = marshal_response(kvs)
        logging.info(f"{file_name}: {o}")
Example #4
def test_setup_with_invalid_log_level(root_logger, logger, stdout):
    from aws_lambda_logging import setup
    setup('not a valid log level')  # writes a log event

    log_dict = json.loads(stdout.getvalue())

    check_log_dict(log_dict)
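The test fixtures (`root_logger`, `logger`, `stdout`) and `check_log_dict` come from the library's test suite and are not shown. A sketch of what `check_log_dict` presumably asserts, assuming aws_lambda_logging always emits `level` and `timestamp` keys in its JSON records:

def check_log_dict(log_dict):
    # Assumed invariant: every structured record carries at least a level and a timestamp.
    assert 'level' in log_dict
    assert 'timestamp' in log_dict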
Example #5
def handler_example3(event, context):
    aws_lambda_logging.setup(level='INFO',
                             This_Is_A_Custom_Key="This is the custom key's value")

    logger.info('This is example number 3')


# Example 4 - Basic log entry with varying log levels
def handler_example4(event, context):
    aws_lambda_logging.setup(level='INFO')

    logger.info('This is example number 4 - INFO level')
    logger.warning('This is example number 4 - WARNING level')
    logger.error('This is example number 4 - ERROR level')
    logger.debug("This is example number 4 - DEBUG level which you won't see in the output")


# Example 5 - JSON String log entry
def handler_example5(event, context):
    aws_lambda_logging.setup(level='INFO')

    logger.info('{"Example 5": [1,2,3]}')


# Example 6 - Variant boto logging level
def handler_example6(event, context):
    aws_lambda_logging.setup(level=loglevel, boto_level=botologlevel)
Example #6
def lambda_handler(*args, **kwargs):
    event = args[0]
    context = args[1]
    aws_lambda_logging.setup(level=os.environ.get('LOGLEVEL', 'INFO'),
                             aws_request_id=context.aws_request_id,
                             boto_level='CRITICAL')
    received_raw_dynamo_records = event['Records']
    for raw_dynamo_records in chunks(received_raw_dynamo_records,
                                     batch_size):
        dynamo_records: list = []
        for raw_dynamo_record in raw_dynamo_records:
            dynamo_record = DynamoRecord(raw_dynamo_record)
            if dynamo_record.is_any_of(event_types):
                dynamo_records.append(dynamo_record)
        if dynamo_records:
            log.info({
                "Action": "Processing",
                "Event": [
                    dynamo_record.get_type()
                    for dynamo_record in dynamo_records
                ]
            })
            result = func(dynamo_records, context)
            if result:
                log.info({"Result": result})
Example #7
def handler(event, context):
    """Handler for drm-filter-api"""
    aws_lambda_logging.setup(level=os.environ.get('LOGLEVEL', 'INFO'),
                             env=os.environ.get('ENV'))

    try:
        logging.debug(json.dumps({'message': 'logging event', 'event': event}))
    except Exception:
        logging.exception(json.dumps({'message': 'logging event'}))
        raise

    try:
        request = json.loads(event['body'])
        logging.debug(
            json.dumps({
                'message': "decoding message",
                "request": request
            }))
    except Exception:
        logging.exception(json.dumps({'message': "decoding message"}))
        response = {
            "statusCode": 400,
            "body": json.dumps(
                {"error": "Could not decode request: JSON parsing failed"}),
            'headers': {
                'Content-Type': 'application/json',
            }
        }
        return response

    try:
        body = filter_drm(request)
        logging.debug(
            json.dumps({
                'message': "filtering response",
                "body": body
            }))
    except Exception:
        logging.exception(json.dumps({'message': "filtering response"}))
        response = {
            "statusCode": 503,
            "body": json.dumps({"error": "Failed to filter payload"}),
            'headers': {
                'Content-Type': 'application/json',
            }
        }
        return response

    response = {
        "statusCode": 200,
        "body": json.dumps(body),
        'headers': {
            'Content-Type': 'application/json',
        }
    }
    logging.info(json.dumps({'message': 'responding', 'response': response}))
    return response
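For a quick local smoke test of this handler, a fabricated API Gateway proxy event is enough (the request payload below is an assumption, since `filter_drm`'s real input shape is not shown; the handler never touches `context`):

if __name__ == '__main__':
    fake_event = {'body': json.dumps({'items': []})}  # hypothetical request body
    print(handler(fake_event, None))  # context is unused by this handler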
Example #8
def main():
    aws_lambda_logging.setup(level=os.environ.get('LOGLEVEL', 'INFO'),
                             env=os.environ.get('ENV'),
                             timestamp=int(time.time()))
    unittest.main(verbosity=2)
Example #9
def handler(event, context):
    aws_lambda_logging.setup(level='DEBUG',
                             aws_request_id=context.aws_request_id)
    
    random_number = random.randrange(100) + 1
    
    log.info({"Number": random_number})
    return random_number
Example #10
def test_with_dict_message(root_logger, logger, stdout):
    from aws_lambda_logging import setup
    setup('DEBUG', another='value')

    msg = {'x': 'isx'}
    logger.critical(msg)

    log_dict = json.loads(stdout.getvalue())

    assert msg == log_dict['message']
Example #11
def handler(event, context):
    aws_lambda_logging.setup(level="INFO")
    logger.info("event: " + json.dumps(event))
    pp = pprint.PrettyPrinter(indent=4)
    logger.info("context")
    logger.info(pp.pformat(context))
    sqs = boto3.resource("sqs")
    queue = sqs.get_queue_by_name(QueueName="ImageSyncJobQueue")
    logger.info(queue)
    queue.send_message(MessageBody="testing testing 123")
    return {"message": "Hello, World!"}
Example #12
def log_handler(event: dict, context) -> None:
    aws_lambda_logging.setup(level="INFO")
    log.info(event)
    s3_event: S3Event = S3Event.from_dict(event)

    for record in s3_event.records:
        bucket = record.s3.bucket.name
        key = record.s3.object.key

        log.info(f"Processing {bucket}/{key}")
        SHIPPER.ship(bucket, key)
Example #13
def lambda_handler(event, context):
    """
    AWS Lambda entry point.

    :param event: AWS Lambda event data
    :param context: AWS Lambda context
    :return: Service response
    """
    aws_lambda_logging.setup(level='INFO', boto_level='INFO')
    LOGGER.info(f'Micro Airlines API {__version__}')
    LOGGER.info({'event': event})
    return awsgi.response(app, event, context)
Example #14
def lambda_handler(event, context):
    aws_lambda_logging.setup(level=log_level,
                             aws_request_id=context.aws_request_id)
    log.info(f"Processing data for: {prev_day}")

    file_names = get_file_inventory()
    chunks = [
        file_names[i:i + chunk_size]
        for i in range(0, len(file_names), chunk_size)
    ]

    return {"chunks": chunks, "message": "Init phase complete"}
Example #15
def main():
    aws_lambda_logging.setup(level=os.environ.get('LOGLEVEL', 'INFO'),
                             boto_level='CRITICAL')
    methods = [make_errors, make_warns, make_fatals, make_info]
    scheduler = sched.scheduler(time.time, time.sleep)

    while True:
        try:
            scheduler.enter(0.5, 1, choice(methods))
            scheduler.enter(0.5, 1, choice(methods))
            scheduler.run()
        except (KeyboardInterrupt, SystemExit):
            break
Example #16
def test_logging_exception_traceback(root_logger, logger, stdout):
    from aws_lambda_logging import setup
    setup('DEBUG', request_id='request id!', another='value')

    try:
        raise Exception('Boom')
    except Exception:
        logger.exception('This is a test')

    log_dict = json.loads(stdout.getvalue())

    check_log_dict(log_dict)
    assert 'exception' in log_dict
Example #17
def test_with_unserialisable_value_in_message(root_logger, logger, stdout):
    from aws_lambda_logging import setup
    setup('DEBUG', another='value')

    class X:
        pass

    msg = {'x': X()}
    logger.critical(msg)

    log_dict = json.loads(stdout.getvalue())

    assert log_dict['message']['x'].startswith('<')
Example #18
def lambda_handler(event, context):
    aws_lambda_logging.setup(level=log_level,
                             aws_request_id=context.aws_request_id)
    intermediate_files = event['intermediate_files']

    # delete from S3 bucket
    delete_intermediate_results(intermediate_files)

    return {
        "message": event["message"],
        "results": f'Download results from {event["output_file"]}'
    }
Example #19
def logger_setup(service: str = "service_undefined",
                 level: str = "INFO",
                 **kwargs):
    """Setups root logger to format statements in JSON.

    Includes service name and any additional key=value into logs
    It also accepts both service name or level explicitly via env vars

    Environment variables
    ---------------------
    POWERTOOLS_SERVICE_NAME : str
        service name
    LOG_LEVEL: str
        logging level (e.g. INFO, DEBUG)

    Parameters
    ----------
    service : str, optional
        service name to be appended in logs, by default "service_undefined"
    level : str, optional
        logging.level, by default "INFO"

    Example
    -------
    Sets up structured JSON logging for Lambda functions with an explicit service name

        >>> from lambda_python_powertools.logging import logger_setup
        >>> logger = logger_setup(service="payment")
        >>>
        >>> def handler(event, context):
                logger.info("Hello")

    Sets up structured JSON logging for Lambda functions using env vars

        $ export POWERTOOLS_SERVICE_NAME="payment"
        >>> from lambda_python_powertools.logging import logger_setup
        >>> logger = logger_setup()
        >>>
        >>> def handler(event, context):
                logger.info("Hello")

    """
    service = os.getenv("POWERTOOLS_SERVICE_NAME") or service
    log_level = os.getenv("LOG_LEVEL") or level
    logger = logging.getLogger(name=service)
    logger.setLevel(log_level)

    # Patch logger by structuring its outputs as JSON
    aws_lambda_logging.setup(level=log_level, service=service, **kwargs)

    return logger
Example #20
def handler(event, context):
    aws_lambda_logging.setup(level="INFO")
    logger.info("event: " + json.dumps(event))
    pp = pprint.PrettyPrinter(indent=4)
    logger.info(pp.pformat(context))
    dynamodb: DynamodbClient = boto3.client("dynamodb")
    s3: S3Client = boto3.client("s3")
    s3key = str(uuid.uuid4())
    logger.info("buckets")
    logger.info(pp.pformat(s3.list_buckets()))
    logger.info("dynamodb")
    logger.info(pp.pformat(dynamodb.list_tables()))
    logger.info("s3key: " + s3key)
    return {"message": "Hello, World!"}
Example #21
def test_setup_with_valid_log_levels(root_logger, logger, stdout, level):
    from aws_lambda_logging import setup
    setup(level, request_id='request id!', another='value')

    logger.critical('This is a test')

    log_dict = json.loads(stdout.getvalue())

    check_log_dict(log_dict)

    assert 'CRITICAL' == log_dict['level']
    assert 'This is a test' == log_dict['message']
    assert 'request id!' == log_dict['request_id']
    assert 'exception' not in log_dict
Example #22
def auth_handler(event, context):
    aws_lambda_logging.setup(level=os.environ.get('LOGLEVEL', 'INFO'),
                             aws_request_id=context.aws_request_id,
                             boto_level='CRITICAL')
    authorization_user = '******'
    authorization_method = 'N/A'
    try:
        authorization_header = (event['authorizationToken']
                                if event['type'] == 'TOKEN'
                                else event['headers']['Authorization'])
        if authorization_header:
            authorization_method, authorization_value = authorization_header.split(' ')
            authorization_user, authorization_password = base64.b64decode(
                authorization_value).decode('utf-8').split(':')
            parameter = get_ssm_param(authorization_user)
            if parameter['Value'] == authorization_password:
                log.info({
                    'Method': authorization_method,
                    'User': authorization_user,
                    'Effect': 'Allow'
                })
                effective_resource = re.match(
                    '(arn:aws:execute-api:[^:]+:[^:]+:[^/]+/[^/]+).*',
                    event['methodArn'])[1]
                policy = {
                    'principalId': authorization_user,
                    'policyDocument': {
                        'Version': '2012-10-17',
                        'Statement': [{
                            'Action': 'execute-api:Invoke',
                            'Effect': 'Allow',
                            'Resource': f'{effective_resource}/*'
                        }]
                    }
                }
                log.info({'Authorization': 'Success', 'Policy': policy})
                return policy

    except Exception as e:
        log.error(e)

    # authorization failed
    log.info({
        'Method': authorization_method,
        'User': authorization_user,
        'Effect': 'Deny'
    })
    raise Exception('Unauthorized')
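`get_ssm_param` is not shown. A minimal sketch, assuming it wraps boto3's SSM `get_parameter` call with decryption enabled and returns the inner `Parameter` dict (so that `parameter['Value']` works as above):

import boto3

ssm = boto3.client('ssm')

def get_ssm_param(name):
    # Fetch a decrypted SSM parameter; returns a dict with 'Name', 'Value', etc.
    return ssm.get_parameter(Name=name, WithDecryption=True)['Parameter']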
Example #23
def handler(event, context):
    """
    Logs every record in the stream
    """
    aws_lambda_logging.setup(level=os.environ.get('LOGLEVEL', 'INFO'),
                             aws_request_id=context.aws_request_id,
                             boto_level='CRITICAL')
    raw_kinesis_records = event['Records']
    for kinesis_record in iter_deaggregate_records(raw_kinesis_records):
        try:
            kinesis_record['kinesis']['data'] = json.loads(
                base64.b64decode(kinesis_record['kinesis']['data']))
        except json.JSONDecodeError:
            pass
        log.info(kinesis_record)
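`iter_deaggregate_records` here (and in Example #1) is the helper from the aws_kinesis_agg package, which passes plain records through unchanged and expands KPL-aggregated ones. A fabricated test event for a local run might look like this (the payload is an assumption):

import base64
import json

test_event = {
    'Records': [{
        'eventSource': 'aws:kinesis',
        'kinesis': {
            'data': base64.b64encode(json.dumps({'hello': 'world'}).encode()).decode()
        }
    }]
}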
Example #24
def handler(event: dict, context):
    """
    Listen to CloudTrail Events and publishes them to Kinesis
    """
    aws_lambda_logging.setup(level=os.environ.get('LOGLEVEL', 'INFO'),
                             aws_request_id=context.aws_request_id,
                             boto_level='CRITICAL')
    log.info(event)
    source = event.get('source')
    if source:
        expr = source_path_map.get(source)
        if expr:
            values = [match.value for match in expr.find(event)]
            if values:
                log.debug(values[0])
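`source_path_map` is not shown; the `expr.find(event)` / `match.value` calls match the jsonpath_ng API, so it is presumably a mapping from an event source to a precompiled JSONPath expression. A hypothetical sketch:

from jsonpath_ng import parse

source_path_map = {
    # hypothetical mapping: event source -> JSONPath into the event payload
    'aws.s3': parse('$.detail.requestParameters.bucketName'),
}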
Example #25
def handler(event, context):
    """
    Minimalistic almost-passthrough Firehose record processor
    """
    aws_lambda_logging.setup(level=os.environ.get('LOGLEVEL', 'INFO'),
                             aws_request_id=context.aws_request_id,
                             boto_level='CRITICAL')
    received_raw_firehose_records = event['records']
    log.info(f'Received {len(received_raw_firehose_records)} events')

    return {
        'records': [
            transform(firehose_record)
            for firehose_record in received_raw_firehose_records
        ]
    }
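`transform` is not shown. A Firehose record processor must return each record with `recordId`, `result`, and base64-encoded `data`; a minimal passthrough sketch:

import base64

def transform(firehose_record):
    # Decode, leave the payload untouched, re-encode, and mark the record Ok.
    payload = base64.b64decode(firehose_record['data'])
    return {
        'recordId': firehose_record['recordId'],
        'result': 'Ok',
        'data': base64.b64encode(payload).decode('utf-8'),
    }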
Example #26
def test_connectivity(event, context):
    loglevel = os.environ.get('LOGLEVEL', 'DEBUG')
    correlation_id = context.aws_request_id
    aws_lambda_logging.setup(level=loglevel, correlation_id=correlation_id)
    try:
        aws_lambda_logging.setup(env=os.environ.get('ENV'))
    except Exception:
        pass
    logging.debug(json.dumps({'event': event}))

    url = os.environ['CENTREON_URL']
    useralias = os.environ['CENTREON_USERALIAS']
    password = os.environ['CENTREON_PASSWORD']

    jar = get_login(url, useralias, password, correlation_id)
    logout(url, jar, correlation_id)
Example #27
    def __init__(self, connection=None, loglevel="INFO"):

        self.logger = logging.getLogger()
        # loglevel = "INFO"
        logging.basicConfig(level=logging.ERROR)
        aws_lambda_logging.setup(level=loglevel)

        if connection is None:
            host = os.environ.get('MYSQL_HOST')
            username = os.environ.get('MYSQL_USER')
            password = os.environ.get('MYSQL_PASSWORD')
            database = os.environ.get('MYSQL_DATABASE')
            self.connection = pymysql.connect(host,
                                              user=username,
                                              passwd=password,
                                              db=database,
                                              cursorclass=pymysql.cursors.DictCursor)
            self.logger.debug("!!!!!!!!!!new connection created")
        else:
            self.connection = connection
        self.table_name = None
Example #28
def handler(event, context):
    aws_lambda_logging.setup(
        level=os.environ.get('LOGLEVEL', 'INFO'),
        aws_request_id=context.aws_request_id,
        boto_level='CRITICAL'
    )

    log.info(event)

    now = datetime.now()
    key = now.strftime(OBJECT_PATTERN)
    try:
        s3.get_object(Bucket=BUCKET, Key=key)
        log.info(f'Found s3://{BUCKET}/{key}')
    except s3.exceptions.NoSuchKey:
        log.error(f'Missing object at s3://{BUCKET}/{key}')
Example #29
def handler_example4(event, context):
    aws_lambda_logging.setup(level='INFO')

    logger.info('This is example number 4 - INFO level')
    logger.warning('This is example number 4 - WARNING level')
    logger.error('This is example number 4 - ERROR level')
    logger.debug("This is example number 4 - DEBUG level which you won't see in the output")


# Example 5 - JSON String log entry
def handler_example5(event, context):
    aws_lambda_logging.setup(level='INFO')

    logger.info('{"Example 5": [1,2,3]}')


# Example 6 - Variant boto logging level
def handler_example6(event, context):
    aws_lambda_logging.setup(level=loglevel, boto_level=botologlevel)
Example #30
    def wrapped_func(event, context):

        extra_info = {'aws_request_id': context.aws_request_id}
        aws_lambda_logging.setup(level=LOG_LEVEL,
                                 boto_level=BOTO_LOG_LEVEL,
                                 **extra_info)

        logger = logging.getLogger()

        logger.debug("{} invoked!".format(context.function_name))
        logger.debug({'event': event, 'context': context.__dict__})

        try:
            retval = handler_func(event, context)
        except Exception:
            logger.exception("handler failed!")
            raise

        logger.debug("{} complete!".format(context.function_name))
        return retval
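`wrapped_func` is indented because it is the inner function of a decorator; the enclosing scope must supply `handler_func`, `LOG_LEVEL`, and `BOTO_LOG_LEVEL`. A sketch of the assumed enclosing decorator:

import functools

def with_logging(handler_func):
    # Assumed wrapper: returns the wrapped_func shown above, closing over handler_func.
    @functools.wraps(handler_func)
    def wrapped_func(event, context):
        ...  # body as in Example #30
    return wrapped_func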