Example 1
def update_child_attendance(event: dict, context: LambdaContext) -> dict:
    logger.info(f"in update_child_attendance, event: {event}")

    if "pathParameters" not in event or "child_id" not in event[
            "pathParameters"]:
        return build_response(HTTPStatus.BAD_REQUEST, event)

    try:
        child_id: str = event["pathParameters"]["child_id"]
        now: Decimal = Decimal(datetime.now().timestamp())
        body = event["body"]
        logger.debug(f"input body: {body}")

        attendance = Attendance.parse_raw(body)

        if attendance.attended and attendance.attended.upper() == "TRUE":
            logger.info(
                f"Setting attendance for child_id: {child_id} at time: {now}")
            return build_response(HTTPStatus.OK,
                                  json.dumps("attendance updated"))

        # If attended is false there is nothing to update; just return 200 OK
        return build_response(
            HTTPStatus.OK,
            json.dumps(
                "attendance not updated, FALSE is not currently supported"))
    except (ValidationError, TypeError) as err:
        logger.exception(
            "Failed to update child attendance, returning bad request")
        return build_error_response(err, HTTPStatus.BAD_REQUEST)
    except Exception as err:
        logger.exception("Failed to update child attendance, returning error",
                         err)
        return build_error_response(err)
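The handler above relies on an Attendance model and response helpers that are not shown here. A minimal sketch of what they might look like, assuming pydantic v1 and the API Gateway proxy integration response shape (field names and defaults are assumptions):

import json
from http import HTTPStatus
from typing import Optional

from pydantic import BaseModel


class Attendance(BaseModel):
    # Assumed shape: the handler only inspects `attended` against "TRUE"
    attended: Optional[str] = None


def build_response(http_status: HTTPStatus, body) -> dict:
    # API Gateway proxy integration expects statusCode and a body
    return {"statusCode": http_status.value, "body": body}


def build_error_response(
        err: Exception,
        http_status: HTTPStatus = HTTPStatus.INTERNAL_SERVER_ERROR) -> dict:
    return build_response(http_status, json.dumps({"error": str(err)}))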
def test_logger_record_caller_location(stdout, service_name):
    # GIVEN Logger is initialized
    logger = Logger(service=service_name, stream=stdout)

    # WHEN log statement is run
    logger.info("log")

    # THEN 'location' field should have
    # the correct caller resolution
    caller_fn_name = inspect.currentframe().f_code.co_name
    log = capture_logging_output(stdout)
    assert caller_fn_name in log["location"]
def test_logger_append_duplicated(stdout, service_name):
    # GIVEN Logger is initialized with request_id field
    logger = Logger(service=service_name, stream=stdout, request_id="value")

    # WHEN `request_id` is appended to the existing structured log
    # using a different value
    logger.structure_logs(append=True, request_id="new_value")
    logger.info("log")

    # THEN subsequent log statements should have the latest value
    log = capture_logging_output(stdout)
    assert "new_value" == log["request_id"]
Example 4
def test_logger_do_not_log_twice_when_root_logger_is_setup(stdout):
    # GIVEN Lambda configures the root logger with a handler
    root_logger = logging.getLogger()
    root_logger.addHandler(logging.StreamHandler(stream=stdout))

    # WHEN we create a new Logger and child Logger
    logger = Logger(stream=stdout)
    child_logger = Logger(child=True, stream=stdout)
    logger.info("hello")
    child_logger.info("hello again")

    # THEN the output should contain only two log entries,
    # since the child's records propagated to the root logger are rejected
    logs = list(stdout.getvalue().strip().split("\n"))
    assert len(logs) == 2
def test_logger_do_not_log_twice(stdout):
    # GIVEN Lambda configures the root logger with a handler
    logging.basicConfig(
        format="%(asctime)-15s %(clientip)s %(user)-8s %(message)s",
        level="INFO")
    root_logger = logging.getLogger()
    root_logger.addHandler(logging.StreamHandler(stream=stdout))

    # WHEN we create a new Logger
    logger = Logger(stream=stdout)
    logger.info("hello")

    # THEN unpacking should fail, because the root logger handler
    # is removed as part of our Logger initialization
    assert not root_logger.handlers
    with pytest.raises(ValueError, match=r".*expected 2, got 1.*"):
        [log_one, log_two] = stdout.getvalue().strip().split("\n")
def test_logger_extra_kwargs(stdout, service_name):
    # GIVEN Logger is initialized
    logger = Logger(service=service_name, stream=stdout)

    # WHEN `request_id` is an extra field in a log message to the existing structured log
    fields = {"request_id": "blah"}

    logger.info("with extra fields", extra=fields)
    logger.info("without extra fields")

    extra_fields_log, no_extra_fields_log = capture_multiple_logging_statements_output(stdout)

    # THEN first log should have request_id field in the root structure
    assert "request_id" in extra_fields_log

    # THEN second log should not have request_id in the root structure
    assert "request_id" not in no_extra_fields_log
def test_logger_log_twice_when_log_filter_isnt_present_and_root_logger_is_setup(monkeypatch, stdout, service_name):
    # GIVEN Lambda configures the root logger with a handler
    root_logger = logging.getLogger()
    root_logger.addHandler(logging.StreamHandler(stream=stdout))

    # WHEN we create a new Logger and child Logger
    # and log deduplication filter for child messages are disabled
    # see #262 for more details on why this is needed for Pytest Live Log feature
    monkeypatch.setenv(constants.LOGGER_LOG_DEDUPLICATION_ENV, "true")
    logger = Logger(service=service_name, stream=stdout)
    child_logger = Logger(service=service_name, child=True, stream=stdout)
    logger.info("PARENT")
    child_logger.info("CHILD")

    # THEN the output should contain four log entries: with deduplication
    # disabled, each message is written by the Logger's own handler and
    # again via propagation to the root logger handler
    logs = list(stdout.getvalue().strip().split("\n"))
    assert len(logs) == 4
def test_logger_append_remove_keys(stdout, service_name):
    # GIVEN a Logger is initialized
    logger = Logger(service=service_name, stream=stdout)
    extra_keys = {"request_id": "id", "context": "value"}

    # WHEN keys are updated
    logger.append_keys(**extra_keys)
    logger.info("message with new keys")

    # And removed
    logger.remove_keys(extra_keys.keys())
    logger.info("message after keys being removed")

    # THEN additional keys should only be present in the first log statement
    extra_keys_log, keys_removed_log = capture_multiple_logging_statements_output(
        stdout)

    assert extra_keys.items() <= extra_keys_log.items()
    assert (extra_keys.items() <= keys_removed_log.items()) is False
Example 9
def upload_s3_object_presigned(source_file: str, object_name: str,
                               bucket_name: str) -> None:
    response = create_presigned_post(bucket_name, object_name)
    if response is None:
        raise Exception(
            f"Error generating presigned url for {object_name=}, {bucket_name=}"
        )

    with open(source_file, 'rb') as f:
        files = {'file': (object_name, f)}
        http_response = requests.post(response['url'],
                                      data=response['fields'],
                                      files=files)

    # If successful, returns HTTP status code 204
    logger.info(f'File upload HTTP status code: {http_response.status_code}')
    if http_response.status_code >= 300:
        logger.error(
            f"Error uploading object with presigned url {http_response.content=}"
        )
        raise Exception(f"Error uploading {object_name=} to {bucket_name=}")
Example 10
def set_user_type(event: dict, context: LambdaContext) -> dict:
    try:
        event: APIGatewayProxyEvent = APIGatewayProxyEvent(event)
        user_id = event.request_context.authorizer.claims["sub"]
        user_name = event.request_context.authorizer.claims["cognito:username"]
        userpool_id = str(
            event.request_context.authorizer.claims["iss"]).split("/")[-1]
        logger.info(f"user id: {user_id}")
        user_type = UpdateUserTypeDto.parse_raw(event.body)

        client: CognitoIdentityProviderClient = boto3.client("cognito-idp")
        client.admin_update_user_attributes(
            UserPoolId=userpool_id,
            Username=user_name,
            UserAttributes=[
                AttributeTypeTypeDef(Name="custom:user_type",
                                     Value=user_type.user_type.value)
            ])
        return _build_response(HTTPStatus.OK, "{}")
    except (ValidationError, TypeError) as err:
        return _build_error_response(err, HTTPStatus.BAD_REQUEST)
    except Exception as err:
        return _build_error_response(err)
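UpdateUserTypeDto is not shown; the .user_type.value access suggests it wraps an enum. A hypothetical sketch (the enum members are placeholders, not taken from the source):

from enum import Enum

from pydantic import BaseModel


class UserType(str, Enum):
    # Hypothetical values for illustration only
    SEEKER = "seeker"
    EMPLOYER = "employer"


class UpdateUserTypeDto(BaseModel):
    user_type: UserType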
Example 11
def delete_seeker(event: dict, context: LambdaContext) -> dict:
    try:
        event: APIGatewayProxyEvent = APIGatewayProxyEvent(event)
        user_id = event.request_context.authorizer.claims["sub"]

        job_seeker_repository.delete(user_id)

        user_name = event.request_context.authorizer.claims["cognito:username"]
        userpool_id = str(
            event.request_context.authorizer.claims["iss"]).split("/")[-1]
        logger.info(f"user id: {user_id}")

        client: CognitoIdentityProviderClient = boto3.client("cognito-idp")
        client.admin_delete_user_attributes(
            UserPoolId=userpool_id,
            Username=user_name,
            UserAttributeNames=["custom:user_type"])

        return _build_response(http_status=HTTPStatus.OK, body='{}')
    except (ValidationError, TypeError) as err:
        return _build_error_response(err, HTTPStatus.BAD_REQUEST)
    except Exception as err:
        return _build_error_response(err)
def test_logger_append_duplicated(stdout):
    # GIVEN Logger is initialized with a request_id field
    logger = Logger(stream=stdout, request_id="value")

    # WHEN `request_id` is appended again with a different value
    logger.structure_logs(append=True, request_id="new_value")
    logger.info("log")

    # THEN the latest value should win
    log = json.loads(stdout.getvalue())
    assert "new_value" == log["request_id"]
def handler(event, context):
    custom_method()
    logger.info("Hello")
def custom_method():
    logger.info("Hello from method")
def handler(event, context):
    logger.info("Foo")
def handler(event, _):
    logger.set_correlation_id(event["requestContext"]["requestId"])
    logger.info("Foo")
def handler(event, context):
    logger.info("Hello")
def handler(event, context):
    if event.get("add_key"):
        logger.append_keys(my_key="value")
    logger.info("Foo")
def handler(event, context):
    logger.info("custom handler")