Example #1
    def test_redact_with_pii_and_only_redaction(self):
        comprehend_client = MagicMock()

        comprehend_client.contains_pii_entities.return_value = [
            Document(text="Some Random text", pii_classification={'SSN': 0.53})
        ]
        comprehend_client.detect_pii_documents.return_value = [
            Document(text="Some Random text",
                     pii_classification={'SSN': 0.53},
                     pii_entities=[{
                         'Score': 0.534,
                         'Type': 'SSN',
                         'BeginOffset': 0,
                         'EndOffset': 4
                     }])
        ]

        document = redact("Some Random text",
                          Segmenter(DOCUMENT_MAX_SIZE_CONTAINS_PII_ENTITIES),
                          Segmenter(DOCUMENT_MAX_SIZE_DETECT_PII_ENTITIES),
                          Redactor(RedactionConfig()), comprehend_client,
                          RedactionConfig(), DEFAULT_LANGUAGE_CODE)
        comprehend_client.contains_pii_entities.assert_not_called()
        comprehend_client.detect_pii_documents.assert_called_once()
        assert document.redacted_text == "**** Random text"
Example #2
 def test_segmenter_no_segmentation_needed(self):
     segmentor = Segmenter(5000, overlap_tokens=3)
     original_text = "Barack Hussein Obama II is an American politician and attorney who served as the " \
                     "44th president of the United States from 2009 to 2017."
     segments = segmentor.segment(original_text)
     assert len(segments) == 1
     assert segments[0].text == original_text
     assert segmentor.de_segment(segments).text == original_text
Example #3
 def test_is_overlapping_annotations(self):
     segmentor = Segmenter(5000)
     assert segmentor._is_overlapping_annotations(
         {
             'Score': 0.634,
             'Type': 'ADDRESS',
             'BeginOffset': 54,
             'EndOffset': 65
         }, {
             'Score': 0.234,
             'Type': 'ADDRESS',
             'BeginOffset': 58,
             'EndOffset': 65
         }) == 0
Example #4
    def test_redact_with_no_pii_and_classification(self):
        comprehend_client = MagicMock()

        comprehend_client.contains_pii_entities.return_value = [
            Document(text="Some Random text", pii_classification={})
        ]
        document = redact("Some Random text",
                          Segmenter(DOCUMENT_MAX_SIZE_CONTAINS_PII_ENTITIES),
                          Segmenter(DOCUMENT_MAX_SIZE_DETECT_PII_ENTITIES),
                          Redactor(RedactionConfig()), comprehend_client,
                          RedactionConfig(), DEFAULT_LANGUAGE_CODE)
        comprehend_client.contains_pii_entities.assert_called_once()
        comprehend_client.detect_pii_documents.assert_not_called()
        assert document.redacted_text == "Some Random text"
Example #5
 def test_segmenter_basic_text(self):
     segmentor = Segmenter(50, overlap_tokens=3)
     original_text = "Barack Hussein Obama II is an American politician and attorney who served as the " \
                     "44th president of the United States from 2009 to 2017."
     segments = segmentor.segment(original_text)
     expected_segments = [
         "Barack Hussein Obama II is an American politician ",
         "an American politician and attorney who served as ",
         "who served as the 44th president of the United ",
         "of the United States from 2009 to 2017."
     ]
     for expected_segment, actual_segment in zip(expected_segments,
                                                 segments):
         assert expected_segment == actual_segment.text
     shuffle(segments)
     assert segmentor.de_segment(segments).text == original_text
Example #6
 def time_bound_task():
     nonlocal processed_document
     nonlocal document
     PartialObjectRequestValidator.validate(event)
     pii_classification_segmenter = Segmenter(DOCUMENT_MAX_SIZE_CONTAINS_PII_ENTITIES)
     pii_redaction_segmenter = Segmenter(DOCUMENT_MAX_SIZE_DETECT_PII_ENTITIES)
     redactor = Redactor(redaction_config)
     time1 = time.time()
     text, http_headers, status_code = s3.download_file_from_presigned_url(object_get_context[INPUT_S3_URL],
                                                                           event[USER_REQUEST][HEADERS])
     time2 = time.time()
     LOG.info(f"Downloaded the file in : {(time2 - time1)} seconds")
     document = redact(text, pii_classification_segmenter, pii_redaction_segmenter, redactor,
                       comprehend, redaction_config, language_code)
     processed_document = True
     time1 = time.time()
     LOG.info(f"Pii redaction completed within {(time1 - time2)} seconds. Returning back the response to S3")
     redacted_text_bytes = document.redacted_text.encode('utf-8')
     http_headers[CONTENT_LENGTH] = len(redacted_text_bytes)
     s3.respond_back_with_data(redacted_text_bytes, http_headers, object_get_context[REQUEST_ROUTE],
                               object_get_context[REQUEST_TOKEN], status_code)
Example #7
    def test_classify_with_no_pii(self):
        comprehend_client = MagicMock()

        comprehend_client.contains_pii_entities.return_value = [
            Document(text="Some Random text", pii_classification={})
        ]
        entities = classify("Some Random text",
                            Segmenter(DOCUMENT_MAX_SIZE_CONTAINS_PII_ENTITIES),
                            comprehend_client, ClassificationConfig(),
                            DEFAULT_LANGUAGE_CODE)
        comprehend_client.contains_pii_entities.assert_called_once()
        assert len(entities) == 0
Example #8
 def test_segmenter_unicode_chars(self):
     segmentor = Segmenter(100, overlap_tokens=3)
     original_text = "╩ХтАв╠Бс┤етАв╠А╩ФуБгтЩб Emoticons ЁЯШЬ ╩ХтАв╠Бс┤етАв╠А╩ФуБгтЩб Emoticons ЁЯШЬ сЧ╖сЩУ ├▓┬е┬е┬е┬е┬е┬е┬есЧвсЦЗсУосШРсУ░ямбсЧйсТк тДмтДо ┬втЧО├╕┼В Bс┤З ╩Пс┤Пс┤Ь╩Аsс┤З╩Я╥У рд╡рд┐рдХрд┐рдкреАрдбрд┐рдпрд╛ рд╕рднреА рд╡рд┐рд╖рдпреЛрдВ рдкрд░ рдкреНрд░рд╛рдорд╛рдгрд┐рдХ рдФрд░ рдЙрдкрдпреЛрдЧ, " \
                     "рдкрд░рд┐рд╡рд░реНрддрди рд╡ рдкреБрдирд░реНрд╡рд┐рддрд░рдг рдХреЗ рд▓рд┐рдП рд╕реНрд╡рддрдиреНрддреНрд░ рдЬреНрдЮрд╛рдирдХреЛрд╢ рдмрдирд╛рдиреЗ h├аnb╟Оob─Бo, h├аnb╟Оo ц▒ЙхабхМЕ/ц╝вхабхМЕ, ц▒Йхаб/ц╝вхаб тАУ hamburger"
     segments = segmentor.segment(original_text)
     expected_segments = [
         "╩ХтАв╠Бс┤етАв╠А╩ФуБгтЩб Emoticons ЁЯШЬ ╩ХтАв╠Бс┤етАв╠А╩ФуБгтЩб Emoticons ЁЯШЬ сЧ╖сЩУ ",
         "Emoticons ЁЯШЬ сЧ╖сЩУ ├▓┬е┬е┬е┬е┬е┬е┬есЧвсЦЗсУосШРсУ░ямбсЧйсТк тДмтДо ┬втЧО├╕┼В Bс┤З ",
         "тДмтДо ┬втЧО├╕┼В Bс┤З ╩Пс┤Пс┤Ь╩Аsс┤З╩Я╥У рд╡рд┐рдХрд┐рдкреАрдбрд┐рдпрд╛ рд╕рднреА ",
         "╩Пс┤Пс┤Ь╩Аsс┤З╩Я╥У рд╡рд┐рдХрд┐рдкреАрдбрд┐рдпрд╛ рд╕рднреА рд╡рд┐рд╖рдпреЛрдВ рдкрд░ ",
         "рд╕рднреА рд╡рд┐рд╖рдпреЛрдВ рдкрд░ рдкреНрд░рд╛рдорд╛рдгрд┐рдХ рдФрд░ рдЙрдкрдпреЛрдЧ, ",
         "рдкреНрд░рд╛рдорд╛рдгрд┐рдХ рдФрд░ рдЙрдкрдпреЛрдЧ, рдкрд░рд┐рд╡рд░реНрддрди рд╡ ",
         "рдЙрдкрдпреЛрдЧ, рдкрд░рд┐рд╡рд░реНрддрди рд╡ рдкреБрдирд░реНрд╡рд┐рддрд░рдг рдХреЗ рд▓рд┐рдП ",
         "рдкреБрдирд░реНрд╡рд┐рддрд░рдг рдХреЗ рд▓рд┐рдП рд╕реНрд╡рддрдиреНрддреНрд░ ",
         "рдХреЗ рд▓рд┐рдП рд╕реНрд╡рддрдиреНрддреНрд░ рдЬреНрдЮрд╛рдирдХреЛрд╢ рдмрдирд╛рдиреЗ h├аnb╟Оob─Бo, ",
         "рдЬреНрдЮрд╛рдирдХреЛрд╢ рдмрдирд╛рдиреЗ h├аnb╟Оob─Бo, h├аnb╟Оo ц▒ЙхабхМЕ/ц╝вхабхМЕ, ц▒Йхаб/ц╝вхаб ",
         "h├аnb╟Оo ц▒ЙхабхМЕ/ц╝вхабхМЕ, ц▒Йхаб/ц╝вхаб тАУ hamburger"
     ]
     assert len(expected_segments) == len(segments)
     for expected_segment, actual_segment in zip(expected_segments,
                                                 segments):
         assert expected_segment == actual_segment.text
     assert segmentor.de_segment(segments).text == original_text
Example #9
def redact(text, classification_segmenter: Segmenter, detection_segmenter: Segmenter,
           redactor: Redactor, comprehend: ComprehendClient, redaction_config: RedactionConfig, language_code) -> Document:
    """
    Redact PII data in the given text. Redaction logic:

    1. Segment the text into subsegments of reasonable size (the max document size supported
       by Comprehend) for the initial classification.
    2. For each subsegment:
        2.1 call Comprehend's contains-pii-entities API to determine whether it contains any PII data
        2.2 if it contains PII, split it into smaller chunks (e.g. <=5KB); otherwise skip to the next subsegment
        2.3 for each chunk:
            2.3.1 call Comprehend's detect-pii-entities API to extract the PII entities
            2.3.2 redact the PII entities from the chunk
        2.4 merge all chunks
    3. Merge all subsegments.
    """
    if REDACTION_API_ONLY:
        doc = Document(text)
        documents = [doc]
        docs_for_entity_detection = detection_segmenter.segment(doc.text, doc.char_offset)
    else:
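        # Classification pass: only segments whose classification contains PII types of
        # interest are re-segmented for the more expensive entity-detection calls.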
        documents = comprehend.contains_pii_entities(classification_segmenter.segment(text), language_code)
        pii_docs = [doc for doc in documents if len(get_interested_pii(doc, redaction_config)) > 0]
        if not pii_docs:
            LOG.debug("Document doesn't have any pii. Nothing to redact.")
            text = classification_segmenter.de_segment(documents).text
            return Document(text, redacted_text=text)
        docs_for_entity_detection = []
        for pii_doc in pii_docs:
            docs_for_entity_detection.extend(detection_segmenter.segment(pii_doc.text, pii_doc.char_offset))

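    # Detect entities in the selected segments, then merge all segments back into a single
    # document so entity offsets line up with the original text before redacting.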
    docs_with_pii_entities = comprehend.detect_pii_documents(docs_for_entity_detection, language_code)
    resultant_doc = classification_segmenter.de_segment(documents + docs_with_pii_entities)
    assert len(resultant_doc.text) == len(text), "Not able to recover original document after segmentation and desegmentation."
    redacted_text = redactor.redact(text, resultant_doc.pii_entities)
    resultant_doc.redacted_text = redacted_text
    return resultant_doc
Example #10
def classify(text, classification_segmenter: Segmenter, comprehend: ComprehendClient,
             detection_config: ClassificationConfig, language_code) -> List[str]:
    """
    Detect PII data in the given text. Detection logic:

    1. Segment the text into segments of reasonable size (the max document size supported by
       Comprehend) for the initial classification.
    2. For each segment:
        2.1 call Comprehend's contains-pii-entities API to determine whether it contains any PII data
        2.2 collect the PII types that are in the detection config and above the configured threshold
    3. Return the list of PII types collected across all segments (an empty list if none were found).
    """
    pii_classified_documents = comprehend.contains_pii_entities(classification_segmenter.segment(text), language_code)
    pii_types = set()
    for doc in pii_classified_documents:
        doc_pii_types = get_interested_pii(doc, detection_config)
        pii_types |= set(doc_pii_types)
    return list(pii_types)
Example #11
 def test_desegment_overlapping_results(self):
     segments = [
         Document(
             text=
             "Some Random SSN Some Random email-id Some Random name and address and some credit card number",
             char_offset=0,
             pii_classification={
                 'SSN': 0.234,
                 'EMAIL': 0.765,
                 'NAME': 0.124,
                 'ADDRESS': 0.976
             },
             pii_entities=[{
                 'Score': 0.234,
                 'Type': 'SSN',
                 'BeginOffset': 12,
                 'EndOffset': 36
             }, {
                 'Score': 0.765,
                 'Type': 'EMAIL',
                 'BeginOffset': 28,
                 'EndOffset': 36
             }, {
                 'Score': 0.534,
                 'Type': 'NAME',
                 'BeginOffset': 49,
                 'EndOffset': 53
             }, {
                 'Score': 0.234,
                 'Type': 'ADDRESS',
                 'BeginOffset': 58,
                 'EndOffset': 65
             }]),
         Document(
             text="Some Random name and address and some credit card number",
             char_offset=37,
             pii_classification={
                 'SSN': 0.234,
                 'EMAIL': 0.765,
                 'USERNAME': 0.424,
                 'ADDRESS': 0.976
             },
             pii_entities=[{
                 'Score': 0.234,
                 'Type': 'USERNAME',
                 'BeginOffset': 12,
                 'EndOffset': 16
             }, {
                 'Score': 0.634,
                 'Type': 'ADDRESS',
                 'BeginOffset': 17,
                 'EndOffset': 28
             }, {
                 'Score': 0.234,
                 'Type': 'CREDIT_DEBIT_NUMBER',
                 'BeginOffset': 38,
                 'EndOffset': 56
             }])
     ]
     segmentor = Segmenter(5000)
     expected_merged_document = Document(
         text=
         "Some Random SSN Some Random email-id Some Random name and address and some credit card number",
         char_offset=37,
         pii_classification={
             'SSN': 0.234,
             'EMAIL': 0.765,
             'NAME': 0.124,
             'USERNAME': 0.424,
             'ADDRESS': 0.976
         },
         pii_entities=[{
             'Score': 0.234,
             'Type': 'SSN',
             'BeginOffset': 12,
             'EndOffset': 36
         }, {
             'Score': 0.765,
             'Type': 'EMAIL',
             'BeginOffset': 28,
             'EndOffset': 36
         }, {
             'Score': 0.534,
             'Type': 'NAME',
             'BeginOffset': 49,
             'EndOffset': 53
         }, {
             'Score': 0.634,
             'Type': 'ADDRESS',
             'BeginOffset': 54,
             'EndOffset': 65
         }, {
             'Score': 0.234,
             'Type': 'CREDIT_DEBIT_NUMBER',
             'BeginOffset': 75,
             'EndOffset': 93
         }])
     actual_merged_doc = segmentor.de_segment(segments)
     assert expected_merged_document.text == actual_merged_doc.text
     assert expected_merged_document.pii_classification == actual_merged_doc.pii_classification
     assert expected_merged_document.pii_entities == actual_merged_doc.pii_entities
Example #12
 def test_segmenter_constructor_invalid_args(self):
     try:
         Segmenter(3)
         assert False, "Expected an InvalidConfigurationException"
     except InvalidConfigurationException:
         return
Example #13
def pii_access_control_handler(event, context):
    """Detect Lambda function handler."""
    LOG.info(f'Received event with requestId: {event[REQUEST_ID]}')
    LOG.debug(f'Raw event {event}')

    InputEventValidator.validate(event)
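    # The S3 Object Lambda configuration payload may override the language code and the
    # classification settings (PII entity types and confidence threshold).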
    invoke_args = json.loads(event[S3OL_CONFIGURATION][PAYLOAD]) if event[S3OL_CONFIGURATION][PAYLOAD] else {}
    language_code = invoke_args.get(LANGUAGE_CODE, DEFAULT_LANGUAGE_CODE)
    detection_config = ClassificationConfig(**invoke_args)
    object_get_context = event[GET_OBJECT_CONTEXT]
    s3ol_access_point = event[S3OL_CONFIGURATION][S3OL_ACCESS_POINT_ARN]

    s3 = S3Client(s3ol_access_point)
    cloud_watch = CloudWatchClient()
    comprehend = ComprehendClient(session_id=event[REQUEST_ID], user_agent=DEFAULT_USER_AGENT, endpoint_url=COMPREHEND_ENDPOINT_URL,
                                  s3ol_access_point=s3ol_access_point)
    exception_handler = ExceptionHandler(s3)

    LOG.debug("Pii Entity Types to be detected:" + str(detection_config.pii_entity_types))

    pii_classification_segmenter = Segmenter(DOCUMENT_MAX_SIZE_CONTAINS_PII_ENTITIES)

    processed_document = False
    processed_pii_document = False
    pii_entities = []

    try:
        def time_bound_task():
            nonlocal processed_document
            nonlocal processed_pii_document
            nonlocal pii_entities
            PartialObjectRequestValidator.validate(event)
            time1 = time.time()
            text, http_headers, status_code = s3.download_file_from_presigned_url(object_get_context[INPUT_S3_URL],
                                                                                  event[USER_REQUEST][HEADERS])
            time2 = time.time()
            LOG.info(f"Downloaded the file in : {(time2 - time1)} seconds")
            pii_entities = classify(text, pii_classification_segmenter, comprehend, detection_config, language_code)
            time1 = time.time()

            processed_document = True
            LOG.info(f"Pii detection completed within {(time1 - time2)} seconds. Returning back the response to S3")
            if len(pii_entities) > 0:
                processed_pii_document = True
                raise RestrictedDocumentException()
            else:
                text_bytes = text.encode('utf-8')
                http_headers[CONTENT_LENGTH] = len(text_bytes)
                s3.respond_back_with_data(text_bytes, http_headers, object_get_context[REQUEST_ROUTE],
                                          object_get_context[REQUEST_TOKEN],
                                          status_code)

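        # Bound the work to the Lambda time remaining, minus a window reserved for cleanup
        # and error reporting.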
        execute_task_with_timeout(context.get_remaining_time_in_millis() - RESERVED_TIME_FOR_CLEANUP, time_bound_task)
    except Exception as generated_exception:
        exception_handler.handle_exception(generated_exception, object_get_context[REQUEST_ROUTE], object_get_context[REQUEST_TOKEN])
    finally:
        if PUBLISH_CLOUD_WATCH_METRICS:
            publish_metrics(cloud_watch, s3, comprehend, processed_document, processed_pii_document, language_code,
                            s3ol_access_point, pii_entities)

    LOG.info("Responded back to s3 successfully")