def test_main_decode_model(mocker):
    # Variant of test_main (renamed to avoid shadowing the version below) that
    # exercises the older module-level demisto_ml API:
    # decode_model / filter_model_words / explain_model_words.
    global TOKENIZATION_RESULT
    args = {'modelName': 'modelName',
            'modelStoreType': 'list',
            'emailSubject': 'word1',
            'emailBody': 'word2 word3',
            'minTextLength': '0',
            'labelProbabilityThreshold': '0',
            'wordThreshold': '0',
            'topWordsLimit': '10',
            'returnError': 'true'}
    mocker.patch.object(demisto, 'args', return_value=args)
    mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
    mocker.patch('demisto_ml.decode_model', return_value='Model', create=True)
    mocker.patch('demisto_ml.filter_model_words', return_value=('text', 2), create=True)
    mocker.patch('demisto_ml.explain_model_words',
                 return_value={'Label': 'Valid',
                               'Probability': 0.7,
                               'PositiveWords': ['word1'],
                               'NegativeWords': ['word2']},
                 create=True)
    TOKENIZATION_RESULT = {'originalText': '%s %s' % (args['emailSubject'], args['emailBody']),
                           'tokenizedText': '%s %s' % (args['emailSubject'], args['emailBody']),
                           'originalWordsToTokens': {'word1': ['word1'], 'word2': ['word2'], 'word3': ['word3']},
                           }
    res = main()
    correct_res = {'OriginalText': 'word1 word2 word3',
                   'Probability': 0.7,
                   'NegativeWords': ['word2'],
                   'TextTokensHighlighted': '<b>word1</b> word2 word3',
                   'PositiveWords': ['word1'],
                   'Label': 'Valid'}
    assert res['Contents'] == correct_res

    # An HTML body should yield the same result as a plain-text body.
    args['emailBodyHTML'] = args.pop('emailBody')
    TOKENIZATION_RESULT = {'originalText': '%s %s' % (args['emailSubject'], args['emailBodyHTML']),
                           'tokenizedText': '%s %s' % (args['emailSubject'], args['emailBodyHTML']),
                           'originalWordsToTokens': {'word1': ['word1'], 'word2': ['word2'], 'word3': ['word3']},
                           }
    res = main()  # re-capture the fresh result; asserting on the stale one would be a no-op
    assert res['Contents'] == correct_res

def test_no_positive_words(mocker):
    # Make sure that if no positive words were found, the TextTokensHighlighted
    # output is equivalent to the original text.
    global TOKENIZATION_RESULT
    d = {'Label': 'Valid',
         'Probability': 0.7,
         'PositiveWords': [],
         'NegativeWords': ['word2']}
    args = {'modelName': 'modelName',
            'modelStoreType': 'list',
            'emailSubject': 'word1',
            'emailBody': 'word2 word3',
            'minTextLength': '0',
            'labelProbabilityThreshold': '0',
            'wordThreshold': '0',
            'topWordsLimit': '10',
            'returnError': 'true'}
    mocker.patch.object(demisto, 'args', return_value=args)
    mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
    # The no-arg PhishingModelMock() that used to be built here was immediately
    # shadowed by this assignment, so only the latter is kept.
    phishing_mock = PhishingModelMock(('text', 2))
    mocker.patch('demisto_ml.phishing_model_loads', return_value=phishing_mock, create=True)
    mocker.patch.object(demisto, 'incidents', return_value=[{'isPlayground': True}])
    mocker.patch.object(phishing_mock, 'filter_model_words', return_value=('text', 2), create=True)
    mocker.patch.object(phishing_mock, 'explain_model_words', return_value=d, create=True)
    TOKENIZATION_RESULT = {'originalText': '%s %s' % (args['emailSubject'], args['emailBody']),
                           'tokenizedText': '%s %s' % (args['emailSubject'], args['emailBody']),
                           'originalWordsToTokens': {'word1': ['word1'], 'word2': ['word2'], 'word3': ['word3']},
                           }
    res = main()
    assert res['Contents']['TextTokensHighlighted'] == TOKENIZATION_RESULT['originalText']

def test_main(mocker):
    global TOKENIZATION_RESULT
    phishing_mock = PhishingModelMock()
    d = {'Label': 'Valid',
         'Probability': 0.7,
         'PositiveWords': ['word1'],
         'NegativeWords': ['word2']}
    args = {'modelName': 'modelName',
            'modelStoreType': 'list',
            'emailSubject': 'word1',
            'emailBody': 'word2 word3',
            'minTextLength': '0',
            'labelProbabilityThreshold': '0',
            'wordThreshold': '0',
            'topWordsLimit': '10',
            'returnError': 'true'}
    mocker.patch.object(demisto, 'args', return_value=args)
    mocker.patch.object(demisto, 'incidents', return_value=[{'isPlayground': True}])
    mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
    mocker.patch('demisto_ml.phishing_model_loads_handler', return_value=phishing_mock, create=True)
    mocker.patch.object(phishing_mock, 'filter_model_words', return_value=('text', 2), create=True)
    mocker.patch.object(phishing_mock, 'explain_model_words', return_value=d, create=True)
    TOKENIZATION_RESULT = {'originalText': concatenate_subject_body(args['emailSubject'], args['emailBody']),
                           'tokenizedText': concatenate_subject_body(args['emailSubject'], args['emailBody']),
                           'originalWordsToTokens': {'word1': ['word1'], 'word2': ['word2'], 'word3': ['word3']},
                           }
    res = main()
    correct_res = {'OriginalText': concatenate_subject_body(args['emailSubject'], args['emailBody']),
                   'Probability': 0.7,
                   'NegativeWords': ['word2'],
                   'TextTokensHighlighted': concatenate_subject_body(bold(args['emailSubject']), args['emailBody']),
                   'PositiveWords': ['word1'],
                   'Label': 'Valid'}
    assert res['Contents'] == correct_res

    # An HTML body should yield the same result as a plain-text body.
    args['emailBodyHTML'] = args.pop('emailBody')
    TOKENIZATION_RESULT = {'originalText': concatenate_subject_body(args['emailSubject'], args['emailBodyHTML']),
                           'tokenizedText': concatenate_subject_body(args['emailSubject'], args['emailBodyHTML']),
                           'originalWordsToTokens': {'word1': ['word1'], 'word2': ['word2'], 'word3': ['word3']},
                           }
    res = main()  # re-capture the fresh result; asserting on the stale one would be a no-op
    assert res['Contents'] == correct_res