def test_build_analyze_query_all_logs_nonempty_stacktrace_launches_with_the_same_name(self):
    """Tests building analyze query"""
    # Launch configured to match against launches with the same name,
    # using all log lines (-1 = no truncation).
    launch = launch_objects.Launch(
        analyzerConfig={"analyzerMode": "LAUNCH_NAME", "numberOfLogLines": -1},
        launchId=12,
        launchName="Launch name",
        project=1)
    # Log document with a non-empty stacktrace and a found exception.
    log_entry = {
        "_id": 1,
        "_index": 1,
        "_source": {
            "unique_id": "unique",
            "test_case_hash": 1,
            "test_item": "123",
            "message": "hello world",
            "merged_small_logs": "",
            "detected_message": "hello world",
            "detected_message_with_numbers": "hello world 1",
            "stacktrace": "invoke.method(arg)",
            "only_numbers": "1",
            "found_exceptions": "AssertionError",
            "potential_status_codes": "300 401",
        },
    }
    search_cfg = TestEsQuery.get_default_search_config()
    analyzer = AutoAnalyzerService(self.app_config, search_cfg)
    built_query = analyzer.build_analyze_query(launch, log_entry)
    expected_query = utils.get_fixture(
        self.query_all_logs_nonempty_stacktrace_launches_with_the_same_name,
        to_json=True)
    built_query.should.equal(expected_query)
def test_build_analyze_query_two_log_lines_only_current_launch_wo_exceptions(self):
    """Tests building analyze query"""
    # Launch restricted to the current launch only, with logs cut to 2 lines.
    launch = launch_objects.Launch(
        analyzerConfig={"analyzerMode": "CURRENT_LAUNCH", "numberOfLogLines": 2},
        launchId=12,
        launchName="Launch name",
        project=1)
    # Log document without stacktrace, exceptions or status codes.
    log_entry = {
        "_id": 1,
        "_index": 1,
        "_source": {
            "unique_id": "unique",
            "test_case_hash": 1,
            "test_item": "123",
            "message": "hello world",
            "merged_small_logs": "",
            "detected_message": "hello world",
            "detected_message_with_numbers": "hello world 1",
            "stacktrace": "",
            "only_numbers": "1",
            "found_exceptions": "",
            "potential_status_codes": "",
        },
    }
    search_cfg = TestEsQuery.get_default_search_config()
    analyzer = AutoAnalyzerService(self.app_config, search_cfg)
    built_query = analyzer.build_analyze_query(launch, log_entry)
    expected_query = utils.get_fixture(
        self.query_two_log_lines_only_current_launch_wo_exceptions,
        to_json=True)
    built_query.should.equal(expected_query)
def _queue_consumer_thread(queue_name, msg_callback):
    """Start a consumer thread for *queue_name*.

    Each consumer deliberately opens its own AMQP connection
    (a fresh ``AmqpClient``) so the threads do not share channels.
    """
    return create_thread(
        AmqpClient(APP_CONFIG["amqpUrl"]).receive,
        (APP_CONFIG["exchangeName"], queue_name, True, False, msg_callback))


def init_amqp(_amqp_client):
    """Initialize rabbitmq queues, exchange and starts threads for queue messages processing.

    Returns the list of started consumer threads, or ``None`` when the
    exchange could not be declared (unchanged historical behaviour).
    """
    with _amqp_client.connection.channel() as channel:
        try:
            declare_exchange(channel, APP_CONFIG)
        except Exception as err:
            # Without the exchange no queue can be bound; bail out early.
            logger.error("Failed to declare amqp objects")
            logger.error(err)
            return
    threads = []
    es_client = EsClient(APP_CONFIG, SEARCH_CONFIG)
    if APP_CONFIG["instanceTaskType"] == "train":
        # Training instances consume only model-retraining requests.
        threads.append(_queue_consumer_thread(
            "train_models",
            lambda channel, method, props, body: amqp_handler.handle_inner_amqp_request(
                channel, method, props, body,
                RetrainingService(APP_CONFIG, SEARCH_CONFIG).train_models)))
    else:
        # NOTE: the services below are instantiated inside the lambdas, i.e.
        # a fresh service object is built for every received message —
        # this matches the original behaviour and is kept on purpose.
        threads.append(_queue_consumer_thread(
            "index",
            lambda channel, method, props, body: amqp_handler.handle_amqp_request(
                channel, method, props, body, es_client.index_logs,
                prepare_response_data=amqp_handler.prepare_index_response_data)))
        threads.append(_queue_consumer_thread(
            "analyze",
            lambda channel, method, props, body: amqp_handler.handle_amqp_request(
                channel, method, props, body,
                AutoAnalyzerService(APP_CONFIG, SEARCH_CONFIG).analyze_logs,
                prepare_response_data=amqp_handler.prepare_analyze_response_data)))
        threads.append(_queue_consumer_thread(
            "delete",
            lambda channel, method, props, body: amqp_handler.handle_amqp_request(
                channel, method, props, body,
                DeleteIndexService(APP_CONFIG, SEARCH_CONFIG).delete_index,
                prepare_data_func=amqp_handler.prepare_delete_index,
                prepare_response_data=amqp_handler.output_result)))
        threads.append(_queue_consumer_thread(
            "clean",
            lambda channel, method, props, body: amqp_handler.handle_amqp_request(
                channel, method, props, body, es_client.delete_logs,
                prepare_data_func=amqp_handler.prepare_clean_index,
                prepare_response_data=amqp_handler.output_result)))
        threads.append(_queue_consumer_thread(
            "search",
            lambda channel, method, props, body: amqp_handler.handle_amqp_request(
                channel, method, props, body,
                SearchService(APP_CONFIG, SEARCH_CONFIG).search_logs,
                prepare_data_func=amqp_handler.prepare_search_logs,
                prepare_response_data=amqp_handler.prepare_analyze_response_data)))
        threads.append(_queue_consumer_thread(
            "suggest",
            lambda channel, method, props, body: amqp_handler.handle_amqp_request(
                channel, method, props, body,
                SuggestService(APP_CONFIG, SEARCH_CONFIG).suggest_items,
                prepare_data_func=amqp_handler.prepare_test_item_info,
                prepare_response_data=amqp_handler.prepare_analyze_response_data)))
        threads.append(_queue_consumer_thread(
            "cluster",
            lambda channel, method, props, body: amqp_handler.handle_amqp_request(
                channel, method, props, body,
                ClusterService(APP_CONFIG, SEARCH_CONFIG).find_clusters,
                prepare_data_func=amqp_handler.prepare_launch_info,
                prepare_response_data=amqp_handler.prepare_analyze_response_data)))
        threads.append(_queue_consumer_thread(
            "stats_info",
            lambda channel, method, props, body: amqp_handler.handle_inner_amqp_request(
                channel, method, props, body, es_client.send_stats_info)))
        threads.append(_queue_consumer_thread(
            "namespace_finder",
            lambda channel, method, props, body: amqp_handler.handle_amqp_request(
                channel, method, props, body,
                NamespaceFinderService(APP_CONFIG, SEARCH_CONFIG).update_chosen_namespaces)))
    return threads
def test_analyze_logs(self):
    """Test analyzing logs.

    Table-driven test: each case describes the mocked HTTP calls to
    Elasticsearch, optional msearch responses, the launch fixture to
    analyze, and the expected analysis result (count + issue type).
    """
    tests = [
        # Launch without test items -> nothing to analyze.
        {
            "test_calls": [{"method": httpretty.GET,
                            "uri": "/1",
                            "status": HTTPStatus.OK,
                            }, ],
            "index_rq": utils.get_fixture(self.launch_wo_test_items),
            "expected_count": 0,
            "expected_issue_type": "",
            "boost_predict": ([], [])
        },
        # Test items exist but have no logs -> nothing to analyze.
        {
            "test_calls": [{"method": httpretty.GET,
                            "uri": "/1",
                            "status": HTTPStatus.OK,
                            }, ],
            "index_rq": utils.get_fixture(
                self.launch_w_test_items_wo_logs),
            "expected_count": 0,
            "expected_issue_type": "",
            "boost_predict": ([], [])
        },
        # Logs are present but empty -> nothing to analyze.
        {
            "test_calls": [{"method": httpretty.GET,
                            "uri": "/2",
                            "status": HTTPStatus.OK,
                            }, ],
            "index_rq": utils.get_fixture(
                self.launch_w_test_items_w_empty_logs),
            "expected_count": 0,
            "expected_issue_type": "",
            "boost_predict": ([], [])
        },
        # Searches return no hits -> no issue suggested.
        {
            "test_calls": [{"method": httpretty.GET,
                            "uri": "/2",
                            "status": HTTPStatus.OK,
                            }, ],
            "msearch_results": [utils.get_fixture(self.no_hits_search_rs, to_json=True),
                                utils.get_fixture(self.no_hits_search_rs, to_json=True)],
            "index_rq": utils.get_fixture(self.launch_w_test_items_w_logs),
            "expected_count": 0,
            "expected_issue_type": "",
            "boost_predict": ([], [])
        },
        # Index does not exist (404) -> analysis yields nothing.
        {
            "test_calls": [{"method": httpretty.GET,
                            "uri": "/2",
                            "status": HTTPStatus.NOT_FOUND,
                            }, ],
            "index_rq": utils.get_fixture(self.launch_w_test_items_w_logs),
            "expected_count": 0,
            "expected_issue_type": "",
            "boost_predict": ([], [])
        },
        # One relevant hit, model votes positive -> AB001 suggested.
        {
            "test_calls": [{"method": httpretty.GET,
                            "uri": "/2",
                            "status": HTTPStatus.OK,
                            }],
            "msearch_results": [utils.get_fixture(self.no_hits_search_rs, to_json=True),
                                utils.get_fixture(self.one_hit_search_rs, to_json=True)],
            "index_rq": utils.get_fixture(self.launch_w_test_items_w_logs),
            "expected_count": 1,
            "expected_issue_type": "AB001",
            "boost_predict": ([1], [[0.2, 0.8]])
        },
        # Two candidates, only the first passes the model -> AB001.
        {
            "test_calls": [{"method": httpretty.GET,
                            "uri": "/2",
                            "status": HTTPStatus.OK,
                            }],
            "msearch_results": [utils.get_fixture(self.one_hit_search_rs, to_json=True),
                                utils.get_fixture(self.two_hits_search_rs, to_json=True)],
            "index_rq": utils.get_fixture(self.launch_w_test_items_w_logs),
            "expected_count": 1,
            "expected_issue_type": "AB001",
            "boost_predict": ([1, 0], [[0.2, 0.8], [0.7, 0.3]])
        },
        # Both candidates pass; the stronger one (AB001) wins.
        {
            "test_calls": [{"method": httpretty.GET,
                            "uri": "/2",
                            "status": HTTPStatus.OK,
                            }],
            "msearch_results": [utils.get_fixture(self.two_hits_search_rs, to_json=True),
                                utils.get_fixture(self.three_hits_search_rs, to_json=True)],
            "index_rq": utils.get_fixture(self.launch_w_test_items_w_logs),
            "expected_count": 1,
            "expected_issue_type": "AB001",
            "boost_predict": ([1, 1], [[0.2, 0.8], [0.3, 0.7]])
        },
        # Only the second candidate passes -> PB001 suggested.
        {
            "test_calls": [{"method": httpretty.GET,
                            "uri": "/2",
                            "status": HTTPStatus.OK,
                            }],
            "msearch_results": [utils.get_fixture(self.no_hits_search_rs, to_json=True),
                                utils.get_fixture(self.three_hits_search_rs, to_json=True)],
            "index_rq": utils.get_fixture(self.launch_w_test_items_w_logs),
            "expected_count": 1,
            "expected_issue_type": "PB001",
            "boost_predict": ([0, 1], [[0.8, 0.2], [0.3, 0.7]])
        },
        # Same search responses, different model vote -> AB001 instead.
        {
            "test_calls": [{"method": httpretty.GET,
                            "uri": "/2",
                            "status": HTTPStatus.OK,
                            }],
            "msearch_results": [utils.get_fixture(self.no_hits_search_rs, to_json=True),
                                utils.get_fixture(self.three_hits_search_rs, to_json=True)],
            "index_rq": utils.get_fixture(self.launch_w_test_items_w_logs),
            "expected_count": 1,
            "expected_issue_type": "AB001",
            "boost_predict": ([1, 0], [[0.2, 0.8], [0.7, 0.3]])
        },
        # Logs small enough to be merged -> no analysis result.
        {
            "test_calls": [{"method": httpretty.GET,
                            "uri": "/2",
                            "status": HTTPStatus.OK,
                            }],
            "msearch_results": [utils.get_fixture(self.two_hits_search_rs, to_json=True)],
            "index_rq": utils.get_fixture(
                self.launch_w_test_items_w_logs_to_be_merged),
            "expected_count": 0,
            "expected_issue_type": "",
            "boost_predict": ([], [])
        },
        # Hits sharing one unique id collapse to a single candidate -> AB001.
        {
            "test_calls": [{"method": httpretty.GET,
                            "uri": "/2",
                            "status": HTTPStatus.OK,
                            }],
            "msearch_results": [
                utils.get_fixture(self.no_hits_search_rs, to_json=True),
                utils.get_fixture(self.three_hits_search_rs_with_one_unique_id, to_json=True)],
            "index_rq": utils.get_fixture(
                self.launch_w_test_items_w_logs),
            "expected_count": 1,
            "expected_issue_type": "AB001",
            "boost_predict": ([1], [[0.2, 0.8]])
        }
    ]
    for idx, test in enumerate(tests):
        # sure.ensure tags assertion failures with the failing case index.
        with sure.ensure('Error in the test case number: {0}', idx):
            self._start_server(test["test_calls"])
            config = self.get_default_search_config()
            analyzer_service = AutoAnalyzerService(app_config=self.app_config,
                                                   search_cfg=config)
            # Stub the boosting model so predictions come from the test case.
            _boosting_decision_maker = BoostingDecisionMaker()
            _boosting_decision_maker.get_feature_ids = MagicMock(return_value=[0])
            _boosting_decision_maker.predict = MagicMock(return_value=test["boost_predict"])
            if "msearch_results" in test:
                # Stub the ES multi-search with the canned responses.
                analyzer_service.es_client.es_client.msearch = MagicMock(
                    return_value={"responses": test["msearch_results"]})
            analyzer_service.boosting_decision_maker = _boosting_decision_maker
            launches = [launch_objects.Launch(**launch)
                        for launch in json.loads(test["index_rq"])]
            response = analyzer_service.analyze_logs(launches)
            response.should.have.length_of(test["expected_count"])
            if test["expected_issue_type"] != "":
                test["expected_issue_type"].should.equal(response[0].issueType)
            if "expected_id" in test:
                test["expected_id"].should.equal(response[0].relevantItem)
            TestAutoAnalyzerService.shutdown_server(test["test_calls"])