Example #1
    def test_add_metric_to_batch_no_modification(self):
        eval_metrics_array, aggregator_value, target_value, metrics_value, observations, doc = \
            self._preperate_dummy_data_terms()

        # Create expected result
        observations["target"] = [target_value]
        observations["aggregator"] = [aggregator_value]
        expected_eval_terms = defaultdict()
        expected_eval_terms[aggregator_value] = defaultdict(list)
        expected_eval_terms[aggregator_value]["metrics"] = [metrics_value]
        expected_eval_terms[aggregator_value]["observations"] = [observations]
        expected_eval_terms[aggregator_value]["raw_docs"] = [doc]

        result = MetricsAnalyzer.add_metric_to_batch(eval_metrics_array,
                                                     aggregator_value,
                                                     target_value,
                                                     metrics_value,
                                                     observations, doc)
        self.assertEqual(result, expected_eval_terms)
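
For reference, a minimal sketch of the batching behaviour these assertions imply, reconstructed purely from the expected values built in the tests above. The name add_metric_to_batch_sketch is hypothetical and the real MetricsAnalyzer.add_metric_to_batch may differ in its internals; only the resulting structure is taken from the tests.

from collections import defaultdict

def add_metric_to_batch_sketch(eval_metrics_array, aggregator_value, target_value,
                               metrics_value, observations, doc):
    # Tag the observations with the target and aggregator as single-element lists,
    # mirroring how expected_eval_terms is constructed in the test above.
    observations["target"] = [target_value]
    observations["aggregator"] = [aggregator_value]

    # Each aggregator maps to three parallel lists: metrics, observations, raw_docs.
    if aggregator_value not in eval_metrics_array:
        eval_metrics_array[aggregator_value] = defaultdict(list)

    eval_metrics_array[aggregator_value]["metrics"].append(metrics_value)
    eval_metrics_array[aggregator_value]["observations"].append(observations)
    eval_metrics_array[aggregator_value]["raw_docs"].append(doc)

    return eval_metrics_array
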
Example #2
    def test_evaluate_batch_for_outliers_fetch_remain_metrics(self):
        self.test_settings.change_configuration_path(
            "/app/tests/unit_tests/files/metrics_test_01.conf")
        analyzer = AnalyzerFactory.create(
            "/app/tests/unit_tests/files/use_cases/metrics/metrics_dummy_test.conf"
        )

        eval_metrics_array, aggregator_value, target_value, metrics_value, observations = \
            self._preperate_data_terms_with_doc()
        doc = DummyDocumentsGenerate().generate_document()
        metrics = MetricsAnalyzer.add_metric_to_batch(eval_metrics_array,
                                                      aggregator_value,
                                                      target_value,
                                                      metrics_value,
                                                      observations, doc)

        result = analyzer._evaluate_batch_for_outliers(metrics, False)
        # _evaluate_batch_for_outliers returns (outliers, remaining_metrics);
        # here no outliers are found and every metric entry is kept for re-evaluation
        self.assertEqual(result, ([], metrics))
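
A brief usage sketch of the (outliers, remaining_metrics) contract exercised above. It reuses the analyzer and metrics variables from this test, and the reading of the boolean flag as "is this the last batch" is an assumption, not taken from the source.

outliers, remaining_metrics = analyzer._evaluate_batch_for_outliers(metrics, False)
for outlier in outliers:                  # empty in this test, per the assertion above
    analyzer.process_outlier(outlier)     # as the next example does for a real outlier
metrics = remaining_metrics               # all metric entries are carried over
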
Example #3
    def test_evaluate_batch_for_outliers_add_outlier(self):
        self.test_settings.change_configuration_path(
            "/app/tests/unit_tests/files/metrics_test_02.conf")
        analyzer = AnalyzerFactory.create(
            "/app/tests/unit_tests/files/use_cases/metrics/metrics_dummy_test_2.conf"
        )

        eval_metrics_array, aggregator_value, target_value, metrics_value, observations = \
            self._preperate_data_terms_with_doc(metrics_value=12)
        doc_without_outlier = copy.deepcopy(doc_without_outlier_test_file)
        self.test_es.add_doc(doc_without_outlier)
        metrics = MetricsAnalyzer.add_metric_to_batch(
            eval_metrics_array, aggregator_value, target_value, metrics_value,
            observations, doc_without_outlier)

        outliers, remaining_metrics = analyzer._evaluate_batch_for_outliers(
            metrics, True)
        analyzer.process_outlier(outliers[0])
        # Fetch the processed document back from the test Elasticsearch stub
        result = [elem for elem in self.test_es._scan()][0]
        doc_with_outlier = copy.deepcopy(doc_with_outlier_test_file)
        self.maxDiff = None
        self.assertEqual(result, doc_with_outlier)
Example #4
    def test_add_metric_to_batch_empty(self):
        eval_metrics_array = defaultdict()
        aggregator_value = ""
        target_value = ""
        metrics_value = ""
        observations = {}
        doc = {}
        # Create expected result
        observations["target"] = [target_value]
        observations["aggregator"] = [aggregator_value]
        expected_eval_terms = defaultdict()
        expected_eval_terms[aggregator_value] = defaultdict(list)
        expected_eval_terms[aggregator_value]["metrics"] = [metrics_value]
        expected_eval_terms[aggregator_value]["observations"] = [observations]
        expected_eval_terms[aggregator_value]["raw_docs"] = [doc]

        result = MetricsAnalyzer.add_metric_to_batch(eval_metrics_array,
                                                     aggregator_value,
                                                     target_value,
                                                     metrics_value,
                                                     observations, doc)
        self.assertEqual(result, expected_eval_terms)
Example #5
    def test_remove_metric_from_batch_simple_value(self):
        eval_metrics_array = defaultdict()
        aggregator_value = "agg"
        target_value = "dummy_target"
        metrics_value = "dummy_metric"
        observations = {}
        dummy_doc_gen = DummyDocumentsGenerate()
        doc = dummy_doc_gen.generate_document()

        batch = MetricsAnalyzer.add_metric_to_batch(eval_metrics_array,
                                                    aggregator_value,
                                                    target_value,
                                                    metrics_value,
                                                    observations, doc)
        result = MetricsAnalyzer.remove_metric_from_batch(
            batch[aggregator_value], 0)

        expected_aggregator_value = defaultdict(list)
        expected_aggregator_value["metrics"] = []
        expected_aggregator_value["observations"] = []
        expected_aggregator_value["raw_docs"] = []

        self.assertEqual(result, expected_aggregator_value)
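
As a counterpart to the sketch under Example #1, the removal behaviour asserted here amounts to dropping one index from each of the three parallel lists kept per aggregator. The name remove_metric_from_batch_sketch is hypothetical; the real MetricsAnalyzer.remove_metric_from_batch may differ in detail.

def remove_metric_from_batch_sketch(aggregator_batch, index):
    # Drop the entry at `index` from the three parallel lists, leaving the
    # empty lists that expected_aggregator_value above asserts on.
    for key in ("metrics", "observations", "raw_docs"):
        aggregator_batch[key].pop(index)
    return aggregator_batch
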