Example #1
    def testFetchDataWithMapData(self):
        evaluations = {
            "0_0": [
                ({"epoch": 1}, {"no_fetch_impl_metric": (3.7, 0.5)}),
                ({"epoch": 2}, {"no_fetch_impl_metric": (3.8, 0.5)}),
                ({"epoch": 3}, {"no_fetch_impl_metric": (3.9, 0.5)}),
                ({"epoch": 4}, {"no_fetch_impl_metric": (4.0, 0.5)}),
            ],
        }

        self.experiment.add_tracking_metric(metric=MapMetric(
            name="no_fetch_impl_metric"))
        self.experiment.new_trial()
        self.experiment.trials[0].mark_running(no_runner_required=True)
        first_epoch = MapData.from_map_evaluations(
            evaluations={
                arm_name: partial_results[0:1]
                for arm_name, partial_results in evaluations.items()
            },
            trial_index=0,
        )
        self.experiment.attach_data(first_epoch)
        remaining_epochs = MapData.from_map_evaluations(
            evaluations={
                arm_name: partial_results[1:4]
                for arm_name, partial_results in evaluations.items()
            },
            trial_index=0,
        )
        self.experiment.attach_data(remaining_epochs)
        self.experiment.trials[0].mark_completed()

        expected_data = remaining_epochs
        actual_data = self.experiment.lookup_data(
            keep_latest_map_values_only=False)
        self.assertEqual(expected_data, actual_data)
Example #2
    def testFetchDataWithMapData(self):
        evaluations = {
            "0_0": [
                ({"epoch": 1}, {"tracking": (3.7, 0.5)}),
                ({"epoch": 2}, {"tracking": (3.8, 0.5)}),
                ({"epoch": 3}, {"tracking": (3.9, 0.5)}),
                ({"epoch": 4}, {"tracking": (4.0, 0.5)}),
            ],
        }
        self.experiment.new_trial()
        self.experiment.trials[0].mark_running(no_runner_required=True)
        first_epoch = MapData.from_map_evaluations(
            evaluations={
                arm_name: partial_results[0:1]
                for arm_name, partial_results in evaluations.items()
            },
            trial_index=0,
        )
        self.experiment.attach_data(first_epoch)
        remaining_epochs = MapData.from_map_evaluations(
            evaluations={
                arm_name: partial_results[1:4]
                for arm_name, partial_results in evaluations.items()
            },
            trial_index=0,
        )
        self.experiment.attach_data(remaining_epochs)
        self.experiment.trials[0].mark_completed()
        expected_data = MapData.from_map_evaluations(
            evaluations=evaluations, trial_index=0)
        actual_data = self.experiment.fetch_data()
        self.assertEqual(expected_data, actual_data)
Example #3
def get_map_data(trial_index: int = 0) -> MapData:
    evaluations = {
        "status_quo": [
            ({"epoch": 1}, {"ax_test_metric": (1.0, 0.5)}),
            ({"epoch": 2}, {"ax_test_metric": (2.0, 0.5)}),
            ({"epoch": 3}, {"ax_test_metric": (3.0, 0.5)}),
            ({"epoch": 4}, {"ax_test_metric": (4.0, 0.5)}),
        ],
        "0_0": [
            ({"epoch": 1}, {"ax_test_metric": (3.7, 0.5)}),
            ({"epoch": 2}, {"ax_test_metric": (3.8, 0.5)}),
            ({"epoch": 3}, {"ax_test_metric": (3.9, 0.5)}),
            ({"epoch": 4}, {"ax_test_metric": (4.0, 0.5)}),
        ],
        "0_1": [
            ({"epoch": 1}, {"ax_test_metric": (3.0, 0.5)}),
            ({"epoch": 2}, {"ax_test_metric": (5.0, 0.5)}),
            ({"epoch": 3}, {"ax_test_metric": (6.0, 0.5)}),
            ({"epoch": 4}, {"ax_test_metric": (1.0, 0.5)}),
        ],
    }
    return MapData.from_map_evaluations(
        evaluations=evaluations,  # pyre-ignore [6]: Spurious param type mismatch.
        trial_index=trial_index,
        map_keys=["epoch"],
    )
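
A brief usage sketch for the helper above, illustrative only; the attribute names .df and .map_keys follow the assertions made in Example #4:

map_data = get_map_data(trial_index=0)
print(map_data.map_keys)  # expected to include "epoch"
print(len(map_data.df))   # expected 12 rows (3 arms x 4 epochs), assuming one row per arm/metric/epoch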
Example #4
    def testFromMapEvaluations(self):
        map_data = MapData.from_map_evaluations(
            evaluations={
                "0_1": [
                    ({"f1": 1.0, "f2": 0.5}, {"b": (3.7, 0.5)}),
                    ({"f1": 1.0, "f2": 0.75}, {"b": (3.8, 0.5)}),
                ]
            },
            trial_index=0,
        )
        self.assertEqual(len(map_data.df), 2)
        self.assertEqual(map_data.map_keys, ["f1", "f2"])

        with self.assertRaises(ValueError):
            MapData.from_map_evaluations(
                evaluations={
                    "0_1": [
                        ({"f1": 1.0, "f2": 0.5}, {"b": (3.7, 0.5)}),
                        ({"epoch": 1.0, "mc_samples": 0.75}, {"b": (3.8, 0.5)}),
                    ]
                },
                trial_index=0,
            )
Example #5
def data_and_evaluations_from_raw_data(
    raw_data: Dict[str, TEvaluationOutcome],
    metric_names: List[str],
    trial_index: int,
    sample_sizes: Dict[str, int],
    start_time: Optional[int] = None,
    end_time: Optional[int] = None,
) -> Tuple[Dict[str, TEvaluationOutcome], AbstractDataFrameData]:
    """Transforms evaluations into Ax Data.

    Each evaluation is either a trial evaluation: {metric_name -> (mean, SEM)}
    or a fidelity trial evaluation for multi-fidelity optimizations:
    [(fidelities, {metric_name -> (mean, SEM)})].

    Args:
        raw_data: Mapping from arm name to raw_data.
        metric_names: Names of metrics used to transform raw data to evaluations.
        trial_index: Index of the trial to which the evaluations belong.
        sample_sizes: Number of samples collected for each arm; may be empty
            if unavailable.
        start_time: Optional start time of run of the trial that produced this
            data, in milliseconds.
        end_time: Optional end time of run of the trial that produced this
            data, in milliseconds.
    """
    evaluations = {
        arm_name: raw_data_to_evaluation(
            raw_data=raw_data[arm_name],
            metric_names=metric_names,
            start_time=start_time,
            end_time=end_time,
        )
        for arm_name in raw_data
    }
    if all(isinstance(evaluations[x], dict) for x in evaluations.keys()):
        # All evaluations are no-fidelity evaluations.
        data = Data.from_evaluations(
            evaluations=cast(Dict[str, TTrialEvaluation], evaluations),
            trial_index=trial_index,
            sample_sizes=sample_sizes,
            start_time=start_time,
            end_time=end_time,
        )
    elif all(isinstance(evaluations[x], list) for x in evaluations.keys()):
        # All evaluations are map evaluations.
        data = MapData.from_map_evaluations(
            evaluations=cast(Dict[str, TMapTrialEvaluation], evaluations),
            trial_index=trial_index,
        )
    else:
        raise ValueError(  # pragma: no cover
            "Evaluations included a mixture of no-fidelity and with-fidelity "
            "evaluations, which is not currently supported."
        )
    return evaluations, data
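
A minimal usage sketch of the function defined above, exercising both raw_data shapes described in its docstring; the arm name "0_0" and metric name "objective" are made-up placeholders:

# No-fidelity trial evaluation: {metric_name -> (mean, SEM)} per arm.
evaluations, data = data_and_evaluations_from_raw_data(
    raw_data={"0_0": {"objective": (3.7, 0.5)}},
    metric_names=["objective"],
    trial_index=0,
    sample_sizes={},  # may be empty if unavailable
)

# Map evaluation: [(fidelities, {metric_name -> (mean, SEM)})] per arm.
evaluations, data = data_and_evaluations_from_raw_data(
    raw_data={
        "0_0": [
            ({"epoch": 1}, {"objective": (3.7, 0.5)}),
            ({"epoch": 2}, {"objective": (3.8, 0.5)}),
        ]
    },
    metric_names=["objective"],
    trial_index=0,
    sample_sizes={},
)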
Example #6
    def test_from_map_evaluations(self):
        map_data = MapData.from_map_evaluations(
            evaluations={
                "0_1": [
                    ({"f1": 1.0, "f2": 0.5}, {"b": (3.7, 0.5)}),
                    ({"f1": 1.0, "f2": 0.75}, {"b": (3.8, 0.5)}),
                ]
            },
            trial_index=0,
        )

        self.assertEqual(len(map_data.map_df), 2)
        self.assertEqual(set(map_data.map_keys), {"f1", "f2"})

        with self.assertRaisesRegex(
                ValueError, "Inconsistent map_key sets in evaluations"):
            MapData.from_map_evaluations(
                evaluations={
                    "0_1": [
                        ({"f1": 1.0, "f2": 0.5}, {"b": (3.7, 0.5)}),
                    ]
                },
                map_key_infos=[MapKeyInfo(key="f1", default_value=0.0)],
                trial_index=0,
            )
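
For contrast with the failing call above, a hedged sketch of a call whose map_key_infos covers every key appearing in the evaluations (assuming the two key sets must match; the default_value choices are illustrative):

MapData.from_map_evaluations(
    evaluations={
        "0_1": [
            ({"f1": 1.0, "f2": 0.5}, {"b": (3.7, 0.5)}),
        ]
    },
    map_key_infos=[
        MapKeyInfo(key="f1", default_value=0.0),
        MapKeyInfo(key="f2", default_value=0.0),
    ],
    trial_index=0,
)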