Example #1
    def setUp(self) -> None:
        self.experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            evaluation_function=sum_evaluation_function,
            objective_name="sum",
        )
        self.arms = [
            Arm(parameters={"x1": 0.75, "x2": 1}),
            Arm(parameters={"x1": 2, "x2": 7}),
            Arm(parameters={"x1": 10, "x2": 8}),
            Arm(parameters={"x1": -2, "x2": 10}),
        ]
Example #2
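# Decodes a SimpleExperiment from JSON: the optimization config is unpacked into
# constructor kwargs, fields that do not apply to simple experiments (tracking
# metrics, runner) are dropped, and a placeholder evaluation function is installed.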
def simple_experiment_from_json(
        object_json: Dict[str, Any]) -> SimpleExperiment:
    """Load AE SimpleExperiment from JSON."""
    experiment_info = _get_experiment_info(object_json)

    description_json = object_json.pop("description")
    is_test_json = object_json.pop("is_test")
    optimization_config = object_from_json(
        object_json.pop("optimization_config"))
    # not relevant to simple experiment
    del object_json["tracking_metrics"]
    del object_json["runner"]

    kwargs = {k: object_from_json(v) for k, v in object_json.items()}
    kwargs["evaluation_function"] = unimplemented_evaluation_function
    kwargs["objective_name"] = optimization_config.objective.metric.name
    kwargs["minimize"] = optimization_config.objective.minimize
    kwargs["outcome_constraints"] = optimization_config.outcome_constraints

    experiment = SimpleExperiment(**kwargs)
    experiment.description = object_from_json(description_json)
    experiment.is_test = object_from_json(is_test_json)

    _load_experiment_info(exp=experiment, exp_info=experiment_info)
    return experiment
Example #3
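    # Converts a SQLAlchemy experiment record into an Ax Experiment, dispatching to
    # SimpleExperiment when the stored "subclass" property indicates one.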
    def _init_experiment_from_sqa(self, experiment_sqa: SQAExperiment) -> Experiment:
        """First step of conversion within experiment_from_sqa."""
        opt_config, tracking_metrics = self.opt_config_and_tracking_metrics_from_sqa(
            metrics_sqa=experiment_sqa.metrics
        )
        search_space = self.search_space_from_sqa(
            parameters_sqa=experiment_sqa.parameters,
            parameter_constraints_sqa=experiment_sqa.parameter_constraints,
        )
        if search_space is None:
            raise SQADecodeError(  # pragma: no cover
                "Experiment SearchSpace cannot be None."
            )
        status_quo = (
            Arm(
                parameters=experiment_sqa.status_quo_parameters,
                name=experiment_sqa.status_quo_name,
            )
            if experiment_sqa.status_quo_parameters is not None
            else None
        )
        if len(experiment_sqa.runners) == 0:
            runner = None
        elif len(experiment_sqa.runners) == 1:
            runner = self.runner_from_sqa(experiment_sqa.runners[0])
        else:
            raise ValueError(  # pragma: no cover
                "Multiple runners on experiment "
                "only supported for MultiTypeExperiment."
            )

        subclass = (experiment_sqa.properties or {}).get("subclass")
        if subclass == "SimpleExperiment":
            if opt_config is None:
                raise SQADecodeError(  # pragma: no cover
                    "SimpleExperiment must have an optimization config."
                )
            experiment = SimpleExperiment(
                name=experiment_sqa.name,
                search_space=search_space,
                objective_name=opt_config.objective.metric.name,
                minimize=opt_config.objective.minimize,
                outcome_constraints=opt_config.outcome_constraints,
                status_quo=status_quo,
            )
            experiment.description = experiment_sqa.description
            experiment.is_test = experiment_sqa.is_test
        else:
            experiment = Experiment(
                name=experiment_sqa.name,
                description=experiment_sqa.description,
                search_space=search_space,
                optimization_config=opt_config,
                tracking_metrics=tracking_metrics,
                runner=runner,
                status_quo=status_quo,
                is_test=experiment_sqa.is_test,
            )
        return experiment
Example #4
def get_simple_experiment() -> SimpleExperiment:
    experiment = SimpleExperiment(name="test_branin",
                                  search_space=get_branin_search_space(),
                                  objective_name="sum")

    experiment.description = "foobar"

    return experiment
Example #5
    def testOptionalObjectiveName(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            evaluation_function=sum_evaluation_function_v2,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)
Example #6
    def testUnimplementedEvaluationFunction(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
        )
        with self.assertRaises(Exception):
            experiment.evaluation_function(parameterization={})

        experiment.evaluation_function = sum_evaluation_function
Example #7
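# Variant of the JSON decoder that also restores trials, per-trial data, arm
# signatures, and experiment metadata after constructing the SimpleExperiment.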
def simple_experiment_from_json(object_json: Dict[str, Any]) -> SimpleExperiment:
    """Load AE SimpleExperiment from JSON."""
    time_created_json = object_json.pop("time_created")
    trials_json = object_json.pop("trials")
    experiment_type_json = object_json.pop("experiment_type")
    data_by_trial_json = object_json.pop("data_by_trial")
    description_json = object_json.pop("description")
    is_test_json = object_json.pop("is_test")
    optimization_config = object_from_json(object_json.pop("optimization_config"))

    # not relevant to simple experiment
    del object_json["tracking_metrics"]
    del object_json["runner"]

    kwargs = {k: object_from_json(v) for k, v in object_json.items()}
    kwargs["evaluation_function"] = unimplemented_evaluation_function
    kwargs["objective_name"] = optimization_config.objective.metric.name
    kwargs["minimize"] = optimization_config.objective.minimize
    kwargs["outcome_constraints"] = optimization_config.outcome_constraints
    experiment = SimpleExperiment(**kwargs)

    experiment.description = object_from_json(description_json)
    experiment.is_test = object_from_json(is_test_json)
    experiment._time_created = object_from_json(time_created_json)
    experiment._trials = trials_from_json(experiment, trials_json)
    for trial in experiment._trials.values():
        for arm in trial.arms:
            experiment._arms_by_signature[arm.signature] = arm
    if experiment.status_quo is not None:
        sq_sig = experiment.status_quo.signature
        experiment._arms_by_signature[sq_sig] = experiment.status_quo
    experiment._experiment_type = object_from_json(experiment_type_json)
    experiment._data_by_trial = data_from_json(data_by_trial_json)
    return experiment
Example #8
def get_simple_experiment() -> SimpleExperiment:
    experiment = SimpleExperiment(
        name="test_branin",
        search_space=get_branin_search_space(),
        status_quo=Arm(parameters={"x1": 0.0, "x2": 0.0}),
        objective_name="sum",
    )

    experiment.description = "foobar"

    return experiment
Example #9
    def testEvaluationFunctionV4Numpy(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v4_numpy,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)
Example #10
    def testDeprecation(self) -> None:
        with patch.object(warnings, "warn") as mock_warn:
            SimpleExperiment(
                name="test_branin",
                search_space=get_branin_search_space(),
                objective_name="sum",
            )
            mock_warn.assert_called_once()
Example #11
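    # Builds a SimpleExperiment from string-based parameter and constraint
    # representations, then wraps it in a synchronous OptimizationLoop.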
    def with_evaluation_function(
        parameters: List[TParameterRepresentation],
        evaluation_function: TEvaluationFunction,
        experiment_name: Optional[str] = None,
        objective_name: Optional[str] = None,
        minimize: bool = False,
        parameter_constraints: Optional[List[str]] = None,
        outcome_constraints: Optional[List[str]] = None,
        total_trials: int = 20,
        arms_per_trial: int = 1,
        wait_time: int = 0,
        random_seed: Optional[int] = None,
        generation_strategy: Optional[GenerationStrategy] = None,
    ) -> "OptimizationLoop":
        """Constructs a synchronous `OptimizationLoop` using an evaluation
        function."""
        exp_parameters = [parameter_from_json(p) for p in parameters]
        parameter_map = {p.name: p for p in exp_parameters}
        experiment = SimpleExperiment(
            name=experiment_name,
            search_space=SearchSpace(
                parameters=exp_parameters,
                parameter_constraints=None
                if parameter_constraints is None
                else [
                    constraint_from_str(c, parameter_map)
                    for c in parameter_constraints
                ],
            ),
            objective_name=objective_name,
            evaluation_function=evaluation_function,
            minimize=minimize,
            outcome_constraints=[
                outcome_constraint_from_str(c)
                for c in (outcome_constraints or [])
            ],
        )
        return OptimizationLoop(
            experiment=experiment,
            total_trials=total_trials,
            arms_per_trial=arms_per_trial,
            random_seed=random_seed,
            wait_time=wait_time,
            generation_strategy=generation_strategy,
        )
Example #12
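    # Later variant of the SQA decoder: in addition to dispatching on the stored
    # "subclass" property, it carries over experiment properties (minus the subclass
    # key) and the default data type.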
    def _init_experiment_from_sqa(self,
                                  experiment_sqa: SQAExperiment) -> Experiment:
        """First step of conversion within experiment_from_sqa."""
        opt_config, tracking_metrics = self.opt_config_and_tracking_metrics_from_sqa(
            metrics_sqa=experiment_sqa.metrics)
        search_space = self.search_space_from_sqa(
            parameters_sqa=experiment_sqa.parameters,
            parameter_constraints_sqa=experiment_sqa.parameter_constraints,
        )
        if search_space is None:
            raise SQADecodeError(  # pragma: no cover
                "Experiment SearchSpace cannot be None.")
        status_quo = (
            Arm(
                # pyre-fixme[6]: Expected `Dict[str, Optional[Union[bool, float,
                #  int, str]]]` for 1st param but got `Optional[Dict[str,
                #  Optional[Union[bool, float, int, str]]]]`.
                parameters=experiment_sqa.status_quo_parameters,
                name=experiment_sqa.status_quo_name,
            ) if experiment_sqa.status_quo_parameters is not None else None)
        if len(experiment_sqa.runners) == 0:
            runner = None
        elif len(experiment_sqa.runners) == 1:
            runner = self.runner_from_sqa(experiment_sqa.runners[0])
        else:
            raise ValueError(  # pragma: no cover
                "Multiple runners on experiment "
                "only supported for MultiTypeExperiment.")

        # `experiment_sqa.properties` is `sqlalchemy.ext.mutable.MutableDict`
        # so need to convert it to regular dict.
        properties = dict(experiment_sqa.properties or {})
        # Remove 'subclass' from experiment's properties, since its only
        # used for decoding to the correct experiment subclass in storage.
        subclass = properties.pop(Keys.SUBCLASS, None)
        default_data_type = experiment_sqa.default_data_type
        if subclass == "SimpleExperiment":
            if opt_config is None:
                raise SQADecodeError(  # pragma: no cover
                    "SimpleExperiment must have an optimization config.")
            experiment = SimpleExperiment(
                name=experiment_sqa.name,
                search_space=search_space,
                objective_name=opt_config.objective.metric.name,
                minimize=opt_config.objective.minimize,
                outcome_constraints=opt_config.outcome_constraints,
                status_quo=status_quo,
                properties=properties,
                default_data_type=default_data_type,
            )
            experiment.description = experiment_sqa.description
            experiment.is_test = experiment_sqa.is_test
        else:
            experiment = Experiment(
                name=experiment_sqa.name,
                description=experiment_sqa.description,
                search_space=search_space,
                optimization_config=opt_config,
                tracking_metrics=tracking_metrics,
                runner=runner,
                status_quo=status_quo,
                is_test=experiment_sqa.is_test,
                properties=properties,
                default_data_type=default_data_type,
            )
        return experiment
Example #13
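# Benchmark script: builds a SimpleExperiment over a weighted combination of test
# functions, runs Sobol initialization followed by BoTorch-based trials, and writes
# per-arm results (true means and running best values) to CSV.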
def main():

    args = parse_args()

    function_list = [get_function_by_name[name]
            for name in args.function_name_list.split(',')]
    weight_list = list(map(float, args.weight_list.split(',')))
    covariance_matrix = json.loads(args.covariance_matrix)
    evaluate_covariance = args.evaluate_covariance

    init_iter = args.init_iter
    # if init_iter > 1:
    #     raise ValueError("init_iter should be 1.")
    init_batch_size = args.init_batch_size
    update_iter = args.update_iter
    batch_size = args.batch_size
    var_coef = args.var_coef

    var_compute_type = args.var_compute_type
    num_random = args.num_random
    num_bucket = args.num_bucket

    save_path = args.save_path

    # num_control = args.num_control

    minimize = True

    groundtruth_function = get_groundtruth_function(function_list, weight_list)
    #evaluation_function = get_evaluation_function(
    #        function_list, weight_list, covariance_matrix,
    #        evaluate_covariance, var_coef)
    evaluation_function = get_evaluation_function(
            function_list, weight_list, covariance_matrix,
            var_compute_type, num_random, num_bucket)

    exp = SimpleExperiment(
        name=args.function_name_list + args.weight_list,
        search_space=get_search_space(function_list),
        evaluation_function=evaluation_function,
        objective_name="objective_name",
        minimize=minimize,
    )
    t_start = time.time()
    print(f"Start time: {t_start}")
    print(f"Sobol iteration begin...{time.time() - t_start}")
    sobol = Models.SOBOL(exp.search_space)
    for i in range(init_iter):
        if init_batch_size == 1:
            exp.new_trial(generator_run=sobol.gen(init_batch_size))
        else:
            exp.new_batch_trial(generator_run=sobol.gen(init_batch_size))
        print(f"Running sobol optimization trial {i+1}/{init_iter}..."
              f"{time.time() - t_start}")
    print(f"GPEI iteration begin...{time.time() - t_start}")
    for i in range(update_iter):
        gpei = Models.BOTORCH(experiment=exp, data=exp.eval())
        if batch_size == 1:
            exp.new_trial(generator_run=gpei.gen(batch_size))
        else:
            exp.new_batch_trial(generator_run=gpei.gen(batch_size))
        print(f"Running GPEI optimization trial {i+1}/{update_iter}..."
              f"{time.time() - t_start}")

    # Construct Result.
    ## origin data.
    data_df = copy.deepcopy(exp.eval().df)
    compare_func = min if minimize else max
    
    arm_name2mean = {}
    for _, row in data_df.iterrows():
        arm_name2mean[row["arm_name"]] = row["mean"]
    ## parameters true_mean.
    other_columns = {
        "arm_name": [], "parameters": [], "true_mean": [],
        "cur_trial_best_mean": [], "accum_trials_best_mean": []}
    atbm = None # accum_trial_best_mean
    for trial in exp.trials.values():
        ctbm = None # cur_trial_best_mean
        for arm in trial.arms:
            other_columns['arm_name'].append(arm.name)
            other_columns['parameters'].append(json.dumps(arm.parameters))
            other_columns['true_mean'].append(
                    groundtruth_function(arm.parameters))
            if ctbm is None:
                ctbm = arm_name2mean[arm.name]
            ctbm = compare_func(ctbm, arm_name2mean[arm.name])
        if atbm is None:
            atbm = ctbm
        atbm = compare_func(atbm, ctbm)
        other_columns['cur_trial_best_mean'].extend([ctbm] * len(trial.arms))
        other_columns['accum_trials_best_mean'].extend([atbm] * len(trial.arms))
    other_df = DataFrame(other_columns)

    result_df = data_df.set_index('arm_name').join(
            other_df.set_index('arm_name')).reset_index()
    
    # Save to file.
    print("Save to file.")
    sub_dir_name = "_".join([
        "ax", args.function_name_list.replace(",", "_"),
        args.weight_list.replace(",", "_"), args.covariance_matrix.replace(
            "[", "_").replace("]", "_").replace(",", "_").replace(" ", ""),
        str(args.evaluate_covariance), str(args.init_iter), str(init_batch_size),
        str(args.update_iter), str(args.batch_size), str(args.var_coef),
        str(minimize), str(var_compute_type), str(num_random), str(num_bucket)
        ])
    abs_dir_path = os.path.join(save_path, sub_dir_name)
    Path(abs_dir_path).mkdir(parents=True, exist_ok=True)
    task_id = os.environ.get('TASK_INDEX')
    cur_time = pd.Timestamp.now().strftime('%Y%m%d%H%M%S')
    filename = cur_time + "_" + str(task_id) + ".csv"
    print(os.path.join(abs_dir_path, filename))
    result_df.to_csv(os.path.join(abs_dir_path, filename))
    print("2021-01-19 19:48:00")
    print("Done...")
Example #14
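# Test suite covering SimpleExperiment construction, trial evaluation, and the
# supported evaluation-function signatures.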
class SimpleExperimentTest(TestCase):
    def setUp(self) -> None:
        self.experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            evaluation_function=sum_evaluation_function,
            objective_name="sum",
        )
        self.arms = [
            Arm(parameters={"x1": 0.75, "x2": 1}),
            Arm(parameters={"x1": 2, "x2": 7}),
            Arm(parameters={"x1": 10, "x2": 8}),
            Arm(parameters={"x1": -2, "x2": 10}),
        ]

    def testBasic(self) -> None:
        self.assertTrue(self.experiment.is_simple_experiment)
        trial = self.experiment.new_trial()
        with self.assertRaises(NotImplementedError):
            trial.runner = SyntheticRunner()
        with self.assertRaises(NotImplementedError):
            self.experiment.add_tracking_metric(Metric(name="test"))
        with self.assertRaises(NotImplementedError):
            self.experiment.update_tracking_metric(Metric(name="test"))
        self.assertTrue(self.experiment.eval_trial(trial).df.empty)
        batch = self.experiment.new_batch_trial()
        batch.add_arm(Arm(parameters={"x1": 5, "x2": 10}))
        self.assertEqual(self.experiment.eval_trial(batch).df["mean"][0], 15)
        self.experiment.new_batch_trial().add_arm(Arm(parameters={"x1": 15, "x2": 25}))
        self.assertAlmostEqual(self.experiment.eval().df["mean"][1], 40)
        self.assertEqual(batch.fetch_data().df["mean"][0], 15)
        self.assertAlmostEqual(self.experiment.fetch_data().df["mean"][1], 40)

    def testTrial(self) -> None:
        for i in range(len(self.arms)):
            self.experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(self.experiment.eval().df.empty)

    def testUnimplementedEvaluationFunction(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
        )
        with self.assertRaises(Exception):
            experiment.evaluation_function(parameterization={})

        experiment.evaluation_function = sum_evaluation_function

    def testEvaluationFunctionNumpy(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_numpy,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV2(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v2,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV2Numpy(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v2_numpy,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV3(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v3,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV3Numpy(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v3_numpy,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV4(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v4,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV4Numpy(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v4_numpy,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testOptionalObjectiveName(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            evaluation_function=sum_evaluation_function_v2,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)
Example #15
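    # Exercises REMBOStrategy on a 20-dimensional SimpleExperiment with an outcome
    # constraint, checking that generated arms are bucketed by random-embedding
    # projection before the GP is first fit.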
    def test_REMBOStrategy(self, mock_fit_gpytorch_model, mock_optimize_acqf):
        # Construct a high-D test experiment with multiple metrics
        hartmann_search_space = SearchSpace(parameters=[
            RangeParameter(
                name=f"x{i}",
                parameter_type=ParameterType.FLOAT,
                lower=0.0,
                upper=1.0,
            ) for i in range(20)
        ])

        exp = SimpleExperiment(
            name="test",
            search_space=hartmann_search_space,
            evaluation_function=hartmann_evaluation_function,
            objective_name="hartmann6",
            minimize=True,
            outcome_constraints=[
                OutcomeConstraint(
                    metric=L2NormMetric(
                        name="l2norm",
                        param_names=[f"x{i}" for i in range(6)],
                        noise_sd=0.2,
                    ),
                    op=ComparisonOp.LEQ,
                    bound=1.25,
                    relative=False,
                )
            ],
        )

        # Instantiate the strategy
        gs = REMBOStrategy(D=20, d=6, k=4, init_per_proj=4)

        # Check that arms and data are correctly segmented by projection
        exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2))
        self.assertEqual(len(gs.arms_by_proj[0]), 2)
        self.assertEqual(len(gs.arms_by_proj[1]), 0)

        exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2))

        self.assertEqual(len(gs.arms_by_proj[0]), 2)
        self.assertEqual(len(gs.arms_by_proj[1]), 2)

        # Iterate until the first projection fits a GP
        for _ in range(4):
            exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2))
            mock_fit_gpytorch_model.assert_not_called()

        self.assertEqual(len(gs.arms_by_proj[0]), 4)
        self.assertEqual(len(gs.arms_by_proj[1]), 4)
        self.assertEqual(len(gs.arms_by_proj[2]), 2)
        self.assertEqual(len(gs.arms_by_proj[3]), 2)

        # Keep iterating until GP is used for gen
        for i in range(4):
            # First two trials will go towards 3rd and 4th proj. getting enough
            if i < 1:  # data for GP.
                self.assertLess(len(gs.arms_by_proj[2]), 4)
            if i < 2:
                self.assertLess(len(gs.arms_by_proj[3]), 4)

            exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2))
            if i < 2:
                mock_fit_gpytorch_model.assert_not_called()
            else:
                # After all proj. have > 4 arms' worth of data, GP can be fit.
                self.assertFalse(
                    any(len(x) < 4 for x in gs.arms_by_proj.values()))
                mock_fit_gpytorch_model.assert_called()

        self.assertTrue(len(gs.model_transitions) > 0)
        gs2 = gs.clone_reset()
        self.assertEqual(gs2.D, 20)
        self.assertEqual(gs2.d, 6)
Example #16
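    # Full SQLAlchemy-to-Ax conversion: reconstructs the experiment (SimpleExperiment
    # or Experiment), its trials, arm signatures, and per-trial data.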
    def experiment_from_sqa(self, experiment_sqa: SQAExperiment) -> Experiment:
        """Convert SQLAlchemy Experiment to Ax Experiment."""
        opt_config, tracking_metrics = self.opt_config_and_tracking_metrics_from_sqa(
            metrics_sqa=experiment_sqa.metrics)
        search_space = self.search_space_from_sqa(
            parameters_sqa=experiment_sqa.parameters,
            parameter_constraints_sqa=experiment_sqa.parameter_constraints,
        )
        if search_space is None:
            raise SQADecodeError(  # pragma: no cover
                "Experiment SearchSpace cannot be None.")
        runner = (self.runner_from_sqa(experiment_sqa.runner)
                  if experiment_sqa.runner else None)
        status_quo = (Arm(
            parameters=experiment_sqa.status_quo_parameters,
            name=experiment_sqa.status_quo_name,
        ) if experiment_sqa.status_quo_parameters is not None else None)

        if (experiment_sqa.properties is not None
                and experiment_sqa.properties.get("subclass")
                == "SimpleExperiment"):
            if opt_config is None:
                raise SQADecodeError(  # pragma: no cover
                    "SimpleExperiment must have an optimization config.")
            experiment = SimpleExperiment(
                name=experiment_sqa.name,
                search_space=search_space,
                objective_name=opt_config.objective.metric.name,
                minimize=opt_config.objective.minimize,
                outcome_constraints=opt_config.outcome_constraints,
                status_quo=status_quo,
            )
            experiment.description = experiment_sqa.description
            experiment.is_test = experiment_sqa.is_test
        else:
            experiment = Experiment(
                name=experiment_sqa.name,
                description=experiment_sqa.description,
                search_space=search_space,
                optimization_config=opt_config,
                tracking_metrics=tracking_metrics,
                runner=runner,
                status_quo=status_quo,
                is_test=experiment_sqa.is_test,
            )

        trials = [
            self.trial_from_sqa(trial_sqa=trial, experiment=experiment)
            for trial in experiment_sqa.trials
        ]

        data_by_trial = defaultdict(dict)
        for data_sqa in experiment_sqa.data:
            trial_index = data_sqa.trial_index
            timestamp = data_sqa.time_created
            data_by_trial[trial_index][timestamp] = self.data_from_sqa(
                data_sqa=data_sqa)
        data_by_trial = {
            trial_index: OrderedDict(sorted(data_by_timestamp.items()))
            for trial_index, data_by_timestamp in data_by_trial.items()
        }

        experiment._trials = {trial.index: trial for trial in trials}
        for trial in trials:
            for arm in trial.arms:
                experiment._arms_by_signature[arm.signature] = arm
        if experiment.status_quo is not None:
            sq_sig = experiment.status_quo.signature
            experiment._arms_by_signature[sq_sig] = experiment.status_quo
        experiment._time_created = experiment_sqa.time_created
        experiment._experiment_type = self.get_enum_name(
            value=experiment_sqa.experiment_type,
            enum=self.config.experiment_type_enum)
        experiment._data_by_trial = dict(data_by_trial)

        return experiment