Example 1
0
    def setup(self):
        """
        Builds a fresh ExperimentAssistant fixture and checks initialization.

        Verifies:
            - the optimizer is instantiated from its string name
            - the optimizer arguments are stored unchanged
            - the minimization flag is propagated to the experiment
        """
        optimizer_name = "RandomSearch"
        exp_name = "test_init_experiment"
        minimize = True
        self.param_defs = {
            "x": MinMaxNumericParamDef(0, 1),
            "name": NominalParamDef(["A", "B", "C"])
        }
        opt_args = {"multiprocessing": "none"}

        # Build the experiment first, then hand it to the assistant.
        new_exp = experiment.Experiment(exp_name, self.param_defs, minimize)
        self.EAss = ExperimentAssistant(optimizer_name,
                                        new_exp,
                                        optimizer_arguments=opt_args)

        assert_equal(self.EAss._optimizer.__class__.__name__, optimizer_name)
        assert_equal(self.EAss._optimizer_arguments, opt_args)
        assert_equal(self.EAss._experiment.minimization_problem, minimize)
Example 2
0
    def init_experiment(self, name, optimizer, param_defs, exp_id=None,
                        notes=None, optimizer_arguments=None,
                        minimization=True):
        """
        Initializes an experiment.

        Parameters
        ----------
        name : string
            name of the experiment.
        optimizer : string
            String representation of the optimizer.
        param_defs : dict of parameter definitions
            Dictionary of parameter definition classes.
        optimizer_arguments : dict, optional
            A dictionary defining the operation of the optimizer. See the
            respective documentation of the optimizers.
            Default is None, which are default values.
        exp_id : string or None, optional
            The id of the experiment, which will be used to reference it.
            Should be a proper uuid, and especially has to be unique. If it is
            not, an error may be returned.
        notes : jsonable object or None, optional
            Any note that you'd like to put in the experiment. Could be used
            to provide some details on the experiment, on the start time or the
            user starting it.
        minimization : bool, optional
            Whether the problem is one of minimization. Defaults to True.

        Returns
        -------
        exp_id : string
            String representing the id of the experiment or "failed" if failed.

        Raises
        ------
        ValueError :
            Iff there already is an experiment with the exp_id for this lab
            assistant. Does not occur if no exp_id is given.
        """
        # Membership test directly on the dict; .keys() is redundant.
        if exp_id in self._exp_assistants:
            raise ValueError("Already an experiment with id %s registered."
                             % exp_id)

        if exp_id is None:
            # Draw fresh uuids until an unused one is found (collisions are
            # astronomically unlikely, but be safe anyway).
            while True:
                exp_id = uuid.uuid4().hex
                if exp_id not in self._exp_assistants:
                    break

        exp_ass = ExperimentAssistant(
            optimizer, optimizer_arguments=optimizer_arguments,
            write_directory_base=self._lab_run_directory,
            csv_write_frequency=1)
        exp_ass.init_experiment(name, param_defs, exp_id, notes, minimization)
        self._exp_assistants[exp_id] = exp_ass
        self._logger.info("Experiment initialized successfully.")
        return exp_id
Example 3
0
    def init_experiment(self, name, optimizer, param_defs, exp_id=None,
                        notes=None, optimizer_arguments=None, minimization=True):
        """
        Initializes a new experiment.
        This actually initializes self.cv many experiments.
        Internally, the experiments are called name_i, where name is the
        experiment_name and i is the number of the experiment.

        Parameters
        ----------
        name : string
            The name of the experiment. This has to be unique.
        optimizer : Optimizer instance or string
            This is an optimizer implementing the corresponding functions: It
            gets an experiment instance, and returns one or multiple candidates
            which should be evaluated next.
            Alternatively, it can be a string corresponding to the optimizer,
            as defined by apsis.utilities.optimizer_utils.
        param_defs : dict of ParamDef.
            This is the parameter space defining the experiment.
        optimizer_arguments : dict, optional
            These are arguments for the optimizer. Refer to their documentation
            as to which are available.
        minimization : bool, optional
            Whether the problem is one of minimization or maximization.

        Returns
        -------
        exp_id : string
            The id under which the cross-validation assistants are stored.

        Raises
        ------
        ValueError :
            Iff there already is an experiment registered under exp_id.
        """
        self._logger.info("Initializing new experiment \"%s\". "
                     " Parameter definitions: %s. Minimization is %s"
                     %(name, param_defs, minimization))
        if exp_id in self._exp_assistants:
            # Bug fix: interpolate the conflicting exp_id, not the experiment
            # name, into the "id %s" message.
            raise ValueError("Already an experiment with id %s registered."
                             % exp_id)
        if exp_id is None:
            # Draw fresh uuids until an unused one is found.
            while True:
                exp_id = uuid.uuid4().hex
                if exp_id not in self._exp_assistants:
                    break
        self._exp_assistants[exp_id] = []
        self.candidates_pending[exp_id] = []
        # One assistant per cross-validation fold, each named name_i.
        for i in range(self.cv):
            exp_ass = ExperimentAssistant(
                optimizer, optimizer_arguments=optimizer_arguments,
                write_directory_base=self._lab_run_directory,
                csv_write_frequency=1)
            exp_ass.init_experiment(name + "_" + str(i), param_defs, exp_id,
                                    notes, minimization)
            self._exp_assistants[exp_id].append(exp_ass)
            self.candidates_pending[exp_id].append([])
        self._logger.info("Experiment initialized successfully.")
        return exp_id
    def setup(self):
        """
        Builds the ExperimentAssistant fixture and verifies initialization.

        Checks:
            - the optimizer is instantiated from its string name
            - the optimizer arguments are stored unchanged
            - the minimization flag reaches the experiment
        """
        optimizer_name = "RandomSearch"
        exp_name = "test_init_experiment"
        minimize = True
        self.param_defs = {
            "x": MinMaxNumericParamDef(0, 1),
            "name": NominalParamDef(["A", "B", "C"])
        }
        opt_args = {"multiprocessing": "none"}

        self.EAss = ExperimentAssistant(optimizer_name,
                                        optimizer_arguments=opt_args)
        self.EAss.init_experiment(exp_name, param_defs=self.param_defs,
                                  minimization=minimize)

        assert_equal(self.EAss._optimizer.__class__.__name__, optimizer_name)
        assert_equal(self.EAss._optimizer_arguments, opt_args)
        assert_equal(self.EAss._experiment.minimization_problem, minimize)
    def test_init_experiment(self):
        """
        Checks that re-initializing an existing experiment, or replacing it
        with an invalid value, raises ValueError.
        """
        optimizer_name = "RandomSearch"
        exp_name = "test_init_experiment"
        minimize = True
        self.param_defs = {
            "x": MinMaxNumericParamDef(0, 1),
            "name": NominalParamDef(["A", "B", "C"])
        }
        opt_args = {"multiprocessing": "none"}

        self.EAss = ExperimentAssistant(optimizer_name,
                                        optimizer_arguments=opt_args)
        self.EAss.init_experiment(exp_name, param_defs=self.param_defs,
                                  minimization=minimize)

        # A second init under the same name must be rejected.
        with assert_raises(ValueError):
            self.EAss.init_experiment(exp_name, param_defs=self.param_defs,
                                      minimization=minimize)

        # set_experiment must reject non-Experiment values.
        with assert_raises(ValueError):
            self.EAss.set_experiment("this value does not matter.")
Example 6
0
    def _load_exp_assistant_from_path(self, path):
        """
        Restores a complete experiment assistant from *path*.

        Reads exp_assistant.json under ``path`` to recover the optimizer
        class, its arguments and the write directory, loads the experiment
        itself from that path, rebuilds the assistant and registers it under
        its experiment id.

        Parameters
        ----------
        path : string
            The path from which to initialize. This must contain an
            exp_assistant.json as specified.
        """
        self._logger.debug("Loading Exp_assistant from path %s" % path)
        with open(path + "/exp_assistant.json", 'r') as infile:
            stored = json.load(infile)

        opt_class = stored["optimizer_class"]
        opt_args = stored["optimizer_arguments"]
        write_dir = stored["write_dir"]
        ensure_directory_exists(write_dir)
        self._logger.debug(
            "\tLoaded exp_parameters: "
            "optimizer_class: %s, optimizer_arguments: %s,"
            "write_dir: %s" %
            (opt_class, opt_args, write_dir))
        loaded_exp = self._load_experiment(path)
        self._logger.debug("\tLoaded Experiment. %s" % loaded_exp.to_dict())

        assistant = ExperimentAssistant(optimizer_class=opt_class,
                                        experiment=loaded_exp,
                                        optimizer_arguments=opt_args,
                                        write_dir=write_dir)

        # Refuse to silently overwrite an assistant that is already loaded.
        if assistant.exp_id in self._exp_assistants:
            raise ValueError("Loaded exp_id is duplicated in experiment! id "
                             "is %s" % assistant.exp_id)
        self._exp_assistants[assistant.exp_id] = assistant
        self._logger.info("Successfully loaded experiment from %s." % path)
class TestExperimentAssistant(object):
    """
    Tests the experiment assistant.
    """
    # Fixture state, populated by setup() before every test.
    EAss = None
    param_defs = None

    def setup(self):
        """
        Tests whether the initialization works correctly.
        Tests:
            - optimizer correct
            - minimization correct
            - param_defs correct
        """
        optimizer = "RandomSearch"
        name = "test_init_experiment"
        self.param_defs = {
            "x": MinMaxNumericParamDef(0, 1),
            "name": NominalParamDef(["A", "B", "C"])
        }
        minimization = True

        optimizer_params = {
            "multiprocessing": "none"
        }

        self.EAss = ExperimentAssistant(optimizer, optimizer_arguments=optimizer_params)
        self.EAss.init_experiment(name, param_defs=self.param_defs, minimization=minimization)

        assert_equal(self.EAss._optimizer.__class__.__name__, optimizer)
        assert_equal(self.EAss._optimizer_arguments, optimizer_params)
        assert_equal(self.EAss._experiment.minimization_problem, minimization)

    def test_init_experiment(self):
        """
        Tests that initializing a duplicate experiment or setting an invalid
        experiment value raises ValueError.
        """
        optimizer = "RandomSearch"
        name = "test_init_experiment"
        self.param_defs = {
            "x": MinMaxNumericParamDef(0, 1),
            "name": NominalParamDef(["A", "B", "C"])
        }
        minimization = True

        optimizer_params = {
            "multiprocessing": "none"
        }
        self.EAss = ExperimentAssistant(optimizer, optimizer_arguments=optimizer_params)
        self.EAss.init_experiment(name, param_defs=self.param_defs, minimization=minimization)

        # Re-initializing the same experiment must be rejected.
        with assert_raises(ValueError):
            self.EAss.init_experiment(name, param_defs=self.param_defs, minimization=minimization)

        # set_experiment must reject non-Experiment values.
        with assert_raises(ValueError):
            self.EAss.set_experiment("this value does not matter.")

    def teardown(self):
        """Shuts the assistant down after each test."""
        self.EAss.set_exit()

    def test_get_next_candidate(self):
        """
        Tests the get next candidate function.
        Tests:
            - The candidate's parameters are acceptable
            - A paused candidate is handed out again on the next request
        """
        cand = None
        counter = 0
        # Poll for up to ~2 seconds for the first candidate.
        while cand is None and counter < 20:
            cand = self.EAss.get_next_candidate()
            time.sleep(0.1)
            counter += 1
        if counter == 20:
            raise Exception("Received no result in the first 2 seconds.")
        assert_is_none(cand.result)
        params = cand.params
        assert_less_equal(params["x"], 1)
        assert_greater_equal(params["x"], 0)
        assert_in(params["name"], self.param_defs["name"].values)
        self.EAss.update(cand, "pausing")
        time.sleep(1)
        new_cand = None
        # Bug fix: restart the poll budget. Previously the iterations spent
        # waiting for the first candidate were also counted against this
        # second loop, which could make the second wait spuriously short.
        counter = 0
        while new_cand is None and counter < 20:
            new_cand = self.EAss.get_next_candidate()
            time.sleep(0.1)
            counter += 1
        if counter == 20:
            raise Exception("Received no result in the first 2 seconds.")
        assert_equal(new_cand, cand)


    def test_update(self):
        """
        Tests whether update works.
            - candidate exists in the list
            - result is equal
            - the status message incorrect error works
            - the candidate instance check works
        """
        cand = self.EAss.get_next_candidate()
        cand.result = 1
        self.EAss.update(cand)
        assert_items_equal(self.EAss._experiment.candidates_finished, [cand])
        assert_equal(self.EAss._experiment.candidates_finished[0].result, 1)

        self.EAss.update(cand, "pausing")
        self.EAss.update(cand, "working")
        # Only "finished", "pausing" and "working" style statuses are valid.
        with assert_raises(ValueError):
            self.EAss.update(cand, status="No status.")

        # Non-Candidate values must be rejected.
        with assert_raises(ValueError):
            self.EAss.update(False)

    def test_get_best_candidate(self):
        """
        Tests whether get_best_candidate works.
            - Whether the best of the two candidates is the one it should be.
        """
        cand_one = self.EAss.get_next_candidate()
        cand_one.result = 1
        self.EAss.update(cand_one)

        cand_two = self.EAss.get_next_candidate()
        cand_two.result = 0
        self.EAss.update(cand_two)

        # Minimization problem: the lower result (0) wins.
        assert_equal(cand_two, self.EAss.get_best_candidate())

    def test_all_plots_working(self):
        """
        Tests whether all of the plot functions work. Does not test for correctness.
        """
        cand = self.EAss.get_next_candidate()
        cand.result = 1
        self.EAss.update(cand)

        cand = self.EAss.get_next_candidate()
        cand.result = 0

        cand = self.EAss.get_next_candidate()
        cand.result = 2
        self.EAss.plot_result_per_step()

    def test_get_candidates_dict(self):
        """
        Tests that get_candidates returns a dict with list-valued
        "finished", "pending" and "working" entries.
        """
        candidates_dict = self.EAss.get_candidates()
        assert_true(isinstance(candidates_dict, dict))
        for l in ["finished", "pending", "working"]:
            assert_in(l, candidates_dict)
            assert_true(isinstance(candidates_dict[l], list))
Example 8
0
    def init_experiment(self,
                        name,
                        optimizer,
                        param_defs,
                        exp_id=None,
                        notes=None,
                        optimizer_arguments=None,
                        minimization=True):
        """
        Initializes an experiment.

        Parameters
        ----------
        name : string
            name of the experiment.
        optimizer : string
            String representation of the optimizer.
        param_defs : dict of parameter definitions
            Dictionary of parameter definition classes.
        optimizer_arguments : dict, optional
            A dictionary defining the operation of the optimizer. See the
            respective documentation of the optimizers.
            Default is None, which are default values.
        exp_id : string or None, optional
            The id of the experiment, which will be used to reference it.
            Should be a proper uuid, and especially has to be unique. If it is
            not, an error may be returned.
        notes : jsonable object or None, optional
            Any note that you'd like to put in the experiment. Could be used
            to provide some details on the experiment, on the start time or the
            user starting it.
        minimization : bool, optional
            Whether the problem is one of minimization. Defaults to True.

        Returns
        -------
        exp_id : string
            String representing the id of the experiment or "failed" if failed.

        Raises
        ------
        ValueError :
            Iff there already is an experiment with the exp_id for this lab
            assistant. Does not occur if no exp_id is given.
        """
        self._logger.debug("Initializing new experiment. Parameters: "
                           "name: %s, optimizer: %s, param_defs: %s, "
                           "exp_id: %s, notes: %s, optimizer_arguments: %s, "
                           "minimization: %s" %
                           (name, optimizer, param_defs, exp_id, notes,
                            optimizer_arguments, minimization))
        # Membership test directly on the dict; .keys() is redundant.
        if exp_id in self._exp_assistants:
            raise ValueError("Already an experiment with id %s registered." %
                             exp_id)

        if exp_id is None:
            # Draw fresh uuids until an unused one is found.
            while True:
                exp_id = uuid.uuid4().hex
                if exp_id not in self._exp_assistants:
                    break
            self._logger.debug("\tGenerated new exp_id: %s" % exp_id)

        if not self._write_dir:
            exp_assistant_write_directory = None
        else:
            # Bug fix: pass path components as separate arguments. The
            # original pre-concatenated them with "/" and handed os.path.join
            # a single string, defeating its purpose (and the hard-coded "/"
            # is not portable).
            exp_assistant_write_directory = os.path.join(self._write_dir,
                                                         exp_id)
            ensure_directory_exists(exp_assistant_write_directory)
        self._logger.debug("\tExp_ass directory: %s" %
                           exp_assistant_write_directory)

        exp = experiment.Experiment(name, param_defs, exp_id, notes,
                                    minimization)

        exp_ass = ExperimentAssistant(optimizer,
                                      experiment=exp,
                                      optimizer_arguments=optimizer_arguments,
                                      write_dir=exp_assistant_write_directory)
        self._exp_assistants[exp_id] = exp_ass
        self._logger.info("Experiment initialized successfully with id %s." %
                          exp_id)
        # Persist lab-assistant state so the new experiment survives restarts.
        self._write_state_to_file()
        return exp_id
Example 9
0
    def clone_experiments_by_id(self, exp_id, optimizer,
                                  optimizer_arguments, new_exp_name):
        """
        Take an existing experiment managed by this lab assistant,
        fully clone it and store it under a new name to use it with a new
        optimizer. This functionality can be used to initialize several experiments
        of several optimizers with the same points.
        For the given exp_name all underlying experiment instances are cloned and renamed.
        Then a new experiment assistant is instantiated given the cloned and renamed
        experiment using the given optimizer. The new experiment assistants are stored
        and managed inside this lab assistant. The old experiment is not touched
        and continues to be part of this lab assistant.
        The parameter definitions and other experiment specific configuration is
        copied over from the old to the new experiment.

        Parameters
        ----------
        exp_id : string
            The id of the experiment to be cloned.
        new_exp_name: string, optional
            The name the cloned experiment will have after creation. If None,
            the old name is reused.
        optimizer : Optimizer instance or string
            This is an optimizer implementing the corresponding functions: It
            gets an experiment instance, and returns one or multiple candidates
            which should be evaluated next.
            Alternatively, it can be a string corresponding to the optimizer,
            as defined by apsis.utilities.optimizer_utils.
        optimizer_arguments : dict, optional
            These are arguments for the optimizer. Refer to their documentation
            as to which are available.

        Returns
        -------
        new_exp_id : string
            The id under which the cloned assistants are registered.
        """
        # Find an unused id for the cloned experiment group.
        while True:
            new_exp_id = uuid.uuid4().hex
            if new_exp_id not in self._exp_assistants:
                break

        self._exp_assistants[new_exp_id] = []

        #every experiment has self.cv many assistants
        for i in range(len(self._exp_assistants[exp_id])):
            old_exp_assistant = self._exp_assistants[exp_id][i]
            old_exp = old_exp_assistant._experiment

            #clone and rename experiment
            new_exp = old_exp.clone()

            # Bug fix: honour the documented None case. Previously
            # new_exp_name + "_" + str(i) raised TypeError when
            # new_exp_name was None instead of reusing the old name.
            if new_exp_name is None:
                # The old name already carries the per-fold _i suffix.
                new_exp.name = old_exp.name
            else:
                new_exp.name = new_exp_name + "_" + str(i)

            #recreate exp assistant
            new_exp_assistant = ExperimentAssistant(
                optimizer, optimizer_arguments=optimizer_arguments,
                write_directory_base=self._lab_run_directory,
                csv_write_frequency=1)
            new_exp_assistant.set_experiment(new_exp)
            self._exp_assistants[new_exp_id].append(new_exp_assistant)

        # Deep-copy so the clone does not share mutable pending lists
        # with the original experiment.
        self.candidates_pending[new_exp_id] = copy.deepcopy(
            self.candidates_pending[exp_id])

        self._logger.info("Experiment " + str(exp_id) + " cloned to " +
                         str(new_exp_id) + " and successfully initialized.")
        return new_exp_id
Example 10
0
class TestExperimentAssistant(object):
    """
    Tests the experiment assistant.
    """
    # Fixture state, populated by setup() before every test.
    EAss = None
    param_defs = None

    def setup(self):
        """
        Tests whether the initialization works correctly.
        Tests:
            - optimizer correct
            - minimization correct
            - param_defs correct
        """
        optimizer = "RandomSearch"
        name = "test_init_experiment"
        self.param_defs = {
            "x": MinMaxNumericParamDef(0, 1),
            "name": NominalParamDef(["A", "B", "C"])
        }
        minimization = True

        optimizer_params = {"multiprocessing": "none"}

        exp = experiment.Experiment(name, self.param_defs, minimization)

        self.EAss = ExperimentAssistant(optimizer,
                                        exp,
                                        optimizer_arguments=optimizer_params)

        assert_equal(self.EAss._optimizer.__class__.__name__, optimizer)
        assert_equal(self.EAss._optimizer_arguments, optimizer_params)
        assert_equal(self.EAss._experiment.minimization_problem, minimization)

    def teardown(self):
        """Shuts the assistant down after each test."""
        self.EAss.set_exit()

    def test_get_next_candidate(self):
        """
        Tests the get next candidate function.
        Tests:
            - The candidate's parameters are acceptable
            - A paused candidate is handed out again on the next request
        """
        cand = None
        counter = 0
        # Poll for up to ~2 seconds for the first candidate.
        while cand is None and counter < 20:
            cand = self.EAss.get_next_candidate()
            time.sleep(0.1)
            counter += 1
        if counter == 20:
            raise Exception("Received no result in the first 2 seconds.")
        assert_is_none(cand.result)
        params = cand.params
        assert_less_equal(params["x"], 1)
        assert_greater_equal(params["x"], 0)
        assert_in(params["name"], self.param_defs["name"].values)
        self.EAss.update(cand, "pausing")
        time.sleep(1)
        new_cand = None
        # Bug fix: restart the poll budget. Previously the iterations spent
        # waiting for the first candidate were also counted against this
        # second loop, which could make the second wait spuriously short.
        counter = 0
        while new_cand is None and counter < 20:
            new_cand = self.EAss.get_next_candidate()
            time.sleep(0.1)
            counter += 1
        if counter == 20:
            raise Exception("Received no result in the first 2 seconds.")
        assert_equal(new_cand, cand)

    def test_update(self):
        """
        Tests whether update works.
            - candidate exists in the list
            - result is equal
            - the status message incorrect error works
            - the candidate instance check works
        """
        cand = self.EAss.get_next_candidate()
        cand.result = 1
        self.EAss.update(cand)
        assert_items_equal(self.EAss._experiment.candidates_finished, [cand])
        assert_equal(self.EAss._experiment.candidates_finished[0].result, 1)

        self.EAss.update(cand, "pausing")
        self.EAss.update(cand, "working")
        # Only "finished", "pausing" and "working" style statuses are valid.
        with assert_raises(ValueError):
            self.EAss.update(cand, status="No status.")

        # Non-Candidate values must be rejected.
        with assert_raises(ValueError):
            self.EAss.update(False)

    def test_get_best_candidate(self):
        """
        Tests whether get_best_candidate works.
            - Whether the best of the two candidates is the one it should be.
        """
        cand_one = self.EAss.get_next_candidate()
        cand_one.result = 1
        self.EAss.update(cand_one)

        cand_two = self.EAss.get_next_candidate()
        cand_two.result = 0
        self.EAss.update(cand_two)

        # Minimization problem: the lower result (0) wins.
        assert_equal(cand_two, self.EAss.get_best_candidate())

    def test_all_plots_working(self):
        """
        Tests whether all of the plot functions work. Does not test for correctness.
        """
        cand = self.EAss.get_next_candidate()
        cand.result = 1
        self.EAss.update(cand)

        cand = self.EAss.get_next_candidate()
        cand.result = 0

        cand = self.EAss.get_next_candidate()
        cand.result = 2
        self.EAss.plot_result_per_step()

    def test_get_candidates_dict(self):
        """
        Tests that get_candidates returns a dict with list-valued
        "finished", "pending" and "working" entries.
        """
        candidates_dict = self.EAss.get_candidates()
        assert_true(isinstance(candidates_dict, dict))
        for l in ["finished", "pending", "working"]:
            assert_in(l, candidates_dict)
            assert_true(isinstance(candidates_dict[l], list))