Example #1
def from_dict(d):
    experiment_logger = logging_utils.get_logger("models.Experiment")
    experiment_logger.log(5, "Reconstructing experiment from dict %d", d)
    name = d["name"]
    param_defs = dict_to_param_defs(d["parameter_definitions"])
    minimization_problem = d["minimization_problem"]
    notes = d["notes"]
    exp_id = d["exp_id"]
    experiment_logger.debug("Reconstructed attributes.")
    cand_dict_finished = d["candidates_finished"]
    cands_finished = []
    for c in cand_dict_finished:
        cands_finished.append(candidate.from_dict(c))
    cand_dict_pending = d["candidates_pending"]
    cands_pending = []
    for c in cand_dict_pending:
        cands_pending.append(candidate.from_dict(c))
    cand_dict_working = d["candidates_working"]
    cands_working = []
    for c in cand_dict_working:
        cands_working.append(candidate.from_dict(c))
    experiment_logger.log(5, "Reconstructed candidates.")
    best_candidate = d["best_candidate"]

    exp = Experiment(name, param_defs, exp_id, notes, minimization_problem)

    exp.candidates_finished = cands_finished
    exp.candidates_pending = cands_pending
    exp.candidates_working = cands_working
    exp._update_best()
    exp.last_update_time = d.get("last_update_time", time.time())

    experiment_logger.log(5, "Finished reconstruction. Exp is %s.", exp)

    return exp
Example #2
def from_dict(d):
    """
    Builds a new candidate from a dictionary.

    Parameters
    ----------
    d : dict
        Uses the same format as in Candidate.to_dict.

    Returns
    -------
    c : Candidate
        The corresponding candidate.
    """
    cand_logger = get_logger("models.Candidate")
    cand_logger.log(5, "Constructing new candidate from dict %s.", d)
    cand_id = None
    if "cand_id" in d:
        cand_id = d["cand_id"]
    c = Candidate(d["params"], cand_id=cand_id)
    c.result = d.get("result", None)
    c.cost = d.get("cost", None)
    c.failed = d.get("failed", False)
    c.last_update_time = d.get("last_update_time")
    c.generated_time = d.get("generated_time")
    c.worker_information = d.get("worker_information", None)
    cand_logger.log(5, "Constructed candidate is %s", c)
    return c
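
# Usage sketch (not from the original source): from_dict needs only the
# "params" key; every other key shown above falls back to its default.
# The import path apsis.models.candidate is assumed here.
from apsis.models import candidate

restored = candidate.from_dict({"params": {"x": 0.3, "y": 7.1}, "result": 2.5})
print(restored.result)   # 2.5
print(restored.failed)   # False (default)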
Example #3
    def __init__(self, name, optimizer, param_defs, experiment=None, optimizer_arguments=None,
                 minimization=True, write_directory_base="/tmp/APSIS_WRITING",
                 experiment_directory_base=None, csv_write_frequency=1):
        """
        Initializes the BasicExperimentAssistant.

        Parameters
        ----------
        name : string
            The name of the experiment. This does not have to be unique, but is
            for human orientation.
        optimizer : Optimizer instance or string
            This is an optimizer implementing the corresponding functions: It
            gets an experiment instance, and returns one or multiple candidates
            which should be evaluated next.
            Alternatively, it can be a string corresponding to the optimizer,
            as defined by apsis.utilities.optimizer_utils.
        param_defs : dict of ParamDef.
            This is the parameter space defining the experiment.
        experiment : Experiment
            Preinitialize this assistant with an existing experiment.
        optimizer_arguments : dict, optional
            These are arguments for the optimizer. Refer to the optimizer's
            documentation as to which are available.
        minimization : bool, optional
            Whether the problem is one of minimization or maximization.
            Defaults to True (minimization).
        write_directory_base : string, optional
            The global base directory for all writing. Only used to create
            experiment_directory_base if that is not given.
        experiment_directory_base : string or None, optional
            The directory to write all the results to. If not given, a
            directory with a timestamp will automatically be created in
            write_directory_base.
        csv_write_frequency : int, optional
            States how often the csv file should be written to.
            If set to 0 no results will be written.
        """
        self.logger = get_logger(self)
        self.logger.info("Initializing experiment assistant.")
        self.optimizer = optimizer
        self.optimizer_arguments = optimizer_arguments

        if experiment is None:
            self.experiment = Experiment(name, param_defs, minimization)
        else:
            self.experiment = experiment

        self.csv_write_frequency = csv_write_frequency

        if self.csv_write_frequency != 0:
            self.write_directory_base = write_directory_base
            if experiment_directory_base is not None:
                self.experiment_directory_base = experiment_directory_base
                ensure_directory_exists(self.experiment_directory_base)
            else:
                self._create_experiment_directory()
        self.logger.info("Experiment assistant successfully initialized.")
Example #4
    def __init__(self, params=None):
        self.logger = get_logger(self, specific_log_name=self.LOG_FILE_NAME)

        self.params = params

        if self.params is None:
            self.params = {}

        self.optimization_random_steps = self.params.get(
            "optimization_random_steps", 1000)
Example #6
    def __init__(self, experiment, optimizer_params):
        """
        Initializes the optimizer.

        Parameters
        ----------
        experiment : Experiment
            The experiment representing the current state of the execution.
        optimizer_params : dict, optional
            Dictionary of the optimizer parameters. If None, some standard
            parameters will be assumed.
            One existing parameter is 'treat_failed', which changes the
            treatment of failed candidates. Possible values are
                - 'ignore': Ignores the failed candidates.
                - 'fixed_value': Changes all occurrences of failed values
                    to a fixed value. Default value is 1e6 or 1e-6.
                - 'worst_mult': Changes all occurrences of failed values to a
                    fixed multiple of the worst result. Specifically, the
                    new value will be (worst_value - best_value) * x +
                    worst_value. This is the default treatment; when given as
                    a bare string, x defaults to 2.
            All of these can either be specified as strings (and use a
            standard value) or as tuples, in which case the first entry is
            treated as the string and the second as the value for the parameter.

        Raises
        ------
        ValueError
            Iff the experiment is not supported.
        """
        self._logger = logging_utils.get_logger(self)
        if not self._is_experiment_supported(experiment):
            raise ValueError(
                "Experiment contains unsupported parameters. "
                "Optimizer %s supports %s, experiment parameters "
                "are %s." %
                (self.__class__.__name__, self.SUPPORTED_PARAM_TYPES,
                 experiment.parameter_definitions))
        self._experiment = experiment
        if optimizer_params is None:
            optimizer_params = {}
        self.treat_failed = optimizer_params.get("treat_failed", "worst_mult")
        second_value = None
        if not isinstance(self.treat_failed, tuple):
            if self.treat_failed == "ignore":
                second_value = False
            elif self.treat_failed == "fixed_value":
                if self._experiment.minimization_problem:
                    second_value = 1e6
                else:
                    second_value = 1e-6
            elif self.treat_failed == "worst_mult":
                second_value = 2
            self.treat_failed = (self.treat_failed, second_value)
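
# Illustrative sketch (not the apsis implementation): how a failed candidate's
# result could be substituted under each 'treat_failed' mode documented above,
# using the normalized (string, value) tuple built in __init__.
def replace_failed_result(treat_failed, best_value, worst_value):
    kind, x = treat_failed
    if kind == "ignore":
        return None               # the failed candidate is simply dropped
    if kind == "fixed_value":
        return x                  # e.g. 1e6 for minimization problems
    if kind == "worst_mult":
        return (worst_value - best_value) * x + worst_value
    raise ValueError("unknown treat_failed mode %s" % kind)

# With best 0.1, worst 0.9 and the default x = 2, a failed run would be scored
# (0.9 - 0.1) * 2 + 0.9 = 2.5 under 'worst_mult'.
print(replace_failed_result(("worst_mult", 2), 0.1, 0.9))   # 2.5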
Example #7
    def test_init(self):
        """
        Tests whether the initialization works correctly.
        Tests:
            - Whether the directory for writing is correct
            - _exp_assistants is empty
            - logger name is correctly set.
        """
        if os.name != "nt":
            assert_equal(self.LAss._write_directory_base, "/tmp/APSIS_WRITING")
        assert_items_equal(self.LAss._exp_assistants, {})
        assert_equal(self.LAss._logger,
                     get_logger("apsis.assistants.lab_assistant.LabAssistant"))

    def test_init(self):
        """
        Tests whether the initialization works correctly.
        Tests:
            - Whether the directory for writing is correct
            - exp_assistants is empty
            - logger name is correctly set.
        """
        LAss = PrettyLabAssistant()
        assert_equal(LAss.write_directory_base, "/tmp/APSIS_WRITING")
        assert_items_equal(LAss.exp_assistants, {})
        assert_equal(LAss.logger,
                     get_logger("apsis.assistants.lab_assistant.PrettyLabAssistant"))
    def __init__(self, optimizer_class, optimizer_arguments=None,
                 write_directory_base=None, experiment_directory=None,
                 csv_write_frequency=1):
        """
        Initializes this experiment assistant.

        Note that calling this function does not yet create an experiment, for
        that, use init_experiment. If there is an already existing experiment,
        you can just set self._experiment.

        Parameters
        ----------
        optimizer_class : subclass of Optimizer
            The class of the optimizer, used to initialize it.
        optimizer_arguments : dict, optional
            The dictionary of optimizer arguments. If None, default values will
            be used.
        experiment_directory : string, optional
            The folder to which the csv intermediary results and the plots will
            be written. Default is <write_directory_base>/exp_id.
        write_directory_base : string, optional
            The base directory. In the default case, this is dependent on the
            OS. On Windows, it is set to ./APSIS_WRITING/. On Linux,
            it is set to /tmp/APSIS_WRITING/. If an
            experiment_directory has been given, this will be ignored.
        csv_write_frequency : int, optional
            This sets the frequency with which the csv file is written. If set
            to 1 (the default), it writes every step. If set to 2, every second
            and so on. Note that it still writes out every step eventually.
        """
        self._logger = get_logger(self)
        self._logger.info("Initializing experiment assistant.")
        self._csv_write_frequency = csv_write_frequency
        self._optimizer = optimizer_class
        self._optimizer_arguments = optimizer_arguments
        if self._csv_write_frequency != 0:
            if experiment_directory is not None:
                self._experiment_directory_base = experiment_directory
                ensure_directory_exists(self._experiment_directory_base)
            else:
                if write_directory_base is None:
                    if os.name == "nt":
                        self._write_directory_base = \
                            os.path.relpath("APSIS_WRITING")
                    else:
                        self._write_directory_base = "/tmp/APSIS_WRITING"
                else:
                    self._write_directory_base = write_directory_base
        self._logger.info("Experiment assistant for successfully "
                         "initialized.")
Example #10
def start_apsis(port=5000, validation=False, cv=5):
    """
    Starts apsis.

    Initializes logger, LabAssistant and the REST app.
    """
    global lAss, _logger
    _logger = get_logger("REST_interface")
    if validation:
        lAss = ValidationLabAssistant(cv=cv)
    else:
        lAss = LabAssistant()
    _logger.info("Finished initialization. Interface running now.")
    app.run(host='0.0.0.0', debug=False, port=port)
Example #11
    def __init__(self,
                 optimizer_class,
                 experiment,
                 out_queue,
                 in_queue,
                 optimizer_params=None):
        """
        Initializes this backend.

        Parameters
        ----------
        optimizer_class : an Optimizer subclass
            The class of optimizer this should abstract from. The optimizer is
            then initialized here.
        experiment : Experiment
            The experiment representing the current state of the execution.
        out_queue : Queue
            The queue on which to put the candidates.
        in_queue : Queue
            The queue on which to receive the new experiments.
        optimizer_params : dict, optional
            Dictionary of the optimizer parameters. If None, some standard
            parameters will be assumed.
            Supports the parameter "min_candidates", which sets the number
            of candidates that should be kept ready (default 5), and
            "update_time", the minimum time in seconds between checking for
            updates (default 0.1).
        """
        self._logger = logging_utils.get_logger(self)
        self._logger.debug(
            "Initializing queue backend. Parameters: "
            "optimizer_class %s, experiment %s, out_queue %s, "
            "in_queue %s, optimizer_params %s", optimizer_class, experiment,
            out_queue, in_queue, optimizer_params)

        self._out_queue = out_queue
        self._in_queue = in_queue
        if optimizer_params is None:
            optimizer_params = {}
        self._min_candidates = optimizer_params.get("min_candidates", 5)
        self._update_time = optimizer_params.get("update_time", 0.1)
        self._optimizer = optimizer_class(experiment, optimizer_params)
        self._exited = False
        self._experiment = experiment
        self._logger.debug(
            "Set the parameters: out_queue is %s, "
            "in_queue %s, optimizer_params %s, "
            "min_candidates %s, update_time %s,"
            " optimizer %s, exited %s, experiment %s", out_queue, in_queue,
            optimizer_params, self._min_candidates, self._update_time,
            self._optimizer, self._exited, self._experiment)
    def __init__(self, params=None):
        """
        Initializes the acquisition function.

        Parameters
        ----------
        params : dict or None, optional
            The dictionary of parameters defining the behaviour of the
            acquisition function. Supports at least max_searcher and
            multi_searcher.
        """
        self._logger = get_logger(self, specific_log_name=self.LOG_FILE_NAME)
        if params is None:
            params = {}
        self.params = params
Example #13
    def __init__(self, write_directory_base="/tmp/APSIS_WRITING"):
        """
        Initializes the lab assistant with no experiments.

        Parameters
        ----------
        write_directory_base : String, optional
            The directory to write all the results and plots to.
        """
        self.exp_assistants = {}
        self.logger = get_logger(self)
        self.logger.info("Initializing laboratory assistant.")
        self.write_directory_base = write_directory_base
        self.global_start_date = time.time()

        self._init_directory_structure()
        self.logger.info("laboratory assistant successfully initialized.")
Example #14
    def __init__(self, params, cand_id=None, worker_information=None):
        """
        Initializes the unevaluated candidate object.

        Parameters
        ----------
        params : dict of string keys
            A dictionary of parameter values. The keys must correspond to the
            problem definition.
            The dictionary requires one key - and value - per parameter
            defined.
        cand_id : string, optional
            The uuid (as a hex string) identifying this candidate. This is
            used to compare candidates over server and client borders.
            Note that this should only be set explicitly if you are
            instantiating an already known candidate with its already known
            UUID. Do not explicitly set the uuid for a new candidate!
        worker_information : string, optional
            This is worker-settable information which might be used for
            communicating things necessary for resuming evaluations et cetera.

        Raises
        ------
        ValueError
            Iff params is not a dictionary.
        """
        if cand_id is None:
            cand_id = uuid.uuid4().hex
        self.cand_id = cand_id
        self._logger = get_logger(self, extra_info="cand_id " + str(cand_id))
        self._logger.debug(
            "Initializing new candidate. Params %s, cand_id %s,"
            "worker_info %s", params, cand_id, worker_information)

        if not isinstance(params, dict):
            self._logger.error("No parameter dict given, received %s instead",
                               params)
            raise ValueError("No parameter dictionary given, received %s "
                             "instead" % params)
        self.failed = False
        self.params = params
        self.worker_information = worker_information
        self.last_update_time = time.time()
        self.generated_time = time.time()
        self._logger.debug("Finished initializing the candidate.")
Example #15
    def __init__(self, write_dir=None):
        """
        Initializes the lab assistant.

        Parameters
        ----------
        write_dir: string, optional
            Sets the write directory for the lab assistant. If None (default),
            nothing will be written.
        """
        self._logger = get_logger(self)
        self._logger.info("Initializing lab assistant.")
        self._logger.info("\tWriting results to %s" % write_dir)
        self._write_dir = write_dir

        self._exp_assistants = {}

        reloading_possible = True
        try:
            if self._write_dir:
                with open(self._write_dir + "/lab_assistant.json", "r"):
                    pass
            else:
                self._logger.debug("\tReloading impossible due to no "
                                   "_write_dir specified.")
                reloading_possible = False
        except IOError:
            self._logger.debug("\tReloading impossible due to IOError - "
                               "probably no lab_assistant existing.")
            reloading_possible = False

        if not reloading_possible:
            self._global_start_date = time.time()
        else:
            # Reload the previous state from lab_assistant.json.
            with open(self._write_dir + "/lab_assistant.json", 'r') as infile:
                lab_assistant_json = json.load(infile)
            self._global_start_date = lab_assistant_json["global_start_date"]
            for p in lab_assistant_json["exp_assistants"].values():
                self._load_exp_assistant_from_path(p)
            self._logger.debug("\tReloaded all exp_assistants.")

        self._write_state_to_file()
        self._logger.info("lab assistant successfully initialized.")
Example #16
    def __init__(self, optimizer_class, experiment, optimizer_params=None):
        """
        Initializes a new QueueBasedOptimizer class.

        Parameters
        ----------
        optimizer_class : an Optimizer subclass
            The class of optimizer this should abstract from. The optimizer is
            then initialized here.
        experiment : Experiment
            The experiment representing the current state of the execution.
        optimizer_params : dict, optional
            Dictionary of the optimizer parameters. If None, some standard
            parameters will be assumed.
            Supports the parameter "min_candidates", which sets the number
            of candidates that should be kept ready. Default is 5.
            Supports the parameter "update_time", which sets the minimum time
            in seconds between checking for updates. Default is 0.1s
        """
        self._logger = logging_utils.get_logger(self)
        self._logger.debug(
            "Initializing new QueueBasedLogger. "
            "optimizer_class is %s, experiment %s, "
            "optimizer_params %s", optimizer_class, experiment,
            optimizer_params)
        self._optimizer_in_queue = Queue.Queue()
        self._optimizer_out_queue = Queue.Queue()
        self._optimizer_class = optimizer_class
        self.SUPPORTED_PARAM_TYPES = optimizer_class.SUPPORTED_PARAM_TYPES

        self._logger.debug("Initialized queues. in_queue is %s, out_queue %s",
                           self._optimizer_in_queue, self._optimizer_out_queue)

        p = threading.Thread(target=dispatch_queue_backend,
                             args=(optimizer_class, optimizer_params,
                                   experiment, self._optimizer_out_queue,
                                   self._optimizer_in_queue))
        p.start()
        self._logger.debug("Started thread.")
        super(QueueBasedOptimizer, self).__init__(experiment, optimizer_params)
Example #17
    def __init__(self, write_directory_base=None):
        """
        Initializes the lab assistant.

        Parameters
        ----------
        write_directory_base : string, optional
            Sets the base write directory for the lab assistant. If None
            (default) the directory depends on the operating system.
            ./APSIS_WRITING if on Windows, /tmp/APSIS_WRITING otherwise.
        """
        self._logger = get_logger(self)
        if write_directory_base is None:
            if os.name == "nt":
                write_directory_base = os.path.relpath("APSIS_WRITING")
            else:
                write_directory_base = "/tmp/APSIS_WRITING"
        self._logger.info("Initializing lab assistant.")
        self._logger.info("Writing results to %s" %write_directory_base)
        self._write_directory_base = write_directory_base
        self._global_start_date = time.time()
        self._init_directory_structure()
        self._exp_assistants = {}
        self._logger.info("lab assistant successfully initialized.")
Example #18
    def __init__(self,
                 optimizer_class,
                 experiment,
                 optimizer_arguments=None,
                 write_dir=None):
        """
        Initializes this experiment assistant.

        Note that calling this function does not yet create an experiment, for
        that, use init_experiment. If there is an already existing experiment,
        you can just set self._experiment.

        Parameters
        ----------
        optimizer_class : subclass of Optimizer
            The class of the optimizer, used to initialize it.
        experiment : Experiment
            The experiment representing the data of this experiment assistant.
        optimizer_arguments : dict, optional
            The dictionary of optimizer arguments. If None, default values will
            be used.
        write_dir : basestring, optional
            The directory the state of this experiment assistant is regularly
            written to. If this is None (default), no state will be written.
        """
        self._logger = get_logger(self,
                                  extra_info="exp_id: " +
                                  str(experiment.exp_id))
        self._logger.info("Initializing experiment assistant.")
        self._optimizer = optimizer_class
        self._optimizer_arguments = optimizer_arguments
        self._write_dir = write_dir
        self._experiment = experiment
        self._init_optimizer()
        self._write_state_to_file()
        self._logger.info("Experiment assistant successfully initialized.")
from sklearn.datasets import fetch_mldata
from sklearn.cross_validation import train_test_split
from sklearn.svm import NuSVC, SVC, libsvm
import os
import logging
from apsis.assistants.lab_assistant import PrettyLabAssistant, ValidationLabAssistant
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score

from apsis.models.parameter_definition import *
from apsis.utilities.logging_utils import get_logger
from apsis.utilities.randomization import check_random_state

logger = get_logger("demos.demo_MNIST")

# The goal of this demo is to run an optimization on MNIST with three
# different optimizers. It is used for comparing these optimizers.
# If you want to try different optimizers or regressors, change them in the
# lowest part of this file.


def do_evaluation(LAss, name, regressor, mnist_data_train, mnist_data_test,
                  mnist_target_train, mnist_target_test):
    """
    This does a single evaluation of the regressor.

    It gets the next candidate to evaluate, sets the parameters for the
    regressor, then trains it and predicts on the test data. Afterwards, it
    updates LAss with the new result.

    Parameters
Example #23
    def __init__(self, name, parameter_definitions, exp_id=None, notes=None,
                 minimization_problem=True,):
        """
        Initializes an Experiment with a certain parameter definition.

        All of the Candidate lists are set to empty lists, representing an
        experiment with no work done.

        Parameters
        ----------
        name : string
            The name of the experiment. This does not have to be unique, but is
            for human orientation.
        parameter_definitions : dict of ParamDef
            Defines the parameter space of the experiment. Each entry of the
            dictionary has to be a ParamDef, and it is that space over which
            optimization will occur.
        exp_id : string, optional
            The id of the experiment. If None (default), a new uuid hex string
            is generated.
        notes : string or None, optional
            The notes can be used to add additional human-readable information
            to the experiment.
        minimization_problem : bool, optional
            Defines whether the experiment's goal is to find a minimum result,
            for example when evaluating errors, or a maximum result, for
            example when evaluating scores. Is True by default.

        Raises
        ------
        ValueError
            Iff parameter_definitions is not a dictionary.
        """
        self._logger = logging_utils.get_logger(self)
        self._logger.debug("Initializing new experiment. name: %s, "
                           "param_definition: %s, exp_id %s, notes %s, "
                           "minimization_problem %s", name,
                           parameter_definitions, exp_id, notes,
                           minimization_problem)
        self.name = name
        if exp_id is None:
            exp_id = uuid.uuid4().hex
            self._logger.debug("Had to create new exp_id, is %s", exp_id)
        self.exp_id = exp_id
        if not isinstance(parameter_definitions, dict):
            self._logger.error("parameter_definitions are not a dict but %s."
                             %parameter_definitions)
            raise ValueError("parameter_definitions are not a dict but %s."
                             %parameter_definitions)
        for p in parameter_definitions:
            if not isinstance(parameter_definitions[p], ParamDef):
                self._logger.error("Parameter definition of %s is not a "
                                   "ParamDef but %s."
                                   %(p, parameter_definitions[p]))

                raise ValueError("Parameter definition of %s is not a ParamDef"
                                 "but %s." %(p, parameter_definitions[p]))
        self.parameter_definitions = parameter_definitions

        self.minimization_problem = minimization_problem

        self.candidates_finished = []
        self.candidates_pending = []
        self.candidates_working = []

        self.last_update_time = time.time()

        self.notes = notes
        self._logger.debug("Initialization of new experiment finished.")
import numpy as np
import theano.tensor as T

import climin.schedule

import climin.stops
import climin.initialize

from breze.learn.mlp import Mlp
from breze.learn.data import one_hot

from apsis.models.parameter_definition import *
from apsis.assistants.lab_assistant import ValidationLabAssistant
from apsis.utilities.logging_utils import get_logger

logger = get_logger("apsis.demos.demo_MNIST_NN")

start_time = None

def load_MNIST():
    """
    Loads MNIST from the datafile under ./mnist.pkl.gz.

    Returns
    -------
    X, Z : matrix
        Feature and Target matrices of the training set, one-hot encoded.
    VX, VZ : matrix
        Feature and Target matrices of the validation set, one-hot encoded.
    TX, TZ : matrix
        Feature and Target matrices of the test set, one-hot encoded.
Example #25
from apsis.utilities.benchmark_functions import branin_func
from apsis.assistants.lab_assistant import PrettyLabAssistant, ValidationLabAssistant
from apsis.models.parameter_definition import *
from apsis.utilities.randomization import check_random_state
import logging
from apsis.utilities.logging_utils import get_logger

logger = get_logger("demos.demo_branin")


def single_branin_evaluation_step(LAss, experiment_name):
    """
    Does a single evaluation on the branin function and everything necessary
    for it:
    1. get the next candidate to evaluate from the assistant.
    2. evaluate branin at this point.
    3. tell the assistant about the new result.

    Parameters
    ----------
    LAss : LabAssistant
        The LabAssistant to use.
    experiment_name : string
        The name of the experiment for this evaluation.
    """
    to_eval = LAss.get_next_candidate(experiment_name)
    result = branin_func(to_eval.params["x"], to_eval.params["y"])
    to_eval.result = result
    LAss.update(experiment_name, to_eval)

    return to_eval
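
# Usage sketch: assuming LAss is an initialized lab assistant that already
# holds an experiment named "branin_demo" over numeric parameters "x" and "y",
# repeated calls drive the optimization loop.
for _ in range(20):
    evaluated = single_branin_evaluation_step(LAss, "branin_demo")
    logger.info("Evaluated %s -> %s", evaluated.params, evaluated.result)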
Example #26
from apsis.utilities.logging_utils import get_logger

logger = get_logger("apsis.utils.import_utils")


def import_if_exists(module_name):
    """
    Function tries to import a module but will not fail if the module does
    not exist.

    Parameters
    ----------
    module_name : string
        The name of the module to be imported.

    Returns
    -------
    success : bool
        Whether the module was successfully imported.
    module : module or None
        Returns the imported module iff successful, otherwise returns None.
    """
    try:
        module = __import__(module_name)
    except ImportError:
        logger.warning("Module " + str(module_name) +
                        " could not be imported as it could not be found.")
        return False, None
    else:
        return True, module
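
# Usage sketch: import an optional dependency only when it is installed.
success, plt = import_if_exists("matplotlib")
if success:
    print("matplotlib available, version %s" % plt.__version__)
else:
    print("matplotlib not installed, plotting disabled")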
Example #27
from breze.learn.mlp_bobgpu import Mlp, FastDropoutNetwork
from sklearn.preprocessing import scale
from breze.learn.data import one_hot
from apsis.models.parameter_definition import *
from apsis.assistants.lab_assistant import ValidationLabAssistant, BasicLabAssistant
from apsis.utilities.benchmark_functions import branin_func
from apsis.utilities.logging_utils import get_logger
import breze.learn.base
import warnings
import dill
from apsis.utilities.plot_utils import plot_lists
import matplotlib.pyplot as plt
import csv
import math
import pickle

logger = get_logger("apsis.qm7")

start_time = None


def load_qm7():
    datafile = '/home/hpc/pr63so/ga93yih2/gdb13/gdb13_atm.pkl'
    dataset = pickle.load(open(datafile, 'r'))
    split = 1
    P = dataset['P'][range(0, split) + range(split + 1, 5)].flatten()
    X = dataset['B'][P]
    Z = dataset['T'][P]
    Z = Z.reshape(Z.shape[0], 1)
    train_labels = Z
    Ptest = dataset['P'][split]
    TX = dataset['B'][Ptest]
        return True, module