Example #1
    def __init__(self, conf, expl_dims, competence_measure, win_size,
                 competence_mode, k, progress_mode):

        RandomInterest.__init__(self, conf, expl_dims)

        self.competence_measure = competence_measure
        self.win_size = win_size
        self.competence_mode = competence_mode
        self.dist_max = np.linalg.norm(self.bounds[0, :] - self.bounds[1, :])
        self.k = k
        self.progress_mode = progress_mode
        self.data_xc = Dataset(len(expl_dims), 1)
        self.data_sr = Dataset(len(expl_dims), 0)
        self.current_progress = 0.
        self.current_interest = 0.
Example #2
    def __init__(self, conf, expl_dims, competence_measure, win_size,
                 competence_mode, k, progress_mode):

        InterestModel.__init__(self, expl_dims)

        self.bounds = conf.bounds[:, expl_dims]
        self.ndims = self.bounds.shape[1]
        self.competence_measure = competence_measure
        self.win_size = win_size
        self.competence_mode = competence_mode
        # self.dist_max = np.linalg.norm(self.bounds[0, :] - self.bounds[1, :])
        self.dist_max = 2
        self.k = k
        self.progress_mode = progress_mode
        self.data_xc = Dataset(len(expl_dims), 1)
        self.data_sr = Dataset(len(expl_dims), 0)
        self.current_progress = 1e-5
        self.current_interest = 1e-5
Example #3
    def __init__(self, dim_x, dim_y, **kwargs):
        """Create the forward model

        @param dim_x    the input dimension
        @param dim_y    the output dimension
        """
        self.dim_x = dim_x
        self.dim_y = dim_y
        self.dataset = Dataset(dim_x, dim_y)
        self.conf = kwargs
Example #4
    def __init__(self, dim_x, dim_y, **kwargs):
        """Create the forward model

        @param dim_x    the input dimension
        @param dim_y    the output dimension
        """
        self.dim_x    = dim_x
        self.dim_y    = dim_y
        self.dataset  = Dataset(dim_x, dim_y)
        self.conf     = kwargs
Example #5
    def __init__(self, conf, expl_dims, x_card, cells_win_size, eps_random,
                 measure, win_size, competence_measure, competence_mode, k,
                 progress_mode):

        DiscretizedProgress.__init__(self, conf, expl_dims, x_card,
                                     cells_win_size, eps_random, measure)

        self.bounds = conf.bounds[:, expl_dims]
        self.ndims = self.bounds.shape[1]
        self.competence_measure = competence_measure
        self.win_size = win_size
        self.competence_mode = competence_mode
        # self.dist_max_comp = np.linalg.norm(self.bounds[0, :] - self.bounds[1, :])
        self.dist_max_comp = 2
        self.k = k
        self.progress_mode = progress_mode
        self.data_xc = Dataset(len(expl_dims), 1)
        self.data_sr = Dataset(len(expl_dims), 0)
        self.current_progress = 1e-5
        self.current_interest = 1e-5
Example #6
    def __init__(self,
                 conf,
                 expl_dims,
                 competence_measure,
                 win_size,
                 competence_mode,
                 k,
                 progress_mode):

        RandomInterest.__init__(self, conf, expl_dims)

        self.competence_measure = competence_measure
        self.win_size = win_size
        self.competence_mode = competence_mode
        self.dist_max = np.linalg.norm(self.bounds[0, :] - self.bounds[1, :])
        self.k = k
        self.progress_mode = progress_mode
        self.data_xc = Dataset(len(expl_dims), 1)
        self.data_sr = Dataset(len(expl_dims), 0)
        self.current_progress = 0.
        self.current_interest = 0.
Example #7
    def __init__(self, conf, acquisition, exploration_weight, initial_points, environment, optimisation_iterations, exact_feval):
        ''' :param string acquisition: acquisition function to use, one of "MPI", "EI" or "LCB"
            :param scalar exploration_weight: modulates the exploration in the acquisition (e.g. 2 for LCB and 0.01 for the others)
            :param scalar initial_points: number of initial points given to the Bayesian optimisation
            :param Environment environment: environment on which the optimisation is run
            :param scalar optimisation_iterations: number of iterations of the optimisation
            :param boolean exact_feval: must be False if the environment is noisy
        '''
        for attr in ['m_dims', 's_dims']:
            setattr(self, attr, getattr(conf, attr))
        self.dim_x = len(self.m_dims)
        self.dim_y = len(self.s_dims)
        self.acquisition = acquisition
        self.exploration_weight = exploration_weight
        self.initial_points = initial_points
        self.dataset = Dataset(len(self.m_dims), len(self.s_dims))
        self.conf = conf
        self.mode = 'explore'
        self.environment = environment
        self.optimisation_iterations = optimisation_iterations
        self.exact_feval = exact_feval
Example #8
    def __init__(self, conf, acquisition, exploration_weight, initial_points,
                 environment, optimisation_iterations, exact_feval):
        ''' :param string acquisition: acquisition function to use, one of "MPI", "EI" or "LCB"
            :param scalar exploration_weight: modulates the exploration in the acquisition (e.g. 2 for LCB and 0.01 for the others)
            :param scalar initial_points: number of initial points given to the Bayesian optimisation
            :param Environment environment: environment on which the optimisation is run
            :param scalar optimisation_iterations: number of iterations of the optimisation
            :param boolean exact_feval: must be False if the environment is noisy
        '''
        for attr in ['m_dims', 's_dims']:
            setattr(self, attr, getattr(conf, attr))
        self.dim_x = len(self.m_dims)
        self.dim_y = len(self.s_dims)
        self.acquisition = acquisition
        self.exploration_weight = exploration_weight
        self.initial_points = initial_points
        self.dataset = Dataset(len(self.m_dims), len(self.s_dims))
        self.conf = conf
        self.mode = 'explore'
        self.environment = environment
        self.optimisation_iterations = optimisation_iterations
        self.exact_feval = exact_feval
Example #9
class MiscRandomInterest(RandomInterest):
    """
    Add some features to the RandomInterest random babbling class.
    
    Allows to query the recent interest in the whole space,
    the recent competence on the babbled points in the whole space, 
    the competence around a given point based on a mean of the knns.   
    
    """
    def __init__(self, conf, expl_dims, competence_measure, win_size,
                 competence_mode, k, progress_mode):

        RandomInterest.__init__(self, conf, expl_dims)

        self.competence_measure = competence_measure
        self.win_size = win_size
        self.competence_mode = competence_mode
        self.dist_max = np.linalg.norm(self.bounds[0, :] - self.bounds[1, :])
        self.k = k
        self.progress_mode = progress_mode
        self.data_xc = Dataset(len(expl_dims), 1)
        self.data_sr = Dataset(len(expl_dims), 0)
        self.current_progress = 0.
        self.current_interest = 0.

    def add_xc(self, x, c):
        self.data_xc.add_xy(x, [c])

    def add_sr(self, x):
        self.data_sr.add_xy(x)

    def update_interest(self, i):
        self.current_progress += (1. / self.win_size) * (i -
                                                         self.current_progress)
        self.current_interest = abs(self.current_progress)

    def update(self, xy, ms, snnp=None, sp=None):
        c = self.competence_measure(xy[self.expl_dims],
                                    ms[self.expl_dims],
                                    dist_max=self.dist_max)
        if self.progress_mode == 'local':
            interest = self.interest_xc(xy[self.expl_dims], c)
            self.update_interest(interest)
        elif self.progress_mode == 'global':
            # in 'global' mode, use the sliding-window estimate as this step's interest
            interest = self.interest_global()

        self.add_xc(xy[self.expl_dims], c)
        self.add_sr(ms[self.expl_dims])
        return interest

    def n_points(self):
        return len(self.data_xc)

    def competence_global(self, mode='sw'):
        if self.n_points() > 0:
            if mode == 'all':
                return np.mean([self.data_xc.get_y(idx) for idx in range(self.n_points())])
            elif mode == 'sw':
                idxs = range(self.n_points())[-self.win_size:]
                return np.mean([self.data_xc.get_y(idx) for idx in idxs])
            else:
                raise NotImplementedError
        else:
            return 0.

    def mean_competence_pt(self, x):
        if self.n_points() > self.k:
            _, idxs = self.data_xc.nn_x(x, k=self.k)
            return np.mean([self.data_xc.get_y(idx) for idx in idxs])
        else:
            return self.competence()

    def interest_xc(self, x, c):
        if self.n_points() > 0:
            idx_sg_NN = self.data_xc.nn_x(x, k=1)[1][0]
            sr_NN = self.data_sr.get_x(idx_sg_NN)
            c_old = competence_dist(x, sr_NN, dist_max=self.dist_max)
            return c - c_old
            #return np.abs(c - c_old)
        else:
            return 0.

    def interest_pt(self, x):
        if self.n_points() > self.k:
            _, idxs = self.data_xc.nn_x(x, k=self.k)
            idxs = sorted(idxs)
            v = [self.data_xc.get_y(idx) for idx in idxs]
            n = len(v)
            comp_beg = np.mean(v[:int(float(n) / 2.)])
            comp_end = np.mean(v[int(float(n) / 2.):])
            return np.abs(comp_end - comp_beg)
        else:
            return self.interest_global()

    def interest_global(self):
        if self.n_points() < 2:
            return 0.
        else:
            idxs = range(self.n_points())[-self.win_size:]
            v = [self.data_xc.get_y(idx) for idx in idxs]
            n = len(v)
            comp_beg = np.mean(v[:int(float(n) / 2.)])
            comp_end = np.mean(v[int(float(n) / 2.):])
            return np.abs(comp_end - comp_beg)

    def competence(self):
        return self.competence_global()

    def interest(self):
        if self.progress_mode == 'local':
            return self.current_interest
        elif self.progress_mode == 'global':
            return self.interest_global()
        else:
            raise NotImplementedError
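
The 'local' progress mode in the class above maintains a running exponential moving average of per-goal interest values (see update_interest). A self-contained toy sketch of that update rule, using made-up interest samples and nothing from the library:

# Toy illustration of update_interest(): progress is a running average of
# recent interest samples; interest is its absolute value.
win_size = 10
current_progress = 0.
current_interest = 0.

for i in [0.2, 0.3, 0.1, -0.05, 0.4]:  # hypothetical per-goal interest values
    current_progress += (1. / win_size) * (i - current_progress)
    current_interest = abs(current_progress)

print(current_progress, current_interest)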
Example #10
class MiscDiscretizedInterest(DiscretizedProgress):
    """
    TODO
    Add some features to the RandomInterest random babbling class.

    TODO
    Allows to query the recent interest in the whole space,
    the recent competence on the babbled points in the whole space,
    the competence around a given point based on a mean of the knns.

    """
    def __init__(self, conf, expl_dims, x_card, cells_win_size, eps_random,
                 measure, win_size, competence_measure, competence_mode, k,
                 progress_mode):

        DiscretizedProgress.__init__(self, conf, expl_dims, x_card,
                                     cells_win_size, eps_random, measure)

        self.bounds = conf.bounds[:, expl_dims]
        self.ndims = self.bounds.shape[1]
        self.competence_measure = competence_measure
        self.win_size = win_size
        self.competence_mode = competence_mode
        # self.dist_max_comp = np.linalg.norm(self.bounds[0, :] - self.bounds[1, :])
        self.dist_max_comp = 2
        self.k = k
        self.progress_mode = progress_mode
        self.data_xc = Dataset(len(expl_dims), 1)
        self.data_sr = Dataset(len(expl_dims), 0)
        self.current_progress = 1e-5
        self.current_interest = 1e-5

    def add_xc(self, x, c):
        self.data_xc.add_xy(x, [c])

    def add_sr(self, x):
        self.data_sr.add_xy(x)

    def update_interest(self, i):
        self.current_progress += (1. / self.win_size) * (i -
                                                         self.current_progress)
        self.current_interest = abs(self.current_progress)

    def update(self, xy, ms, snnp=None, sp=None):
        # We update the competence in each cell
        comp = self.measure(xy,
                            ms,
                            dist_min=self.dist_min,
                            dist_max=self.dist_max)
        x = xy[self.expl_dims]
        x_index = self.space.index(x)
        ms_expl = ms[self.expl_dims]
        ms_index = self.space.index(ms_expl)

        # Only give competence if observed s is in the same cell as goal x
        # to avoid random fluctuations of progress due to random choices in the other cells and not to competence variations
        if ms_index == x_index:
            self.discrete_progress.update_from_index_and_competence(
                x_index, self.normalize_measure(comp))

        # Novelty bonus: if novel cell is reached, give it competence (= interest for win_size iterations)
        if sum([qi for qi in self.discrete_progress.queues[ms_index]]) == 0.:
            self.discrete_progress.update_from_index_and_competence(
                ms_index, self.normalize_measure(self.comp_max))

        # We track interest of module
        c = self.competence_measure(xy[self.expl_dims],
                                    ms[self.expl_dims],
                                    dist_max=self.dist_max_comp)
        if self.progress_mode == 'local':
            interest = self.interest_xc(xy[self.expl_dims], c)
            self.update_interest(interest)
        elif self.progress_mode == 'global':
            # in 'global' mode, use the sliding-window estimate as this step's interest
            interest = self.interest_global()

        self.add_xc(xy[self.expl_dims], c)
        self.add_sr(ms[self.expl_dims])
        return interest

    def n_points(self):
        return len(self.data_xc)

    def competence_global(self, mode='sw'):
        if self.n_points() > 0:
            if mode == 'all':
                return np.mean([self.data_xc.get_y(idx) for idx in range(self.n_points())])
            elif mode == 'sw':
                idxs = range(self.n_points())[-self.win_size:]
                return np.mean([self.data_xc.get_y(idx) for idx in idxs])
            else:
                raise NotImplementedError
        else:
            return 0.

    def mean_competence_pt(self, x):
        if self.n_points() > self.k:
            _, idxs = self.data_xc.nn_x(x, k=self.k)
            return np.mean([self.data_xc.get_y(idx) for idx in idxs])
        else:
            return self.competence()

    def interest_xc(self, x, c):
        if self.n_points() > 0:
            idx_sg_NN = self.data_xc.nn_x(x, k=1)[1][0]
            sr_NN = self.data_sr.get_x(idx_sg_NN)
            c_old = self.competence_measure(x,
                                            sr_NN,
                                            dist_max=self.dist_max_comp)
            # c_old = competence_dist(x, sr_NN, dist_max=self.dist_max) # Bug ? why use competence_dist ?
            return c - c_old
            # return np.abs(c - c_old)
        else:
            return 0.

    def interest_pt(self, x):
        if self.n_points() > self.k:
            _, idxs = self.data_xc.nn_x(x, k=self.k)
            idxs = sorted(idxs)
            v = [self.data_xc.get_y(idx) for idx in idxs]
            n = len(v)
            comp_beg = np.mean(v[:int(float(n) / 2.)])
            comp_end = np.mean(v[int(float(n) / 2.):])
            return np.abs(comp_end - comp_beg)
        else:
            return self.interest_global()

    def interest_global(self):
        if self.n_points() < 2:
            return 0.
        else:
            idxs = range(self.n_points())[-self.win_size:]
            v = [self.data_xc.get_y(idx) for idx in idxs]
            n = len(v)
            comp_beg = np.mean(v[:int(float(n) / 2.)])
            comp_end = np.mean(v[int(float(n) / 2.):])
            return np.abs(comp_end - comp_beg)

    def competence(self):
        return self.competence_global()

    def interest(self):
        if self.progress_mode == 'local':
            return self.current_interest
        elif self.progress_mode == 'global':
            return self.interest_global()
        else:
            raise NotImplementedError
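
The novelty bonus in update() above credits a freshly reached cell with the maximal competence, so that it registers as interesting for the next cells_win_size updates. A self-contained toy sketch of that idea; the bounded queues and names below only stand in for the DiscretizedProgress internals and are not the library's API:

from collections import deque

cells_win_size = 5
comp_max = 1.0
queues = {}  # cell index -> bounded queue of recent competences

def observe(index, competence):
    q = queues.setdefault(index, deque(maxlen=cells_win_size))
    if sum(q) == 0.:
        # novelty bonus: a cell with no recorded competence yet gets the maximum
        q.append(comp_max)
    else:
        q.append(competence)

observe(3, 0.2)   # first visit of cell 3 -> stores comp_max
observe(3, 0.2)   # later visits store the measured competence
print(queues[3])  # deque([1.0, 0.2], maxlen=5)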
Example #11
class MiscGaussianInterest(InterestModel):
    """
    Add some features to the RandomInterest random babbling class.

    Allows to query the recent interest in the whole space,
    the recent competence on the babbled points in the whole space,
    the competence around a given point based on a mean of the knns.

    """
    def __init__(self, conf, expl_dims, competence_measure, win_size,
                 competence_mode, k, progress_mode):

        InterestModel.__init__(self, expl_dims)

        self.bounds = conf.bounds[:, expl_dims]
        self.ndims = self.bounds.shape[1]
        self.competence_measure = competence_measure
        self.win_size = win_size
        self.competence_mode = competence_mode
        # self.dist_max = np.linalg.norm(self.bounds[0, :] - self.bounds[1, :])
        self.dist_max = 2
        self.k = k
        self.progress_mode = progress_mode
        self.data_xc = Dataset(len(expl_dims), 1)
        self.data_sr = Dataset(len(expl_dims), 0)
        self.current_progress = 1e-5
        self.current_interest = 1e-5

    def sample(self):
        return np.clip(np.random.randn(self.ndims),
                       a_min=self.bounds[0],
                       a_max=self.bounds[1])

    def sample_given_context(self, c, c_dims):
        '''
        Sample randomly on dimensions not in context
            c: context value on c_dims dimensions, not used
            c_dims: w.r.t sensori space dimensions
        '''
        return self.sample()[list(set(range(self.ndims)) - set(c_dims))]

    def add_xc(self, x, c):
        self.data_xc.add_xy(x, [c])

    def add_sr(self, x):
        self.data_sr.add_xy(x)

    def update_interest(self, i):
        self.current_progress += (1. / self.win_size) * (i -
                                                         self.current_progress)
        self.current_interest = abs(self.current_progress)

    def update(self, xy, ms, snnp=None, sp=None):
        c = self.competence_measure(xy[self.expl_dims],
                                    ms[self.expl_dims],
                                    dist_max=self.dist_max)
        if self.progress_mode == 'local':
            interest = self.interest_xc(xy[self.expl_dims], c)
            self.update_interest(interest)
        elif self.progress_mode == 'global':
            # in 'global' mode, use the sliding-window estimate as this step's interest
            interest = self.interest_global()

        self.add_xc(xy[self.expl_dims], c)
        self.add_sr(ms[self.expl_dims])
        return interest

    def n_points(self):
        return len(self.data_xc)

    def competence_global(self, mode='sw'):
        if self.n_points() > 0:
            if mode == 'all':
                return np.mean([self.data_xc.get_y(idx) for idx in range(self.n_points())])
            elif mode == 'sw':
                idxs = range(self.n_points())[-self.win_size:]
                return np.mean([self.data_xc.get_y(idx) for idx in idxs])
            else:
                raise NotImplementedError
        else:
            return 0.

    def mean_competence_pt(self, x):
        if self.n_points() > self.k:
            _, idxs = self.data_xc.nn_x(x, k=self.k)
            return np.mean([self.data_xc.get_y(idx) for idx in idxs])
        else:
            return self.competence()

    def interest_xc(self, x, c):
        if self.n_points() > 0:
            idx_sg_NN = self.data_xc.nn_x(x, k=1)[1][0]
            sr_NN = self.data_sr.get_x(idx_sg_NN)
            c_old = self.competence_measure(x, sr_NN, dist_max=self.dist_max)
            # c_old = competence_dist(x, sr_NN, dist_max=self.dist_max) # Bug ? why use competence_dist ?
            return c - c_old
            # return np.abs(c - c_old)
        else:
            return 0.

    def interest_pt(self, x):
        if self.n_points() > self.k:
            _, idxs = self.data_xc.nn_x(x, k=self.k)
            idxs = sorted(idxs)
            v = [self.data_xc.get_y(idx) for idx in idxs]
            n = len(v)
            comp_beg = np.mean(v[:int(float(n) / 2.)])
            comp_end = np.mean(v[int(float(n) / 2.):])
            return np.abs(comp_end - comp_beg)
        else:
            return self.interest_global()

    def interest_global(self):
        if self.n_points() < 2:
            return 0.
        else:
            idxs = range(self.n_points())[-self.win_size:]
            v = [self.data_xc.get_y(idx) for idx in idxs]
            n = len(v)
            comp_beg = np.mean(v[:int(float(n) / 2.)])
            comp_end = np.mean(v[int(float(n) / 2.):])
            return np.abs(comp_end - comp_beg)

    def competence(self):
        return self.competence_global()

    def interest(self):
        if self.progress_mode == 'local':
            return self.current_interest
        elif self.progress_mode == 'global':
            return self.interest_global()
        else:
            raise NotImplementedError
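
interest_global() in these classes estimates progress by comparing the mean competence of the older and the newer half of the last win_size recorded points. A standalone numerical sketch with made-up competence values:

import numpy as np

win_size = 10
competences = [0.1, 0.15, 0.2, 0.2, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.65]  # made up

v = competences[-win_size:]
n = len(v)
comp_beg = np.mean(v[:n // 2])  # older half of the window
comp_end = np.mean(v[n // 2:])  # newer half of the window
print(np.abs(comp_end - comp_beg))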
Example #12
class BayesianOptimisation(SensorimotorModel):
    ''' Sensorimotor model using Bayesian optimisation to infer inverse prediction'''
    def __init__(self, conf, acquisition, exploration_weight, initial_points, environment, optimisation_iterations, exact_feval):
        ''' :param string acquisition: acquisition function to use, one of "MPI", "EI" or "LCB"
            :param scalar exploration_weight: modulates the exploration in the acquisition (e.g. 2 for LCB and 0.01 for the others)
            :param scalar initial_points: number of initial points given to the Bayesian optimisation
            :param Environment environment: environment on which the optimisation is run
            :param scalar optimisation_iterations: number of iterations of the optimisation
            :param boolean exact_feval: must be False if the environment is noisy
        '''
        for attr in ['m_dims', 's_dims']:
            setattr(self, attr, getattr(conf, attr))
        self.dim_x = len(self.m_dims)
        self.dim_y = len(self.s_dims)
        self.acquisition = acquisition
        self.exploration_weight = exploration_weight
        self.initial_points = initial_points
        self.dataset  = Dataset(len(self.m_dims), len(self.s_dims))
        self.conf = conf
        self.mode = 'explore'
        self.environment = environment
        self.optimisation_iterations = optimisation_iterations
        self.exact_feval = exact_feval

    def infer(self, in_dims, out_dims, x):
        if in_dims == self.m_dims and out_dims == self.s_dims:    # forward
            ''' For now only return the nearest neighbor of the motor action '''
            assert len(x) == self.dim_x, "Wrong dimension for x. Expected %i, got %i" % (self.dim_x, len(x))
            # Find the nearest neighbor of the motor action x
            _, index = self.dataset.nn_x(x, k=1)
            return self.dataset.get_y(index[0])

        elif in_dims == self.s_dims and out_dims == self.m_dims:  # inverse
            if self.mode == 'exploit':
                self.acquisition = 0
            assert len(x) == self.dim_y, "Wrong dimension for x. Expected %i, got %i" % (self.dim_y, len(x))

            # Find the motor action that led to the nearest neighbor of the sensory goal
            _, index = self.dataset.nn_y(x, k=1)
            x0 = self.dataset.get_x(index[0])
            # Find the k nearest neighbors of this motor action
            _, index = self.dataset.nn_x(x0, k=self.initial_points)
            X = []
            Y = []
            for i in range(len(index)):
                X.append(self.dataset.get_x(index[i]))
                Y.append(self.dataset.get_y(index[i]))

            # Initialisation of the Bayesian optimisation
            func = F_to_minimize(np.array(x), self.dim_x, self.environment)
            X_init = np.array(X)
            Y_init = func.dist_array(Y)
            bounds = []
            for i in range(self.dim_x):
                bounds.append({'name': 'var_'+  str(i), 'type': 'continuous', 'domain': [self.conf.m_mins[i],self.conf.m_maxs[i]]})
            space                 = GPyOpt.Design_space(bounds)
            objective             = GPyOpt.core.task.SingleObjective(func.f)
            model                 = GPyOpt.models.GPModel(optimize_restarts=5,verbose=False, exact_feval = self.exact_feval)
            acquisition_optimizer = GPyOpt.optimization.AcquisitionOptimizer(space)
            if self.acquisition == 'EI':
                acquisition       = GPyOpt.acquisitions.AcquisitionEI(model, space, acquisition_optimizer, jitter = self.exploration_weight)
            elif self.acquisition == 'MPI':
                acquisition       = GPyOpt.acquisitions.AcquisitionMPI(model, space, acquisition_optimizer, jitter = self.exploration_weight)
            elif self.acquisition == 'LCB':
                acquisition       = GPyOpt.acquisitions.AcquisitionLCB(model, space, acquisition_optimizer, exploration_weight = self.exploration_weight)
            else:
                raise NotImplementedError
            evaluator             = GPyOpt.core.evaluators.Sequential(acquisition)
            bo = GPyOpt.methods.ModularBayesianOptimization(model, space, objective, acquisition, evaluator, X_init = X_init, Y_init = Y_init)

            # Run the optimisation, the eps = -np.inf is set to force the optimisation to do the required number of iterations
            bo.run_optimization(max_iter = self.optimisation_iterations, eps = -np.inf)

            # Update the model with the list of points explored during the optimisation
            self.list_s = []
            for (m,s) in func.pointsList:
                self.update(m,s)
            return bo.x_opt

        else:
            raise NotImplementedError


    def update(self, m, s):
        self.dataset.add_xy(m, s)

    def forward_prediction(self, m):
        """ Compute the expected sensory effect of the motor command m. It is a shortcut for self.infer(self.conf.m_dims, self.conf.s_dims, m)
        """
        return self.infer(self.conf.m_dims, self.conf.s_dims, m)


    def inverse_prediction(self, s_g):
        """ Compute a motor command to reach the sensory goal s_g. It is a shortcut for self.infer(self.conf.s_dims, self.conf.m_dims, s_g)
        """
        return self.infer(self.conf.s_dims, self.conf.m_dims, s_g)
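
The search domain handed to GPyOpt.Design_space in infer() is a plain list of per-dimension dicts. A minimal sketch of how such a list can be assembled from hypothetical motor bounds (only the data structure, no GPyOpt call):

# Hypothetical motor bounds for a 3-dimensional motor space.
m_mins = [-1.0, -1.0, 0.0]
m_maxs = [1.0, 1.0, 2.0]

bounds = [{'name': 'var_' + str(i),
           'type': 'continuous',
           'domain': [m_mins[i], m_maxs[i]]}
          for i in range(len(m_mins))]

print(bounds[0])  # {'name': 'var_0', 'type': 'continuous', 'domain': [-1.0, 1.0]}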
Example #13
class BayesianOptimisation(SensorimotorModel):
    ''' Sensorimotor model using Bayesian optimisation to infer inverse prediction'''
    def __init__(self, conf, acquisition, exploration_weight, initial_points,
                 environment, optimisation_iterations, exact_feval):
        ''' :param string acquisition: acquisition function to use, one of "MPI", "EI" or "LCB"
            :param scalar exploration_weight: modulates the exploration in the acquisition (e.g. 2 for LCB and 0.01 for the others)
            :param scalar initial_points: number of initial points given to the Bayesian optimisation
            :param Environment environment: environment on which the optimisation is run
            :param scalar optimisation_iterations: number of iterations of the optimisation
            :param boolean exact_feval: must be False if the environment is noisy
        '''
        for attr in ['m_dims', 's_dims']:
            setattr(self, attr, getattr(conf, attr))
        self.dim_x = len(self.m_dims)
        self.dim_y = len(self.s_dims)
        self.acquisition = acquisition
        self.exploration_weight = exploration_weight
        self.initial_points = initial_points
        self.dataset = Dataset(len(self.m_dims), len(self.s_dims))
        self.conf = conf
        self.mode = 'explore'
        self.environment = environment
        self.optimisation_iterations = optimisation_iterations
        self.exact_feval = exact_feval

    def infer(self, in_dims, out_dims, x):
        if in_dims == self.m_dims and out_dims == self.s_dims:  # forward
            ''' For now only return the nearest neighbor of the motor action '''
            assert len(
                x
            ) == self.dim_x, "Wrong dimension for x. Expected %i, got %i" % (
                self.dim_x, len(x))
            # Find the nearest neighbor of the motor action x
            _, index = self.dataset.nn_x(x, k=1)
            return self.dataset.get_y(index[0])

        elif in_dims == self.s_dims and out_dims == self.m_dims:  # inverse
            if self.mode == 'exploit':
                self.acquisition = 0
            assert len(
                x
            ) == self.dim_y, "Wrong dimension for x. Expected %i, got %i" % (
                self.dim_y, len(x))

            # Find the motor action that led to the nearest neighbor of the sensory goal
            _, index = self.dataset.nn_y(x, k=1)
            x0 = self.dataset.get_x(index[0])
            # Find the k nearest neighbors of this motor action
            _, index = self.dataset.nn_x(x0, k=self.initial_points)
            X = []
            Y = []
            for i in range(len(index)):
                X.append(self.dataset.get_x(index[i]))
                Y.append(self.dataset.get_y(index[i]))

            # Initialisation of the Bayesian optimisation
            func = F_to_minimize(np.array(x), self.dim_x, self.environment)
            X_init = np.array(X)
            Y_init = func.dist_array(Y)
            bounds = []
            for i in range(self.dim_x):
                bounds.append({
                    'name':
                    'var_' + str(i),
                    'type':
                    'continuous',
                    'domain': [self.conf.m_mins[i], self.conf.m_maxs[i]]
                })
            space = GPyOpt.Design_space(bounds)
            objective = GPyOpt.core.task.SingleObjective(func.f)
            model = GPyOpt.models.GPModel(optimize_restarts=5,
                                          verbose=False,
                                          exact_feval=self.exact_feval)
            acquisition_optimizer = GPyOpt.optimization.AcquisitionOptimizer(
                space)
            if self.acquisition == 'EI':
                acquisition = GPyOpt.acquisitions.AcquisitionEI(
                    model,
                    space,
                    acquisition_optimizer,
                    jitter=self.exploration_weight)
            elif self.acquisition == 'MPI':
                acquisition = GPyOpt.acquisitions.AcquisitionMPI(
                    model,
                    space,
                    acquisition_optimizer,
                    jitter=self.exploration_weight)
            elif self.acquisition == 'LCB':
                acquisition = GPyOpt.acquisitions.AcquisitionLCB(
                    model,
                    space,
                    acquisition_optimizer,
                    exploration_weight=self.exploration_weight)
            else:
                raise NotImplementedError
            evaluator = GPyOpt.core.evaluators.Sequential(acquisition)
            bo = GPyOpt.methods.ModularBayesianOptimization(model,
                                                            space,
                                                            objective,
                                                            acquisition,
                                                            evaluator,
                                                            X_init=X_init,
                                                            Y_init=Y_init)

            # Run the optimisation, the eps = -np.inf is set to force the optimisation to do the required number of iterations
            bo.run_optimization(max_iter=self.optimisation_iterations,
                                eps=-np.inf)

            # Update the model with the list of points explored during the optimisation
            self.list_s = []
            for (m, s) in func.pointsList:
                self.update(m, s)
            return bo.x_opt

        else:
            raise NotImplementedError

    def update(self, m, s):
        self.dataset.add_xy(m, s)

    def forward_prediction(self, m):
        """ Compute the expected sensory effect of the motor command m. It is a shortcut for self.infer(self.conf.m_dims, self.conf.s_dims, m)
        """
        return self.infer(self.conf.m_dims, self.conf.s_dims, m)

    def inverse_prediction(self, s_g):
        """ Compute a motor command to reach the sensory goal s_g. It is a shortcut for self.infer(self.conf.s_dims, self.conf.m_dims, s_g)
        """
        return self.infer(self.conf.s_dims, self.conf.m_dims, s_g)
Example #14
class MiscRandomInterest(RandomInterest):
    """
    Add some features to the RandomInterest random babbling class.
    
    Allows to query the recent interest in the whole space,
    the recent competence on the babbled points in the whole space, 
    the competence around a given point based on a mean of the knns.   
    
    """
    def __init__(self, 
                 conf, 
                 expl_dims,
                 competence_measure,
                 win_size,
                 competence_mode,
                 k,
                 progress_mode):
        
        RandomInterest.__init__(self, conf, expl_dims)
        
        self.competence_measure = competence_measure
        self.win_size = win_size
        self.competence_mode = competence_mode
        self.dist_max = np.linalg.norm(self.bounds[0,:] - self.bounds[1,:])
        self.k = k
        self.progress_mode = progress_mode
        self.data_xc = Dataset(len(expl_dims), 1)
        self.data_sr = Dataset(len(expl_dims), 0)
        self.current_progress = 0.
        self.current_interest = 0.
              
            
    def add_xc(self, x, c):
        self.data_xc.add_xy(x, [c])
        
    def add_sr(self, x):
        self.data_sr.add_xy(x)
        
    def update_interest(self, i):
        self.current_progress += (1. / self.win_size) * (i - self.current_progress)
        self.current_interest = abs(self.current_progress)

    def update(self, xy, ms, snnp=None, sp=None):
        c = self.competence_measure(xy[self.expl_dims], ms[self.expl_dims], dist_max=self.dist_max)
        if self.progress_mode == 'local':
            interest = self.interest_xc(xy[self.expl_dims], c)
            self.update_interest(interest)
        elif self.progress_mode == 'global':
            # in 'global' mode, use the sliding-window estimate as this step's interest
            interest = self.interest_global()
        
        self.add_xc(xy[self.expl_dims], c)
        self.add_sr(ms[self.expl_dims])
        return interest
    
    def n_points(self):
        return len(self.data_xc)
    
    def competence_global(self, mode='sw'):
        if self.n_points() > 0:
            if mode == 'all':
                return np.mean([self.data_xc.get_y(idx) for idx in range(self.n_points())])
            elif mode == 'sw':
                idxs = range(self.n_points())[- self.win_size:]
                return np.mean([self.data_xc.get_y(idx) for idx in idxs])
            else:
                raise NotImplementedError
        else:
            return 0.
        
    def mean_competence_pt(self, x):
        if self.n_points() > self.k: 
            _, idxs = self.data_xc.nn_x(x, k=self.k)
            return np.mean([self.data_xc.get_y(idx) for idx in idxs])
        else:
            return self.competence()
                
    def interest_xc(self, x, c):
        if self.n_points() > 0:
            idx_sg_NN = self.data_xc.nn_x(x, k=1)[1][0]
            sr_NN = self.data_sr.get_x(idx_sg_NN)
            c_old = competence_dist(x, sr_NN, dist_max=self.dist_max)
            return c - c_old
            #return np.abs(c - c_old)
        else:
            return 0.
        
    def interest_pt(self, x):
        if self.n_points() > self.k:
            _, idxs = self.data_xc.nn_x(x, k=self.k)
            idxs = sorted(idxs)
            v = [self.data_xc.get_y(idx) for idx in idxs]
            n = len(v)
            comp_beg = np.mean(v[:int(float(n)/2.)])
            comp_end = np.mean(v[int(float(n)/2.):])
            return np.abs(comp_end - comp_beg)
        else:
            return self.interest_global()
            
    def interest_global(self): 
        if self.n_points() < 2:
            return 0.
        else:
            idxs = range(self.n_points())[- self.win_size:]
            v = [self.data_xc.get_y(idx) for idx in idxs]
            n = len(v)
            comp_beg = np.mean(v[:int(float(n)/2.)])
            comp_end = np.mean(v[int(float(n)/2.):])
            return np.abs(comp_end - comp_beg)
        
    def competence(self): return self.competence_global()
        
    def interest(self):
        if self.progress_mode == 'local':
            return self.current_interest
        elif self.progress_mode == 'global':
            return self.interest_global()
        else:
            raise NotImplementedError
Example #15
class ForwardModel(object):
    """Class describing the ForwardModel interface"""
    @classmethod
    def from_dataset(cls, dataset, **kwargs):
        """Construct a Nearest Neighbor forward model from an existing dataset."""
        m = cls(dataset.dim_x, dataset.dim_y, **kwargs)
        m.dataset = dataset
        return m

    @classmethod
    def from_robot(cls, robot, **kwargs):
        """Construct a Nearest Neighbor forward model from an existing dataset."""
        m = cls(len(robot.m_feats), len(robot.s_feats), **kwargs)
        return m

    def __init__(self, dim_x, dim_y, **kwargs):
        """Create the forward model

        @param dim_x    the input dimension
        @param dim_y    the output dimension
        """
        self.dim_x = dim_x
        self.dim_y = dim_y
        self.dataset = Dataset(dim_x, dim_y)
        self.conf = kwargs

    def reset(self):
        self.dataset.reset()

    def size(self):
        return len(self.dataset)

    def add_xy(self, x, y):
        """Add an observation to the forward model

        @param x  an array of float of length dim_in
        @param y  an array of float of length dim_out
        """
        self.dataset.add_xy(x, y)

    def add_xy_batch(self, x_list, y_list):
        self.dataset.add_xy_batch(x_list, y_list)

    def get_x(self, index):
        return self.dataset.get_x(index)

    def get_y(self, index):
        return self.dataset.get_y(index)

    def get_xy(self, index):
        return self.dataset.get_xy(index)

    def predict_y(self, xq, **kwargs):
        """Provide an prediction of xq in the output space

        @param xq  an array of float of length dim_x
        """
        raise NotImplementedError

    def config(self):
        """Return a string with the configuration"""
        return ", ".join('%s:%s' % (key, value)
                         for key, value in self.conf.items())
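
ForwardModel leaves predict_y unimplemented. Below is a self-contained sketch of a possible nearest-neighbour subclass; the toy Dataset defined here only mimics the few methods the interface above uses (it is not the library's Dataset), and NNForwardModel is an illustrative name:

import numpy as np

class Dataset(object):
    """Toy in-memory stand-in for the library's Dataset, for this sketch only."""
    def __init__(self, dim_x, dim_y):
        self.dim_x, self.dim_y = dim_x, dim_y
        self._x, self._y = [], []

    def __len__(self):
        return len(self._x)

    def add_xy(self, x, y):
        self._x.append(np.asarray(x, dtype=float))
        self._y.append(np.asarray(y, dtype=float))

    def nn_x(self, xq, k=1):
        # brute-force nearest neighbours on the stored inputs
        dists = [np.linalg.norm(np.asarray(xq, dtype=float) - x) for x in self._x]
        order = list(np.argsort(dists)[:k])
        return [dists[i] for i in order], order

    def get_y(self, index):
        return self._y[index]

class NNForwardModel(ForwardModel):
    """Hypothetical subclass: predict the output of the nearest stored input."""
    def predict_y(self, xq, **kwargs):
        _, idxs = self.dataset.nn_x(xq, k=1)
        return self.dataset.get_y(idxs[0])

# Assumes the ForwardModel class from the example above is in the same module,
# so its __init__ picks up the toy Dataset defined here.
fm = NNForwardModel(2, 1)
fm.add_xy([0.0, 0.0], [0.0])
fm.add_xy([1.0, 1.0], [2.0])
print(fm.predict_y([0.9, 1.1]))  # -> [2.] from the nearest neighbour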
Example #16
class ForwardModel(object):
    """Class describing the ForwardModel interface"""

    @classmethod
    def from_dataset(cls, dataset, **kwargs):
        """Construct a Nearest Neighbor forward model from an existing dataset."""
        m = cls(dataset.dim_x, dataset.dim_y, **kwargs)
        m.dataset = dataset
        return m

    @classmethod
    def from_robot(cls, robot, **kwargs):
        """Construct a Nearest Neighbor forward model from an existing dataset."""
        m = cls(len(robot.m_feats), len(robot.s_feats), **kwargs)
        return m

    def __init__(self, dim_x, dim_y, **kwargs):
        """Create the forward model

        @param dim_x    the input dimension
        @param dim_y    the output dimension
        """
        self.dim_x    = dim_x
        self.dim_y    = dim_y
        self.dataset  = Dataset(dim_x, dim_y)
        self.conf     = kwargs

    def reset(self):
        self.dataset.reset()

    def size(self):
        return len(self.dataset)

    def add_xy(self, x, y):
        """Add an observation to the forward model

        @param x  an array of float of length dim_in
        @param y  an array of float of length dim_out
        """
        self.dataset.add_xy(x, y)
        
    def add_xy_batch(self, x_list, y_list): self.dataset.add_xy_batch(x_list, y_list)

    def get_x(self, index):
        return self.dataset.get_x(index)

    def get_y(self, index):
        return self.dataset.get_y(index)

    def get_xy(self, index):
        return self.dataset.get_xy(index)

    def predict_y(self, xq, **kwargs):
        """Provide an prediction of xq in the output space

        @param xq  an array of float of length dim_x
        """
        raise NotImplementedError

    def config(self):
        """Return a string with the configuration"""
        return ", ".join('%s:%s' % (key, value) for key, value in self.conf.items())