def test_get_number_parameters_kernel(self):
        # A separable product of a Matern52 kernel (dimension 2) and a task
        # kernel (1 task) is expected to have 2 parameters in total.
        assert get_number_parameters_kernel(
            [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME],
            [2, 1, 1]) == 2

        # An unknown kernel name must raise NameError.
        with self.assertRaises(NameError):
            get_number_parameters_kernel(['a'], [2])
# Exemplo n.º 2
# 0
    def parameters_from_list_to_dict(params, **kwargs):
        """
        Convert a flat list of kernel parameters into a dictionary, consuming
        entries in the order given by kwargs['kernels'].

        :param params: [float]
        :param kwargs: {
            'dimensions': [float],
            'kernels': [str],
            SAME_CORRELATION: (boolean),
        }
        :return: {
           PARAM_NAME: [float] or float
        }
        """

        result = {}
        remaining = params

        for dimension, kernel_name in zip(kwargs['dimensions'],
                                          kwargs['kernels']):
            if kernel_name == MATERN52_NAME:
                count = get_number_parameters_kernel([kernel_name],
                                                     [dimension])
                result.update(Matern52.parameters_from_list_to_dict(
                    remaining[:count]))
                remaining = remaining[count:]
            elif kernel_name == TASKS_KERNEL_NAME:
                count = get_number_parameters_kernel([kernel_name],
                                                     [dimension], **kwargs)
                result.update(TasksKernel.parameters_from_list_to_dict(
                    remaining[:count]))
                remaining = remaining[count:]

        return result
# Exemplo n.º 3
# 0
    def __init__(self,
                 n_tasks,
                 lower_triang,
                 same_correlation=False,
                 **kernel_parameters):
        """
        Kernel over task indices.

        :param n_tasks: (int) number of tasks
        :param lower_triang: (ParameterEntity) If L(i, j) = exp(lower_triang[cum_sum(i)+j]), then
            Z = L * L^T where Z[i,j] = cov(Task_i, Task_j).
        :param same_correlation: (boolean) If True, the same correlation is used
            for every pair of tasks, so only two parameters are kept in total:
            var(task_i, task_i) and cov(task_i, task_j). In that case,
            lower_triang consists of only log(r) and log(covariance), where
            variance = covariance * (n_tasks - 1) + r (this guarantees that the
            matrix is P.D.)
        """

        name = TASKS_KERNEL_NAME
        dimension = 1

        if same_correlation:
            # Shared correlation: at most two free parameters.
            dimension_parameters = min(n_tasks, 2)
        else:
            dimension_parameters = get_number_parameters_kernel([name],
                                                                [n_tasks])

        super(TasksKernel, self).__init__(name, dimension,
                                          dimension_parameters)

        self.same_correlation = same_correlation
        self.lower_triang = lower_triang
        self.n_tasks = n_tasks
        # Cached covariance matrix and its Cholesky factor; filled lazily.
        self.base_cov_matrix = None
        self.chol_base_cov_matrix = None
# Exemplo n.º 4
# 0
    def define_default_kernel(cls,
                              dimension,
                              bounds=None,
                              default_values=None,
                              parameters_priors=None,
                              **kwargs):
        """
        Build a TasksKernel with default parameter values and priors.

        :param dimension: (int) Number of tasks.
        :param bounds: [[float, float]], lower bound and upper bound for each entry. This parameter
                is to compute priors in a smart way.
        :param default_values: np.array(k)
        :param parameters_priors: {
                        LOWER_TRIANG_NAME: [float],
                    }
        :param kwargs: {SAME_CORRELATION: boolean}

        :return: TasksKernel
        """

        same_correlation = kwargs.get(SAME_CORRELATION, False)

        if same_correlation:
            n_params = min(dimension, 2)
        else:
            n_params = get_number_parameters_kernel([TASKS_KERNEL_NAME],
                                                    [dimension])

        if parameters_priors is None:
            parameters_priors = {}

        if default_values is None:
            # Fall back to the prior's values, or zeros if none were given.
            default_values = np.array(
                parameters_priors.get(LOWER_TRIANG_NAME, n_params * [0.0]))

        kernel = TasksKernel.define_kernel_from_array(dimension,
                                                      default_values, **kwargs)

        if np.all(default_values == 0.0):
            # No informative defaults: uniform prior on [-1, 1] per entry.
            kernel.lower_triang.prior = \
                UniformPrior(n_params, n_params * [-1.0], n_params * [1.0])
        elif dimension == 1:
            kernel.lower_triang.prior = LogNormalSquare(
                1, 1.0, np.sqrt(default_values[0]))
            kernel.lower_triang.bounds = [(SMALLEST_POSITIVE_NUMBER, None)]
        else:
            kernel.lower_triang.prior = MultivariateNormalPrior(
                n_params, default_values, np.eye(n_params))

        return kernel
    def __init__(self, dimension, length_scale, **kernel_parameters):
        """
        Matern 5/2 kernel.

        :param dimension: int
        :param length_scale: ParameterEntity
        """

        kernel_name = MATERN52_NAME
        n_parameters = get_number_parameters_kernel([kernel_name],
                                                    [dimension])

        super(Matern52, self).__init__(kernel_name, dimension, n_parameters)

        self.length_scale = length_scale
# Exemplo n.º 6
# 0
    def define_prior_parameters(data,
                                dimension,
                                same_correlation=False,
                                var_evaluations=None):
        """
        Defines value of the parameters of the prior distributions of the kernel's parameters.

        :param data: {'points': np.array(nx1), 'evaluations': np.array(n),
            'var_noise': np.array(n) or None}. Each point is an index of a task.
        :param dimension: int, number of tasks
        :param same_correlation: boolean
        :param var_evaluations: float or None; variance of the evaluations,
            used as a fallback estimate when a task has too few observations.
        :return:  {
            LOWER_TRIANG_NAME: [float],
        }
        """

        if dimension == 1:
            return {LOWER_TRIANG_NAME: [var_evaluations]}

        # Group evaluations by task: data_by_tasks[i] is
        # [evaluations_of_task_i, mean_of_those_evaluations] (or [[]] if none).
        tasks_index = data['points'][:, 0]
        data_by_tasks = {}
        enough_data = True
        for i in range(dimension):
            index_task = np.where(tasks_index == i)[0]
            if len(index_task) > 0:
                data_by_tasks[i] = [
                    data['evaluations'][index_task],
                    np.mean(data['evaluations'][index_task])
                ]
            else:
                data_by_tasks[i] = [[]]
            if len(index_task) < 2:
                enough_data = False

        if not same_correlation:
            n_params = get_number_parameters_kernel([TASKS_KERNEL_NAME],
                                                    [dimension])
        else:
            n_params = min(dimension, 2)

        if not enough_data:
            # Cannot estimate covariances; fall back to zeros.
            return {LOWER_TRIANG_NAME: n_params * [0.0]}

        # Can we include the variance of noisy evaluations in a smart way to get better estimators?

        # Sample covariance matrix between tasks, estimated from the first
        # d = min(#obs_i, #obs_j) observations of each pair of tasks.
        cov_estimate = np.zeros((dimension, dimension))

        # BUG FIX: originally this pair of loops used xrange, which does not
        # exist in Python 3; range is used everywhere else in this function.
        for i in range(dimension):
            for j in range(i + 1):
                a1 = len(data_by_tasks[i][0])
                a2 = len(data_by_tasks[j][0])
                d = min(a1, a2)
                if d <= 1:
                    # Not enough paired observations: use neutral defaults.
                    if i == j:
                        if not same_correlation:
                            cov_estimate[i, j] = 1.0
                        else:
                            cov_estimate[i, j] = var_evaluations
                    else:
                        if not same_correlation:
                            cov_estimate[i, j] = 0.0
                            cov_estimate[j, i] = 0.0
                        else:
                            # n_eval = len(data['evaluations'])
                            # z = data['evaluations'][0: n_eval/2]
                            # z = z - np.mean(z)
                            #
                            # z_2 = data['evaluations'][n_eval / 2: n_eval]
                            # z_2 = z_2 - np.mean(z_2)
                            #
                            # cov = [z1 * z2 for z1 in z for z2 in z_2]
                            # cov = np.mean(cov)
                            # cov_estimate[i, j] = cov
                            # cov_estimate[j, i] = cov_estimate[i, j]
                            cov_estimate[i, j] = 0
                            cov_estimate[j, i] = 0
                else:
                    mu1 = data_by_tasks[i][1]
                    mu2 = data_by_tasks[j][1]
                    a = data_by_tasks[i][0][0:d]
                    b = data_by_tasks[j][0][0:d]
                    cov_estimate[i, j] = np.sum(
                        (a - mu1) * (b - mu2)) / (d - 1.0)
                    cov_estimate[j, i] = cov_estimate[i, j]

        if same_correlation:
            # Two parameters: log mean variance and log mean covariance,
            # each floored at 0.1 so the log is well-defined.
            var = [cov_estimate[i, i] for i in range(dimension)]
            task_params = []
            task_params.append(np.log(max(np.mean(var), 0.1)))

            # NOTE(review): unreachable — dimension == 1 already returned at
            # the top of the function; kept defensively.
            if dimension == 1:
                return {LOWER_TRIANG_NAME: task_params}

            cov = [
                cov_estimate[i, j] for i in range(dimension)
                for j in range(dimension) if i != j and cov_estimate[i, j] != 0
            ]
            if np.all(np.array(cov) == 0):
                return {LOWER_TRIANG_NAME: task_params}

            task_params.append(np.log(max(np.mean(cov), 0.1)))

            return {LOWER_TRIANG_NAME: task_params}

        # Cholesky-like factorization of cov_estimate: l_params[(i, j)] holds
        # the entries of a lower-triangular L with cov_estimate ~= L * L^T.
        l_params = {}
        for j in range(dimension):
            for i in range(j, dimension):
                if i == j:
                    value = np.sqrt(
                        max(
                            cov_estimate[i, j] - np.sum(
                                np.array([l_params[(i, h)]
                                          for h in range(i)])**2),
                            SMALLEST_POSITIVE_NUMBER))
                    l_params[(i, j)] = value
                    continue
                ls_val = np.sum([
                    l_params[(i, h)] * l_params[(j, h)]
                    for h in range(min(i, j))
                ])
                d = min(i, j)
                value = (cov_estimate[(i, j)] - ls_val) / l_params[(d, d)]
                l_params[(i, j)] = value

        # The prior parameters are the logs of the lower-triangular entries,
        # floored at 0.0001 to keep the log finite.
        task_params = []
        for i in range(dimension):
            for j in range(i + 1):
                value = l_params[(i, j)]
                task_params.append(np.log(max(value, 0.0001)))

        return {LOWER_TRIANG_NAME: task_params}
def get_kernel_default(kernel_name,
                       dimension,
                       bounds=None,
                       default_values=None,
                       parameters_priors=None,
                       **kernel_parameters):
    """
    Returns a default kernel object associated to the kernel_name
    :param kernel_name: [str]
    :param dimension: [int]. It's the number of tasks for the task kernel.
    :param bounds: [[float, float]], lower bound and upper bound for each entry. This parameter
            is to compute priors in a smart way.
    :param default_values: np.array(k), default values for the parameters of the kernel
    :param parameters_priors: {
            SIGMA2_NAME: float,
            LENGTH_SCALE_NAME: [float],
            LOWER_TRIANG_NAME: [float],
        }
    :param kernel_parameters: additional kernel parameters,
        - SAME_CORRELATION: (boolean) True or False. Parameter used only for task kernel.

    :return: kernel object
    """

    # Scaled kernel: only the Matern52 base kernel is supported; the base
    # kernel name is forwarded as an extra positional argument.
    if kernel_name[0] == SCALED_KERNEL:
        if kernel_name[1] == MATERN52_NAME:
            return ScaledKernel.define_default_kernel(dimension[0], bounds,
                                                      default_values,
                                                      parameters_priors,
                                                      *([MATERN52_NAME], ))

    if kernel_name[0] == MATERN52_NAME:
        return Matern52.define_default_kernel(dimension[0], bounds,
                                              default_values,
                                              parameters_priors)

    if kernel_name[0] == TASKS_KERNEL_NAME:
        return TasksKernel.define_default_kernel(dimension[0], bounds,
                                                 default_values,
                                                 parameters_priors,
                                                 **kernel_parameters)

    # Product kernel: kernel_name[1:] / dimension[1:] describe the factors.
    # Split default_values and bounds into per-factor slices before delegating.
    if kernel_name[0] == PRODUCT_KERNELS_SEPARABLE:
        values = []
        cont = 0  # cursor into default_values
        bounds_ = []
        cont_b = 0  # cursor into bounds
        for name, dim in zip(kernel_name[1:], dimension[1:]):
            n_params = get_number_parameters_kernel([name], [dim],
                                                    **kernel_parameters)
            if default_values is not None:
                value_kernel = default_values[cont:cont + n_params]
            else:
                value_kernel = None

            if bounds is not None:
                # Matern52 consumes `dim` bound entries; the task kernel
                # consumes one entry.
                if name == MATERN52_NAME:
                    bounds_.append(bounds[cont_b:cont_b + dim])
                    cont_b += dim
                if name == TASKS_KERNEL_NAME:
                    bounds_.append(bounds[cont_b:cont_b + 1])
                    cont_b += 1
            cont += n_params
            values.append(value_kernel)

        if len(bounds_) > 0:
            bounds = bounds_

        return ProductKernels.define_default_kernel(dimension[1:], bounds,
                                                    values, parameters_priors,
                                                    kernel_name[1:],
                                                    **kernel_parameters)