Example #1
    def __init__(self, num_outputs, num_factors=16,
                 rho_init=INITIAL_NOISE_VARIANCE,
                 encoding_type=DEFAULT_ENCODING, **kwargs):

        super(Coregionalization, self).__init__(dimension=1, **kwargs)

        self.encoding_W_flat = IdentityScalarEncoding(
            dimension=num_outputs * num_factors)
        self.encoding_rho = create_encoding(encoding_type, rho_init,
                                            NOISE_VARIANCE_LOWER_BOUND,
                                            NOISE_VARIANCE_UPPER_BOUND,
                                            dimension=1)

        self.num_outputs = num_outputs
        self.num_factors = num_factors

        with self.name_scope():
            self.W_flat_internal = self.params.get(
                "W_internal", shape=(num_outputs * num_factors,),
                init=mx.init.Normal(),  # TODO: Use Xavier initialization here
                dtype=DATA_TYPE)
            self.rho_internal = self.params.get(
                "rho_internal", shape=(1,),
                init=mx.init.Constant(self.encoding_rho.init_val_int),
                dtype=DATA_TYPE)
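
What this parameterization encodes: `W_flat` is a flattened `num_outputs x num_factors` factor matrix and `rho` a positive noise term. Below is a minimal numpy sketch of the standard intrinsic-coregionalization covariance such parameters would yield; that the forward pass above uses exactly the form B = W W^T + rho * I is an assumption.

import numpy as np

num_outputs, num_factors = 3, 2
# W_flat as registered above, reshaped into the factor matrix W
W = np.random.randn(num_outputs * num_factors).reshape(num_outputs, num_factors)
rho = 1e-3  # noise term, kept positive by encoding_rho above

# Output-covariance matrix: low-rank part plus diagonal jitter
B = W @ W.T + rho * np.eye(num_outputs)
print(B.shape)  # (3, 3), positive definite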
Example #2
    def __init__(self, input_range, encoding_type=DEFAULT_ENCODING, **kwargs):
        super(OneDimensionalWarping, self).__init__(**kwargs)

        self.input_range = input_range
        self.encoding = create_encoding(
            encoding_type, INITIAL_WARPING, WARPING_LOWER_BOUND,
            WARPING_UPPER_BOUND, 2, LogNormal(0.0, 0.75))
        with self.name_scope():
            self.warping_internal = register_parameter(
                self.params, 'warping', self.encoding, shape=(2,))
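
The encoding above manages two positive warping parameters over `input_range`. A common two-parameter input warping is the Kumaraswamy CDF; the sketch below is a hypothetical illustration of that form, not necessarily the exact transform `OneDimensionalWarping` applies.

import numpy as np

def kumaraswamy_warp(x, a, b, input_range=(0.0, 1.0)):
    lo, hi = input_range
    u = (x - lo) / (hi - lo)          # rescale input to [0, 1]
    return 1.0 - (1.0 - u ** a) ** b  # warped value, still in [0, 1]

print(kumaraswamy_warp(np.linspace(0.0, 1.0, 5), a=2.0, b=0.5))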
Example #3
    def __init__(self, encoding_type=DEFAULT_ENCODING,
                 u1_init=1.0, u3_init=0.0, **kwargs):

        super(FabolasKernelFunction, self).__init__(dimension=1, **kwargs)

        self.encoding_u12 = create_encoding(
            encoding_type, u1_init, COVARIANCE_SCALE_LOWER_BOUND,
            COVARIANCE_SCALE_UPPER_BOUND, 1, None)
        # This is not really needed, but param_encoding_pairs needs an encoding
        # for each parameter
        self.encoding_u3 = IdentityScalarEncoding(init_val=u3_init)
        with self.name_scope():
            self.u1_internal = register_parameter(
                self.params, 'u1', self.encoding_u12)
            self.u2_internal = register_parameter(
                self.params, 'u2', self.encoding_u12)
            self.u3_internal = register_parameter(
                self.params, 'u3', self.encoding_u3)
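
A minimal usage sketch, assuming the class definition above and the module-level helpers it references (`create_encoding`, `register_parameter`, the bound constants) are in scope:

# FabolasKernelFunction is a Gluon block; its parameters are registered
# in __init__, so we can list them right after construction
kernel = FabolasKernelFunction()
for name in kernel.collect_params():
    print(name)  # the u1, u2, u3 internals registered above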
Example #4
    def __init__(
            self, kernel: KernelFunction, mean: MeanFunction = None,
            initial_noise_variance=None, encoding_type=None, **kwargs):
        super(MarginalLikelihood, self).__init__(**kwargs)
        if mean is None:
            mean = ScalarMeanFunction()
        if initial_noise_variance is None:
            initial_noise_variance = INITIAL_NOISE_VARIANCE
        if encoding_type is None:
            encoding_type = DEFAULT_ENCODING
        self.encoding = create_encoding(
            encoding_type, initial_noise_variance, NOISE_VARIANCE_LOWER_BOUND,
            NOISE_VARIANCE_UPPER_BOUND, 1, Gamma(mean=0.1, alpha=0.1))
        self.mean = mean
        self.kernel = kernel
        with self.name_scope():
            self.noise_variance_internal = register_parameter(
                self.params, 'noise_variance', self.encoding)
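
For reference, the quantity a marginal-likelihood block like this one minimizes is the standard GP negative log marginal likelihood. A self-contained numpy sketch follows (the MXNet forward pass itself is not shown above):

import numpy as np

def neg_log_marginal_likelihood(K, y, noise_variance):
    # -log p(y | X) = 0.5 * y^T (K + s2*I)^{-1} y
    #               + 0.5 * log det(K + s2*I) + 0.5 * n * log(2*pi)
    n = y.shape[0]
    L = np.linalg.cholesky(K + noise_variance * np.eye(n))
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))
    return (0.5 * y @ alpha
            + np.log(np.diag(L)).sum()      # equals 0.5 * log det
            + 0.5 * n * np.log(2.0 * np.pi))

K = np.eye(4)   # stand-in kernel matrix; in practice self.kernel(X, X)
y = np.ones(4)
print(neg_log_marginal_likelihood(K, y, noise_variance=1e-2))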
Example #5
    def __init__(self,
                 dimension,
                 ARD=False,
                 encoding_type=DEFAULT_ENCODING,
                 **kwargs):
        super(Matern52, self).__init__(dimension, **kwargs)
        self.encoding = create_encoding(encoding_type,
                                        INITIAL_COVARIANCE_SCALE,
                                        COVARIANCE_SCALE_LOWER_BOUND,
                                        COVARIANCE_SCALE_UPPER_BOUND, 1,
                                        LogNormal(0.0, 1.0))
        self.ARD = ARD
        self.squared_distance = SquaredDistance(dimension=dimension,
                                                ARD=ARD,
                                                encoding_type=encoding_type)

        with self.name_scope():
            self.covariance_scale_internal = register_parameter(
                self.params, 'covariance_scale', self.encoding)
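
For reference, the Matérn-5/2 covariance has the usual closed form as a function of the (bandwidth-scaled) distance d, scaled here by `covariance_scale`. A self-contained numpy sketch:

import numpy as np

def matern52(d, covariance_scale=1.0):
    # k(d) = c * (1 + sqrt(5)*d + 5*d^2/3) * exp(-sqrt(5)*d)
    sqrt5_d = np.sqrt(5.0) * d
    return covariance_scale * (1.0 + sqrt5_d + sqrt5_d ** 2 / 3.0) \
        * np.exp(-sqrt5_d)

print(matern52(np.array([0.0, 0.5, 1.0])))  # decays from covariance_scale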
Example #6
    def __init__(self,
                 dimension,
                 ARD=False,
                 encoding_type=DEFAULT_ENCODING,
                 **kwargs):
        super(SquaredDistance, self).__init__(**kwargs)

        self.ARD = ARD
        inverse_bandwidths_dimension = 1 if not ARD else dimension
        self.encoding = create_encoding(
            encoding_type, INITIAL_INVERSE_BANDWIDTHS,
            INVERSE_BANDWIDTHS_LOWER_BOUND, INVERSE_BANDWIDTHS_UPPER_BOUND,
            inverse_bandwidths_dimension,
            Uniform(INVERSE_BANDWIDTHS_LOWER_BOUND,
                    INVERSE_BANDWIDTHS_UPPER_BOUND))
        with self.name_scope():
            self.inverse_bandwidths_internal = register_parameter(
                self.params,
                'inverse_bandwidths',
                self.encoding,
                shape=(inverse_bandwidths_dimension, ))
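
What this computes in effect: with `ARD=True`, each input dimension gets its own learned inverse bandwidth; with `ARD=False`, a single one is shared across dimensions. A numpy sketch of the scaled squared distance (the Gluon forward pass is not shown above):

import numpy as np

def squared_distance(x1, x2, inverse_bandwidths):
    # Scale each coordinate difference by its inverse bandwidth;
    # broadcasting a scalar covers the ARD=False case
    diff = (x1 - x2) * inverse_bandwidths
    return np.sum(diff ** 2)

x1, x2 = np.array([0.2, 0.4]), np.array([0.5, 0.1])
print(squared_distance(x1, x2, inverse_bandwidths=np.array([1.0, 2.0])))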
Example #7
    def __init__(self,
                 kernel_x: KernelFunction,
                 mean_x: MeanFunction,
                 encoding_type=DEFAULT_ENCODING,
                 alpha_init=1.0,
                 mean_lam_init=0.5,
                 gamma_init=0.5,
                 delta_fixed_value=None,
                 delta_init=0.5,
                 max_metric_value=1.0,
                 **kwargs):
        """
        :param kernel_x: Kernel k_x(x, x') over configs
        :param mean_x: Mean function mu_x(x) over configs
        :param encoding_type: Encoding used for alpha, mean_lam, gamma (positive
            values)
        :param alpha_init: Initial value alpha
        :param mean_lam_init: Initial value mean_lam
        :param gamma_init: Initial value gamma
        :param delta_fixed_value: If not None, delta is fixed to this value, and
            does not become a free parameter
        :param delta_init: Initial value delta (if delta_fixed_value is None)
        :param max_metric_value: Maximum value which the metric can attain.
            This is used as an upper bound on gamma
        """

        super(ExponentialDecayResourcesKernelFunction,
              self).__init__(dimension=kernel_x.dimension + 1, **kwargs)
        self.kernel_x = kernel_x
        self.mean_x = mean_x
        # alpha, mean_lam are parameters of a Gamma distribution, where alpha
        # is the shape parameter, and
        #   E[lambda] = mean_lam, Var[lambda] = mean_lam ** 2 / alpha
        alpha_lower, alpha_upper = 1e-6, 250.0
        alpha_init = self._wrap_initvals(alpha_init, alpha_lower, alpha_upper)
        self.encoding_alpha = create_encoding(encoding_type, alpha_init,
                                              alpha_lower, alpha_upper, 1,
                                              None)
        mean_lam_lower, mean_lam_upper = 1e-4, 50.0
        mean_lam_init = self._wrap_initvals(mean_lam_init, mean_lam_lower,
                                            mean_lam_upper)
        self.encoding_mean_lam = create_encoding(encoding_type, mean_lam_init,
                                                 mean_lam_lower,
                                                 mean_lam_upper, 1, None)
        # If f(x, 0) is the metric value at r -> 0 and f(x) the value at
        # r -> infty, then f(x, 0) = gamma (for delta = 1), or
        # f(x, 0) = gamma + f(x) (for delta = 0). gamma should not be larger
        # than the maximum metric value.
        gamma_lower = max_metric_value * 0.0001
        gamma_upper = max_metric_value
        gamma_init = self._wrap_initvals(gamma_init, gamma_lower, gamma_upper)
        self.encoding_gamma = create_encoding(encoding_type, gamma_init,
                                              gamma_lower, gamma_upper, 1,
                                              None)
        if delta_fixed_value is None:
            delta_init = self._wrap_initvals(delta_init, 0.0, 1.0)
            self.encoding_delta = IdentityScalarEncoding(constr_lower=0.0,
                                                         constr_upper=1.0,
                                                         init_val=delta_init)
        else:
            assert 0.0 <= delta_fixed_value <= 1.0, \
                "delta_fixed_value = {}, must lie in [0, 1]".format(
                    delta_fixed_value)
            self.encoding_delta = None
            self.delta_fixed_value = delta_fixed_value

        with self.name_scope():
            self.alpha_internal = register_parameter(self.params, "alpha",
                                                     self.encoding_alpha)
            self.mean_lam_internal = register_parameter(
                self.params, "mean_lam", self.encoding_mean_lam)
            self.gamma_internal = register_parameter(self.params, "gamma",
                                                     self.encoding_gamma)
            if delta_fixed_value is None:
                self.delta_internal = register_parameter(
                    self.params, "delta", self.encoding_delta)