Example #1
0
    def domain_map(
        cls, F, amplitude: Tensor, length_scale: Tensor
    ) -> Tuple[Tensor, Tensor]:
        """
        Apply the softplus transformation to the RBF kernel hyper-parameters,
        mapping the raw values onto the positive reals.

        Parameters
        ----------
        F: mx.symbol or mx.nd
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        amplitude
            RBF kernel amplitude hyper-parameter of shape (batch_size, 1, 1).
        length_scale
            RBF kernel length scale hyper-parameter of shape (batch_size, 1, 1).

        Returns
        -------
        Tuple
            Two GP RBF kernel hyper-parameters.
            Each is a Tensor of shape: (batch_size, 1, 1).
        """
        amplitude = softplus(F, amplitude)
        length_scale = softplus(F, length_scale)
        return amplitude, length_scale
Example #2
0
    def domain_map(cls, F, amplitude, length_scale, frequency):
        r"""
        Map the raw Periodic kernel hyper-parameters onto the positive reals
        via the softplus transformation.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        amplitude
            Periodic kernel amplitude hyper-parameter of shape
            (batch_size, 1, 1).
        length_scale
            Periodic kernel length scale hyper-parameter of shape
            (batch_size, 1, 1).
        frequency
            Periodic kernel frequency hyper-parameter of shape
            (batch_size, 1, 1).

        Returns
        -------
        Tuple[Tensor, Tensor, Tensor]
            The three transformed Periodic kernel hyper-parameters, each a
            Tensor of shape (batch_size, 1, 1).
        """
        # Apply the same positivity-enforcing transform to every parameter,
        # preserving the (amplitude, length_scale, frequency) order.
        transformed = tuple(
            softplus(F, raw)
            for raw in (amplitude, length_scale, frequency)
        )
        return transformed
Example #3
0
    def get_gp_params(
        self,
        F,
        past_target: Tensor,
        past_time_feat: Tensor,
        feat_static_cat: Tensor,
    ) -> Tuple:
        """
        Return the GP hyper-parameters for the model.

        Raw hyper-parameters are produced by embedding the series index,
        constrained to be positive (softplus for sigma), optionally rescaled
        by data-dependent factors, and finally clipped for numerical
        stability.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        past_target
            Training time series values of shape (batch_size, context_length).
        past_time_feat
            Training features of shape (batch_size, context_length,
            num_features).
        feat_static_cat
            Time series indices of shape (batch_size, 1).

        Returns
        -------
        Tuple
            Tuple of kernel hyper-parameters of length num_hyperparams.
                Each is a Tensor of shape (batch_size, 1, 1).
            Model noise sigma.
                Tensor of shape (batch_size, 1, 1).
        """
        # Embed the series index into one raw value per kernel
        # hyper-parameter plus one for the noise sigma.
        # NOTE(review): squeeze() with no axis drops *all* unit dimensions —
        # this assumes feat_static_cat is (batch_size, 1) with batch_size > 1;
        # verify against the caller.
        output = self.embedding(feat_static_cat.squeeze()
                                )  # Shape (batch_size, num_hyperparams + 1)
        kernel_args = self.proj_kernel_args(output)
        sigma = softplus(
            F,
            output.slice_axis(  # sigma is the last hyper-parameter
                axis=1,
                begin=self.num_hyperparams,
                end=self.num_hyperparams + 1,
            ),
        )
        if self.params_scaling:
            # Data-dependent rescaling: scalings[num_hyperparams] belongs to
            # sigma, the first num_hyperparams entries to the kernel args.
            scalings = self.kernel_output.gp_params_scaling(
                F, past_target, past_time_feat)
            sigma = F.broadcast_mul(sigma, scalings[self.num_hyperparams])
            kernel_args = (F.broadcast_mul(kernel_arg, scaling)
                           for kernel_arg, scaling in zip(
                               kernel_args, scalings[0:self.num_hyperparams]))
        # Clip to a safe range, then add a trailing unit axis so every
        # hyper-parameter ends up with shape (batch_size, 1, 1).
        min_value = 1e-5
        max_value = 1e8
        # NOTE(review): kernel_args is returned as a lazy *generator*, so the
        # caller can consume it only once.
        kernel_args = (kernel_arg.clip(min_value,
                                       max_value).expand_dims(axis=2)
                       for kernel_arg in kernel_args)
        sigma = sigma.clip(min_value, max_value).expand_dims(axis=2)
        return kernel_args, sigma
Example #4
0
    def domain_map(cls, F, xi, beta):
        r"""
        Map raw tensors to valid arguments for constructing a Generalized
        Pareto distribution.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        xi
            Tensor of shape `(*batch_shape, 1)`.
        beta
            Tensor of shape `(*batch_shape, 1)`.

        Returns
        -------
        Tuple[Tensor, Tensor]
            Two squeezed tensors of shape `(*batch_shape)`; both have entries
            mapped to the positive orthant.
        """
        def to_positive(raw):
            # softplus maps onto (0, inf); flooring at cls.eps() keeps the
            # parameter strictly away from zero.
            return F.maximum(softplus(F, raw), cls.eps())

        xi_pos = to_positive(xi)
        beta_pos = to_positive(beta)
        return xi_pos.squeeze(axis=-1), beta_pos.squeeze(axis=-1)