Example #1
    def nn_predict(self, weights, inputs):
        # Forward pass through a fully connected network: each layer applies
        # an affine transform followed by a softplus activation, and the last
        # layer's pre-activation outputs are returned.
        outputs = None
        for W, b in self.unpack_layers(weights):
            outputs = np.dot(inputs, W) + b
            inputs = softplus(outputs)
        return outputs
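
The softplus activation used above is not shown in the snippet. A minimal, numerically stable sketch of it (assuming NumPy; the definition softplus(x) = log(1 + exp(x)) is the standard one, not taken from this code base):

    import numpy as np

    def softplus(x):
        # log(1 + exp(x)), computed stably as max(x, 0) + log1p(exp(-|x|))
        return np.maximum(x, 0) + np.log1p(np.exp(-np.abs(x)))

    # Example: softplus behaves like a smooth, always-positive ReLU.
    z = np.array([-5.0, 0.0, 5.0])
    print(softplus(z))  # ~[0.0067, 0.6931, 5.0067]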
Example #2
    def optimize_step(self, k_params, n_params, update=True):
        """
        Runs one step of optimization
        Args:
            k_params (): optimizer params for kernel optimization
            n_params (): optimizer params for noise uptimization
            update (): whether or not to update parameters

        Returns: updated k_params, n_params

        """

        # Lazily set up the dimensions to optimize, the optimizer, and the
        # per-kernel gradient functions on the first call.
        if self.opt_idx is None:
            self.opt_idx = list(range(self.d))
        if self.optimizer is None:
            self.optimizer = SGD()
        if self.kernel_grads is None:
            self.kernel_grads = []
            for i in self.opt_idx:
                self.kernel_grads.append((jacobian(self.kernels[i].eval),
                                          jacobian(self.kernels[i].log_prior)))

        # Optimizing kernel hyperparameters
        for i, d in enumerate(self.opt_idx):
            k_d_params = k_params[i]
            grad_kern_marginal = np.clip(self.grad_marginal_k(i, d),
                                         -self.max_grad, self.max_grad)
            grad_kern_penalty = np.clip(self.grad_penalty_k(i, d),
                                        -self.max_grad, self.max_grad)
            grad_kern = grad_kern_marginal - grad_kern_penalty
            self.kernels[d].params, k_d_params = \
                self.optimizer.step((self.kernels[d].params, grad_kern),
                                    k_d_params)
            k_params[i] = k_d_params

        # Optimizing observation noise in an unconstrained space: the noise is
        # reparameterized through inverse-softplus, and the gradient is mapped
        # back via the chain rule d/dt softplus(t) = sigmoid(t) = expit(t).
        noise_trans = inv_softplus(self.noise)
        grad_noise = np.clip(self.grad_marginal_noise(), -self.max_grad,
                             self.max_grad)
        grad_noise_trans = expit(noise_trans) * grad_noise
        noise_trans, n_params = \
            self.optimizer.step((noise_trans, grad_noise_trans),
                                n_params)
        self.noise = softplus(noise_trans)

        # Updating kernel matrices and calculating the loss
        self.construct_Ks()
        loss = -self.marginal()
        if update:
            self.solve()
        return k_params, n_params, loss
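
The noise update above follows a common pattern: optimize a strictly positive quantity in an unconstrained space and map it back through softplus. A standalone sketch of that pattern, with NumPy-only stand-ins for inv_softplus, expit, and the snippet's SGD optimizer (the numbers are made up):

    import numpy as np

    def softplus(x):
        return np.logaddexp(0.0, x)          # log(1 + exp(x)), computed stably

    def inv_softplus(y):
        return y + np.log(-np.expm1(-y))     # log(exp(y) - 1), computed stably

    def expit(x):
        return 1.0 / (1.0 + np.exp(-x))      # logistic sigmoid, derivative of softplus

    noise = 0.5                              # current (positive) noise value
    grad_noise = 0.2                         # hypothetical dLoss/dnoise
    lr = 0.1

    t = inv_softplus(noise)                  # move to unconstrained space
    grad_t = expit(t) * grad_noise           # chain rule: d softplus(t)/dt = sigmoid(t)
    t -= lr * grad_t                         # plain gradient step stands in for SGD().step
    noise = softplus(t)                      # map back; the result is always positive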
Example #3
    def unpack_params(self, params):
        # Constrain the leading hyperparameters to be positive via softplus and
        # pass the remaining entries through unchanged; note that params[1] is
        # transformed and returned twice here.
        return softplus(params[0]), softplus(params[1]), \
               softplus(params[1]), params[2:]
Example #4
    def unpack_params(self, params):
        # Split the flat parameter vector into three equal blocks of length
        # self.a and constrain each block to be positive via softplus.
        return softplus(params[:self.a]), softplus(params[self.a: 2 * self.a]), \
               softplus(params[2 * self.a: len(params)])
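
To make the slicing in Examples #3 and #4 concrete, here is a small standalone sketch (NumPy assumed; the block size a stands in for the attribute self.a, and np.logaddexp(0, x) is used as a stable softplus):

    import numpy as np

    a = 3                                    # hypothetical block size (self.a in Example #4)
    params = np.arange(-4.0, 5.0)            # flat parameter vector of length 3 * a

    # Slice into three equal blocks and constrain each to be positive.
    block1 = np.logaddexp(0.0, params[:a])
    block2 = np.logaddexp(0.0, params[a: 2 * a])
    block3 = np.logaddexp(0.0, params[2 * a:])
    print(block1, block2, block3)            # every entry is strictly positive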