Example #1
    def conditional_expectation(self, dims, *args):
        # A boolean mask over the dimensions is converted to an array of
        # dimension indices. After np.atleast_1d, boolean entries are
        # np.bool_, not bool, hence the tuple in the isinstance check.
        dims = np.atleast_1d(dims)
        if np.all([isinstance(x, (bool, np.bool_)) for x in dims]):
            dims = np.nonzero(dims)[0]

        dims_C = np.setdiff1d(range(self.ndim()), dims)

        if dims_C.size != 0:
            # Means of the bases along the complementary dimensions dims_C,
            # combined multiplicatively according to the multi-indices.
            M = self.bases.mean(dims_C, *args)
            I = self.indices
            J = I.keep_dims(dims)
            m = M[0][I.array[:, dims_C[0]]]
            for i in np.arange(1, len(M)):
                m *= M[i][I.array[:, dims_C[i]]]

            if dims.size == 0:
                return m

            # Match each multi-index of I, restricted to dims, to its row in
            # the reduced index set J, and scatter the partial means m into a
            # (J.cardinal() x I.cardinal()) coefficient matrix.
            ind = np.nonzero(
                np.all(J.array == I.array[:, dims][:, np.newaxis], axis=2))[1]

            d = np.zeros((J.cardinal(), I.cardinal()))
            d[ind, range(I.cardinal())] = m

            h = self.keep_bases(dims)
            # TODO Uncomment when the mappings are implemented
            # h = self.keep_mapping(dims)
            h.indices = J
            h = tensap.FunctionalBasisArray(d, h, I.cardinal())

        else:
            h = tensap.FunctionalBasisArray(np.eye(self.cardinal()), self,
                                            [self.cardinal(), 1])
        return h
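The index-matching step above is the subtle part. Below is a self-contained NumPy sketch of it, with hypothetical index sets standing in for I.array, J.array and the partial means m; it is an illustration, not tensap code.

import numpy as np

# Hypothetical multi-index set over two dimensions, and the distinct
# indices kept along dimension 0 (the retained dims).
I_array = np.array([[0, 0], [0, 1], [1, 0], [2, 1]])
dims = np.array([0])
J_array = np.unique(I_array[:, dims], axis=0)   # [[0], [1], [2]]

# For each row of I_array[:, dims], find the matching row of J_array.
ind = np.nonzero(
    np.all(J_array == I_array[:, dims][:, np.newaxis], axis=2))[1]

# Scatter hypothetical partial means into the coefficient matrix d.
m = np.array([0.5, 0.25, 0.5, 0.25])
d = np.zeros((J_array.shape[0], I_array.shape[0]))
d[ind, range(I_array.shape[0])] = m
print(d)  # row j holds the means of the multi-indices matching J_array[j]
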
Example #2

    def solve(self):
        '''
        Solution of the minimization problem.

        Returns
        -------
        sol : numpy.ndarray or tensap.FunctionalBasisArray
            The solution of the minimization problem.
        output : dict
            Outputs of the algorithm.
        '''
        self.initialize()

        if self.initial_guess is None:
            self.initial_guess = tf.random.normal([self.basis_eval.shape[1]],
                                                  dtype=tf.float64)
        else:
            self.initial_guess = tf.convert_to_tensor(self.initial_guess,
                                                      dtype=tf.float64)

        basis_eval = self.basis_eval
        training_data = self.training_data

        def risk():
            fun_eval = tf.squeeze(tf.tensordot(basis_eval, var, [1, 0]))
            out = self.loss_function.risk_estimation(fun_eval, training_data)
            return out

        var = tf.Variable(self.initial_guess, dtype=tf.float64)
        for _ in range(self.options['max_iterations']):
            var0 = var.numpy()
            self.optimizer.minimize(risk, var_list=[var])

            stagnation = (tf.linalg.norm(var0 - var) /
                          tf.linalg.norm(var0)).numpy()
            if stagnation < self.options['stagnation']:
                break

        sol = var.numpy()

        output = {}
        if self.test_error:
            f_eval = np.matmul(self.basis_eval_test, sol)
            test_error = self.loss_function.test_error(f_eval, self.test_data)
            if isinstance(test_error, tf.Tensor):
                test_error = test_error.numpy()
            output['test_error'] = test_error

        if self.basis is not None:
            if np.ndim(sol) == 1:
                sol = tensap.FunctionalBasisArray(sol, self.basis)
            else:
                sol = tensap.FunctionalBasisArray(sol, self.basis,
                                                  sol.shape[1])

        return sol, output
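The loop above amounts to gradient-based minimization of an empirical risk over the coefficients of a linear model, stopped on relative stagnation of the iterates. A minimal standalone sketch follows, assuming a hypothetical quadratic risk and random data (A, y, the optimizer and the tolerance are all made up, not part of the source):

import numpy as np
import tensorflow as tf

A = tf.constant(np.random.rand(100, 5))   # hypothetical basis evaluations
y = tf.constant(np.random.rand(100))      # hypothetical training targets

var = tf.Variable(tf.random.normal([5], dtype=tf.float64))
optimizer = tf.optimizers.Adam()

def risk():
    # Linear model evaluated on the basis, plugged into a quadratic risk.
    f = tf.squeeze(tf.tensordot(A, var, [[1], [0]]))
    return tf.reduce_mean(tf.square(f - y))

for _ in range(1000):
    var0 = var.numpy()
    optimizer.minimize(risk, var_list=[var])
    if np.linalg.norm(var0 - var.numpy()) / np.linalg.norm(var0) < 1e-6:
        break  # relative stagnation of the iterates
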
Example #3
    def projection(self, fun, G):
        '''
        Compute the projection of the function fun onto the functional basis
        using the integration rule G.

        Parameters
        ----------
        fun : function or tensap.Function
            The function to project.
        G : tensap.IntegrationRule
            The integration rule used for the projection.

        Returns
        -------
        tensap.FunctionalBasisArray
            The projection of the function fun onto the functional basis using
            the integration rule G.

        '''

        A = self.eval(G.points)
        W = diags(G.weights)

        y = fun(G.points)
        if self.is_orthonormal:
            u = np.matmul(np.transpose(A), W.dot(y))
        else:
            u = np.linalg.solve(np.matmul(np.transpose(A), W.dot(A)),
                                np.matmul(np.transpose(A), W.dot(y)))
        if u.ndim == 1:
            u = np.reshape(u, [-1, 1])
        return tensap.FunctionalBasisArray(u, self, u.shape[1])
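For context, a typical call might look as follows, assuming tensap's usual constructors (NormalRandomVariable, PolynomialFunctionalBasis, orthonormal_polynomials, gauss_integration_rule); treat the exact setup as an illustration:

import numpy as np
import tensap

X = tensap.NormalRandomVariable()
# Hermite polynomial basis, orthonormal with respect to X.
H = tensap.PolynomialFunctionalBasis(X.orthonormal_polynomials(), range(10))
G = X.gauss_integration_rule(20)   # 20-point Gauss quadrature rule
f = H.projection(np.cos, G)        # L2 projection of cos onto H
print(f.eval(np.array([0.])))      # should be close to cos(0) = 1
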
Example #4
    def interpolate(self, y, x=None):
        '''
        Compute the interpolation of a function y (or of its values) on the
        functional basis, associated with a set of n interpolation points x.

        Parameters
        ----------
        y : function or list or numpy.ndarray
            The function to interpolate, or values of it.
        x : list or numpy.ndarray, optional
            The interpolation points. The default is None, indicating to
            deduce them from the basis.

        Returns
        -------
        f : tensap.FunctionalBasisArray
            The computed interpolation.

        '''
        if x is None:
            x = self.interpolation_points()
        if callable(y):
            y = y(x)

        if np.ndim(y) == 1:
            y = np.reshape(y, [-1, 1])

        hx = self.eval(x)
        data = np.linalg.solve(hx, y)
        f = tensap.FunctionalBasisArray(data, self, np.shape(y)[1])
        f.measure = self.measure
        return f
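A typical call, under the same assumptions about tensap's constructors as above; omitting x lets the basis supply its own interpolation points:

import numpy as np
import tensap

X = tensap.UniformRandomVariable(-1, 1)
# Legendre polynomial basis, orthonormal with respect to X.
H = tensap.PolynomialFunctionalBasis(X.orthonormal_polynomials(), range(8))
f = H.interpolate(np.sin)          # points deduced from the basis
x_test = np.linspace(-1, 1, 5)
print(np.column_stack((np.ravel(f.eval(x_test)), np.sin(x_test))))
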
Example #5

    def solve(self):
        '''
        Solution (Ordinary or Regularized) of the minimization problem and
        cross-validation procedure.

        Returns
        -------
        sol : numpy.ndarray or tensap.FunctionalBasisArray
            The solution of the minimization problem.
        output : dict
            Outputs of the algorithm.

        '''
        self.initialize()

        if self.basis_adaptation:
            sol, output = self._solve_basis_adaptation()
        elif self.regularization:
            sol, output = self._solve_regularized()
        else:
            sol, output = self._solve_standard()

        if self.test_error:
            f_eval = np.matmul(self.basis_eval_test, sol)
            output['test_error'] = self.loss_function.test_error(
                f_eval, self.test_data, np.linalg.norm(sol)**2)

        if self.basis is not None:
            sol = tensap.FunctionalBasisArray(sol, self.basis)

        return sol, output
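The squared norm passed to test_error suggests a density-estimation setting. A hypothetical usage sketch follows, assuming the enclosing class is tensap.LinearModelLearningDensityL2 and that training_data holds the sample (both are assumptions):

import tensap

X = tensap.NormalRandomVariable()
H = tensap.PolynomialFunctionalBasis(X.orthonormal_polynomials(), range(10))
x_train = X.random(1000)           # samples whose density is estimated

solver = tensap.LinearModelLearningDensityL2()
solver.basis = H
solver.basis_eval = H.eval(x_train)
solver.training_data = [x_train]   # exact training_data layout is an assumption
sol, output = solver.solve()       # sol is a tensap.FunctionalBasisArray
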
Example #6
    def solve(self):
        '''
        Solution (Ordinary or Regularized) of the Least-Squares problem and
        cross-validation procedure.

        Returns
        -------
        sol : numpy.ndarray or tensap.FunctionalBasisArray
            The solution of the minimization problem.
        output : dict
            Outputs of the algorithm.

        '''
        self.initialize()

        A = self.basis_eval
        y = self.training_data[1]

        if self.shared_coefficients:
            # A 3D basis evaluation (sample x basis x output) is flattened so
            # that all outputs share the same coefficients.
            if np.ndim(A) == 3:
                assert (np.ndim(y) == 1 and A.shape[2] == 1) or \
                    (np.ndim(y) == 2 and A.shape[2] == y.shape[1]), \
                    'A.shape[2] should be equal to y.shape[1].'

                A = np.transpose(A, [0, 2, 1])
                A = np.reshape(A, [-1, A.shape[2]], order='F')
                y = np.reshape(y, [-1, 1], order='F')
                self.basis_eval = A
                self.training_data[1] = y
        if np.ndim(y) == 2:
            n = y.shape[1]
        else:
            n = 1

        output = {}

        if self.weights is not None:
            weights = diags(np.sqrt(self.weights))
            A = weights.dot(A)
            y = weights.dot(y)

        if not self.basis_adaptation:
            if not self.regularization:
                sol, output = self._solve_ols()
            else:
                if n == 1:
                    sol, output = self._solve_regularized_ls()
                else:
                    sol = np.zeros([A.shape[1], n])
                    output['error'] = np.zeros(n)
                    output['outputs'] = np.empty(n, dtype=object)
                    for ind in range(n):
                        self.training_data[1] = y[:, ind]
                        sol[:, ind], output_tmp = self._solve_regularized_ls()
                        output['error'][ind] = output_tmp['error']
                        output['outputs'][ind] = output_tmp
        else:
            if n == 1:
                sol, output = self._solve_basis_adaptation()
            else:
                sol = np.zeros([A.shape[1], n])
                output['error'] = np.zeros(n)
                output['outputs'] = np.empty(n, dtype=object)
                for ind in range(n):
                    self.training_data[1] = y[:, ind]
                    sol[:, ind], output_tmp = self._solve_basis_adaptation()
                    output['error'][ind] = output_tmp['error']
                    output['outputs'][ind] = output_tmp

        if self.test_error:
            if n == 1:
                f_eval = np.matmul(self.basis_eval_test, sol)
                output['test_error'] = self.loss_function.test_error(
                    f_eval, self.test_data)
            else:
                output['test_error'] = np.zeros(n)
                f_eval = np.matmul(self.basis_eval_test, sol)
                for ind in range(n):
                    test_data = [self.test_data[0], self.test_data[1][:, ind]]
                    output['test_error'][ind] = self.loss_function.test_error(
                        f_eval[:, ind], test_data)

        if self.basis is not None:
            sol = tensap.FunctionalBasisArray(sol, self.basis, n)

        return sol, output
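A hypothetical end-to-end usage, assuming the enclosing class is tensap.LinearModelLearningSquareLoss and the training_data layout shown below:

import numpy as np
import tensap

X = tensap.UniformRandomVariable(-1, 1)
H = tensap.PolynomialFunctionalBasis(X.orthonormal_polynomials(), range(6))
x_train = X.random(200)
y_train = np.cos(np.pi * x_train)        # hypothetical scalar-valued data

solver = tensap.LinearModelLearningSquareLoss()
solver.basis = H
solver.basis_eval = H.eval(x_train)
solver.training_data = [None, y_train]   # inputs unused by the OLS solve
sol, output = solver.solve()
print(output.get('error'))               # cross-validation error, if computed
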