def Lines(grid: np.ndarray, prior: Tuple[np.ndarray] = None) -> Formula:
    """Collection of lines

    One basis function ``t ↦ c - t`` for each grid point ``c``.

    """
    lines = listmap(lambda c: lambda t: (c - t))(grid)
    prior = (
        (np.zeros(len(grid)), 1e-6 * np.identity(len(grid)))
        if prior is None else prior
    )
    return Formula(terms=[lines], prior=prior)
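
# Usage sketch (illustrative, not part of the module):
#
#     grid = np.arange(0, 5, 1.0)
#     formula = Lines(grid)
#     # One basis function per grid point:
#     # len(formula.terms[0]) == len(grid) == 5
#     # formula.terms[0][2](3.0) == grid[2] - 3.0 == -1.0
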
def ReLU(grid: np.ndarray, prior: Tuple[np.ndarray] = None) -> Formula:
    """Rectified linear unit shaped basis

    One hinge basis function per interior grid point; the grid endpoints are
    excluded.

    """
    relus = listmap(lambda c: lambda t: (t > c) * (c - t))(grid[1:-1])
    prior = (
        (np.zeros(len(grid) - 2), 1e-6 * np.identity(len(grid) - 2))
        if prior is None else prior
    )
    return Formula(terms=[relus], prior=prior)
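
# Usage sketch (illustrative): each basis function is zero below its grid
# point c and linear above it. As written the slope is -1 (the term is
# ``(t > c) * (c - t)``); the regression coefficients absorb the sign.
#
#     grid = np.arange(0, 5, 1.0)
#     formula = ReLU(grid)
#     # len(formula.terms[0]) == len(grid) - 2 == 3    (endpoints dropped)
#     # formula.terms[0][0](0.5) == 0.0                (below c = grid[1] = 1.0)
#     # formula.terms[0][0](3.0) == 1.0 - 3.0 == -2.0  (above c)
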
def FlippedReLU(grid: np.ndarray, prior: Tuple[np.ndarray] = None) -> Formula:
    """Mirrored ReLU basis

    Like ``ReLU`` but each basis function is nonzero below its grid point
    instead of above it.

    """
    relus = listmap(lambda c: lambda t: (t < c) * (c - t))(grid[1:-1])
    prior = (
        (np.zeros(len(grid) - 2), 1e-6 * np.identity(len(grid) - 2))
        if prior is None else prior
    )
    return Formula(terms=[relus], prior=prior)
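
# Usage sketch (illustrative; assumes ``Formula`` defines ``__add__`` for
# combining term collections -- an assumption not shown in this section):
#
#     grid = np.arange(0, 5, 1.0)
#     formula = ReLU(grid) + FlippedReLU(grid)   # hinges opening both ways
#     # FlippedReLU(grid).terms[0][0](0.5) == 1.0 - 0.5 == 0.5
#     # FlippedReLU(grid).terms[0][0](3.0) == 0.0
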
def Kron(a, b) -> Formula:
    """Tensor product of two Formula terms

    Non-commutative!

    Let ``u, v`` be eigenvectors of matrices ``A, B`` with eigenvalues
    ``λ, μ``, respectively. Then ``u ⊗ v`` is an eigenvector of ``A ⊗ B``
    with eigenvalue ``λμ``.

    Parameters
    ----------
    a : Formula
        Left input
    b : Formula
        Right input

    """
    # NOTE: This is somewhat experimental. The terms must correspond to
    #       zero-mean random variables. Then the Kronecker product of the
    #       covariances corresponds to the product of the independent random
    #       variables (cf. the formula for the variance of a product of
    #       independent random variables).
    #
    # TODO / FIXME: Don't flatten a and b.
    #
    gen = (
        # Careful! Must be in the same order as in a Kronecker product.
        (f, g) for f in sum(a.terms, []) for g in sum(b.terms, [])
    )
    # Outer product of the basis functions
    basis = listmap(lambda funcs: lambda t: funcs[0](t) * funcs[1](t))(gen)
    # Kronecker product of the prior means and covariances
    return Formula(
        terms=[basis],
        prior=(
            np.kron(a.prior[0], b.prior[0]),
            # Although we kron-multiply precision matrices here (inverses of
            # covariances), the order of the inputs doesn't flip because
            # (A ⊗ B) ^ -1 = (A ^ -1) ⊗ (B ^ -1)
            np.kron(a.prior[1], b.prior[1])
        )
    )
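
# Usage sketch (illustrative; the inputs are just small formulas built above,
# not a realistic tensor-product model): the resulting basis multiplies every
# basis function of ``a`` with every basis function of ``b``, and the Gaussian
# prior is the Kronecker product of the two priors.
#
#     a = Lines(np.array([0.0, 1.0]))        # 2 basis functions
#     b = Lines(np.array([0.0, 1.0, 2.0]))   # 3 basis functions
#     ab = Kron(a, b)
#     # len(ab.terms[0]) == 2 * 3 == 6
#     # ab.prior[1].shape == (6, 6)
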
def BSpline1d(grid, order=3, extrapolate=True,
              prior=None, mu_basis=None, mu_hyper=None) -> Formula:
    """B-spline basis on a fixed one-dimensional grid

    The number of spline basis functions is always
    ``N = len(grid) + order - 2``.

    TODO: Verify that this doesn't break when scaling the grid
          (extrapolation + damping).

    Parameters
    ----------
    grid : np.ndarray
        Discretization grid
    order : int
        Order of the spline function. Polynomial degree is ``order - 1``
    extrapolate : bool
        Extrapolate outside of the grid using basis functions "touching" the
        endpoints
    prior : Tuple[np.ndarray]
        Prior mean and precision matrix
    mu_basis : List[Callable]
        Basis for estimating the mean hyperparameter
    mu_hyper : Tuple[np.ndarray]
        Hyperprior mean and precision matrix

    """
    mu_basis = [] if mu_basis is None else mu_basis
    grid_ext = utils.extend_spline_grid(grid, order)

    def build_basis_element(spline_arg):
        (knots, extrapolate, loc) = spline_arg

        def right_damp(t):
            return t > knots[-1]

        def left_damp(t):
            return knots[0] > t

        def element(t):
            sp_element = interpolate.BSpline.basis_element(
                knots,
                extrapolate if loc in (-1, 1) else False
            )
            return sp_element(t) if loc == 0 else (
                sp_element(t) * right_damp(t) if loc == -1 else
                sp_element(t) * left_damp(t)
            )

        return utils.compose2(np.nan_to_num, element)

    basis = listmap(build_basis_element)(
        utils.gen_spline_args_from_grid_ext(grid_ext, order, extrapolate)
    )

    # Default prior is white noise
    prior = (
        (np.zeros(len(basis)), 1e-6 * np.identity(len(basis)))
        if prior is None else prior
    )
    return Formula(
        terms=[mu_basis + basis],
        prior=prior if mu_hyper is None else utils.concat_gaussians(
            [mu_hyper, prior]
        )
    )
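
# Usage sketch (illustrative): a cubic (order=3) B-spline basis on a uniform
# grid. The basis-function count follows the formula in the docstring.
#
#     grid = np.arange(0.0, 11.0, 1.0)      # 11 grid points
#     formula = BSpline1d(grid, order=3)
#     # len(formula.terms[0]) == len(grid) + 3 - 2 == 12
#     # formula.prior[1].shape == (12, 12)
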