def multiply(A: FiniteOp[IntermVecT, OutVecT], B: CombT) -> RkhsObject:
    """Compose/apply the finite-rank operator `A` with `B`.

    If `B` is itself a `FiniteOp`, the result is the composed operator
    `A ∘ B`.  Otherwise `B` is treated as a feature vector and the result
    is the RKHS object obtained by applying `A` to it.
    (Conceptually: ``T = TypeVar("T"); multiply(A: FiniteOp, B: T) -> T``.)
    """
    if isinstance(B, FiniteOp):
        # Operator-operator composition: contract through the gram matrix
        # of A's input features against B's output features.
        composed = A.matr @ inner(A.inp_feat, B.outp_feat) @ B.matr
        return FiniteOp(B.inp_feat, A.outp_feat, composed)

    # B is a feature vector: apply the operator.
    coeff = A.matr @ inner(A.inp_feat, B)
    if len(B) == 1:
        # A single input collapses to one RKHS element over A's output points.
        return FiniteVec.construct_RKHS_Elem(A.outp_feat.k,
                                             A.outp_feat.inspace_points,
                                             np.squeeze(coeff))
    # Several inputs: one copy of the output points per result column.
    return FiniteVec(A.outp_feat.k,
                     np.tile(A.outp_feat.inspace_points, (coeff.shape[1], 1)),
                     np.hstack(coeff.T),
                     points_per_split=coeff.shape[0])
def __matmul__(self, inp: CombT) -> RkhsObject:
    """Apply this finite map to `inp`.

    `inp` may be another `FiniteMap` (operator composition), an RKHS
    vector, or a raw array (which is first embedded with this map's
    input kernel).
    """
    if isinstance(inp, FiniteMap):
        # Map-map composition: contract through the gram matrix between
        # this map's input features and inp's output features.
        gram = inner(self.inp_feat, inp.outp_feat)
        if inp.debias_outp:
            composed = self.matr @ (gram - gram @ inp.bias.T) @ inp.matr
            carried_bias = (self.matr @ gram @ inp.bias.T).T
        else:
            composed = self.matr @ gram @ inp.matr
            carried_bias = (composed @ inp.bias.T).T
        result = FiniteMap(inp.inp_feat, self.outp_feat, composed,
                           outp_bias=self.bias + carried_bias)
        result.mean_center_inp = inp.mean_center_inp
        return result

    if isinstance(inp, DeviceArray):
        # Promote a raw array to a feature vector over the input kernel.
        inp = FiniteVec(self.inp_feat.k, np.atleast_2d(inp))
    weights = (self.matr @ inner(self.inp_feat, inp)).T
    if self.debias_outp:
        reductions = [DecenterOutFeat(weights)]
    else:
        if self._normalize:
            # Row-normalize the linear map before adding the bias.
            weights = weights / weights.sum(1, keepdims=True)
        reductions = [LinearReduce(weights + self.bias)]
    if len(inp) == 1:
        # A single input collapses to one RKHS element.
        reductions.append(Sum())
    return self.outp_feat.extend_reduce(reductions)
def solve(self, result: FiniteVec) -> FiniteVec:
    """Solve ``self @ x == result`` for the RKHS element ``x``.

    Args:
        result: The right-hand side; must be supported on the same
            in-space points as this operator's output features.

    Returns:
        The solution as a single RKHS element over `result`'s points.

    Raises:
        AssertionError: if `result` lives on different in-space points
            than this operator's output features.
    """
    if not np.all(self.outp_feat.inspace_points == result.inspace_points):
        # BUG FIX: the original `assert ()` raised a message-less
        # AssertionError and — worse — was stripped under `python -O`,
        # silently returning None.  Raise explicitly (same exception
        # type) so the failure survives -O and carries a message.
        raise AssertionError(
            "result must be supported on the operator's output in-space points")
    s = np.linalg.solve(self.matr @ inner(self.inp_feat, self.inp_feat),
                        result.prefactors)
    return FiniteVec.construct_RKHS_Elem(result.k, result.inspace_points, s)
def multiply(A: FiniteOp, B: RkhsObject, copy_tensors=True) -> RkhsObject:
    """Compose/apply the finite-rank operator `A` with `B`.

    If `B` is a `FiniteOp`, returns the composed operator `A ∘ B`;
    otherwise `B` is treated as a feature vector and the result of
    applying `A` to it is returned.
    (Conceptually: ``T = TypeVar("T"); multiply(A: FiniteOp, B: T) -> T``.)

    Args:
        A: The operator to apply.
        B: An operator or feature vector.
        copy_tensors: Accepted for interface compatibility but currently
            ignored — copying is not implemented yet.
    """
    # BUG FIX: the original guard `assert (copy_tensors is False, "...")`
    # asserted a two-element tuple, which is always truthy, so it never
    # fired — and every default caller passes copy_tensors=True.  The dead
    # assert is removed (rather than "fixed" into a guard that would break
    # all existing callers) and the limitation documented above.
    if isinstance(B, FiniteOp):
        # Explicit type routing instead of `except AttributeError`, which
        # could swallow a genuine AttributeError raised inside this branch
        # and silently fall through to the vector path.
        return FiniteOp(B.inp_feat, A.outp_feat,
                        A.matr @ inner(A.inp_feat, B.outp_feat) @ B.matr)
    coeff = A.matr @ inner(A.inp_feat, B)
    if len(B) == 1:
        # Single element: result is one RKHS element over A's output points.
        return FiniteVec.construct_RKHS_Elem(A.outp_feat.k,
                                             A.outp_feat.inspace_points,
                                             np.squeeze(coeff))
    # Multiple elements: tile the output points once per result column.
    return FiniteVec(A.outp_feat.k,
                     np.tile(A.outp_feat.inspace_points, (coeff.shape[1], 1)),
                     np.hstack(coeff.T),
                     points_per_split=coeff.shape[0])
def __init__(self, cm_op: Cmo[SpVec, FiniteVec], initial_spvec: SpVec, dim_index: int):
    """Initialize incremental state from a conditional mean operator and a seed vector.

    Args:
        cm_op: Conditional mean operator mapping `SpVec` inputs to
            `FiniteVec` output embeddings.
        initial_spvec: Seed sequence vector; must have length 1.
        dim_index: Number of leading in-space coordinates treated as the
            (time/position) index — presumably; confirm against callers.
    """
    assert (len(initial_spvec) == 1)
    # Average step between consecutive index coordinates of the seed points;
    # used below to extrapolate the next index.
    self._inc = (initial_spvec.inspace_points[1:, :dim_index] - initial_spvec.inspace_points[:-1, :dim_index]).mean(0)
    self._cmo = cm_op
    # Raw (unprocessed) inner-product state against the operator's input
    # features; kept so it can be updated incrementally later — TODO confirm.
    self._current_raw = self._cmo.inp_feat._inner_raw(initial_spvec)
    # Number of observation points in the seed vector.
    self._num_obs = initial_spvec.inspace_points.shape[0]
    # Extrapolated index coordinates of the next observation.
    self._next_idx = initial_spvec.inspace_points[
        -1, :dim_index] + self._inc
    self._spvec_history = initial_spvec
    # Finalize the raw state into a gram column and form the current
    # output embedding as an RKHS element over the operator's output points.
    gram = self._cmo.inp_feat._inner_process_raw(self._current_raw)
    self.current_outp_emb = FiniteVec.construct_RKHS_Elem(
        self._cmo.outp_feat.k, self._cmo.outp_feat.inspace_points,
        np.squeeze(self._cmo.matr @ gram))
def solve(self, inp: CombT) -> RkhsObject:
    """Apply the regularized inverse of this covariance operator to `inp`.

    If `inp` is an RKHS vector of length 1 (a mean embedding): solve the
    inverse problem to find dP/dρ from μ_P = C_ρ dP/dρ, where C_ρ is the
    covariance operator represented by this object (`self`), ρ is the
    reference distribution, and μ_P is given by `inp`.

    If `inp` is a `FiniteMap`: solve the inverse problem to find the
    operator B from A = C_ρ B, where A is given by `inp`.

    Args:
        inp (InpVecT): The embedding of the distribution of interest, the
            map of interest, or a raw array (promoted to an embedding).
    """
    if isinstance(inp, FiniteMap):
        features = inp.outp_feat
    else:
        if isinstance(inp, DeviceArray):
            # Promote a raw array to an RKHS vector over the input kernel.
            inp = FiniteVec(self.inp_feat.k, np.atleast_2d(inp))
        #assert(len(inp) == 1)
        features = inp
    # Regularization strength derived from the available sample counts,
    # each clamped to at least 1.
    ridge = CovOp.regul(max(features.nsamps().min(), 1),
                        max(self.inp_feat.nsamps(True), 1))
    return self.inv(ridge) @ inp
def __init__(self, cm_op: Union[Cmo[SpVec, FiniteVec], Cmo[CombVec[SpVec, FiniteVec], FiniteVec]], initial_spvec: SpVec, dim_index: int, idx_ro: RolloutIdx = None):
    """Set up updatable inner-product state and dispatch based on the operator's input type.

    Args:
        cm_op: Conditional mean operator; its input features are either a
            plain `SpVec` or a `CombVec` combining an `SpVec` with a
            `FiniteVec`.
        initial_spvec: Seed sequence vector; must have length 1.
        dim_index: Passed through conceptually as the index dimension —
            note it is not used in this constructor.
        idx_ro: Optional rollout index helper.
    """
    assert len(initial_spvec) == 1
    self._cmo = cm_op
    self._idx_ro = idx_ro
    if cm_op.inp_feat.__class__ == CombVec:
        # Combined input features: track only the SpVec half (`v1`)
        # incrementally; the output embedding starts as the summed output
        # features and is not updated here (no-op updater).
        self.uinner = UpdatableSpVecInner(self._cmo.inp_feat.v1, initial_spvec)
        self.current_outp_emb = self._cmo.outp_feat.sum(True)
        self.get_embedding = self.__get_embedding_CombVec
        self.__update_current_outp_emb = lambda: None
    else:
        # Plain SpVec input: embedding is matr @ gram over the output
        # points, refreshed by the SpVec-specific updater.
        self.uinner = UpdatableSpVecInner(self._cmo.inp_feat, initial_spvec)
        self.current_outp_emb = FiniteVec.construct_RKHS_Elem(
            self._cmo.outp_feat.k, self._cmo.outp_feat.inspace_points,
            np.squeeze(self._cmo.matr @ self.uinner.current_gram))
        self.get_embedding = self.__get_embedding_SpVec
        self.__update_current_outp_emb = self.__update_current_outp_emb_SpVec
def __call__(self, inp: DeviceArray) -> RkhsObject:
    """Evaluate the map on a raw array by first embedding it with the input kernel."""
    embedded = FiniteVec(self.inp_feat.k, np.atleast_2d(inp))
    return self @ embedded
def from_Samples(cls, kern, inspace_points, prefactors=None, regul=0.01):
    """Alternate constructor: build the operator from a kernel and sample points."""
    samples = FiniteVec(kern, inspace_points, prefactors)
    return cls(samples, regul=regul)
def __init__(self, inp_feat: FiniteVec, regul=0.01):
    """Build the operator from input features.

    The feature vector's prefactors are moved onto the diagonal of
    `matr`, and the stored input/output features get all-ones prefactors.

    Args:
        inp_feat: Input feature vector supplying kernel, points, and
            the prefactors that become the operator matrix diagonal.
        regul: Regularization strength (default 0.01).
    """
    # BUG FIX: the original chained assignment set `self.inp_feat` twice
    # (`self.inp_feat = self.outp_feat = self.inp_feat = ...`); the
    # redundant assignment is dropped — same resulting state.
    self.inp_feat = self.outp_feat = inp_feat.updated(
        np.ones(len(inp_feat), dtype=inp_feat.prefactors.dtype))
    self.matr = np.diag(inp_feat.prefactors)
    self._inv = None  # presumably a lazily-computed inverse cache — TODO confirm
    self.regul = regul