Example #1
def from_scalar(domain):
    """
    Extension from scalar fields to tensor-valued fields.
    """
    indices = [[i, a.idx] for a in domain\
                          for i in range(a.begin, a.end)]
    shape = [domain.size, len(domain.fibers)]
    return sparse.matrix(shape, indices)
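A minimal self-contained sketch of the same construction (torch sparse tensors stand in for the sparse module; the two fibers, their sizes, and offsets are made up for illustration):

import torch

# toy fibers: (scalar index, begin offset, size)
fibers = [(0, 0, 3), (1, 3, 2)]
indices = [[i, idx] for idx, begin, size in fibers
                    for i in range(begin, begin + size)]
J = torch.sparse_coo_tensor(torch.tensor(indices).t(),
                            torch.ones(len(indices)), (5, 2))
# each scalar value is copied across its whole fiber
print(torch.sparse.mm(J, torch.tensor([[1.], [2.]])).squeeze())
# tensor([1., 1., 1., 2., 2.])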
Example #2
def zeta(K, degree):
    """ Zeta transform: automorphism of K[d]. """
    z, chains = [], zeta_chains(K, degree)
    for d in range(0, degree + 1):
        fibers = [[K[d][ca], K[d][cb]] for ca, cb in chains[d]]
        indices = [ij for p in fibers for ij in extend(*p)]
        n = K[d].size
        z += [sparse.matrix((n, n), indices)]
    return z
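As a worked toy case, the zeta transform sums a field over all cells below a given cell, (zeta u)_a = sum_{b <= a} u_b. A dense sketch on the subsets of {0, 1}; the function above assembles the same operator degree by degree as sparse matrices:

import itertools
import torch

cells = [frozenset(s) for r in range(3)
         for s in itertools.combinations(range(2), r)]  # {}, {0}, {1}, {0,1}
zeta = torch.tensor([[1. if b <= a else 0. for b in cells] for a in cells])
u = torch.tensor([1., 2., 3., 4.])
print(zeta @ u)  # tensor([ 1.,  3.,  4., 10.])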
Example #3
def restrict(domain, subdomain):
    """
    Restriction matrix. 
    """
    pairs = [[cb, domain.fibers[cb.key]] for cb in subdomain]
    indices = [[cb.begin + i, ca.begin + i]\
                for cb, ca in pairs\
                for i in range(cb.size)]
    return sparse.matrix([subdomain.size, domain.size], indices)
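Sketch of a restriction matrix as a plain row selection, assuming a toy domain of size 5 whose subdomain keeps one fiber occupying coordinates 3 and 4:

import torch

sub_begin, sub_size, domain_size = 3, 2, 5
indices = [[i, sub_begin + i] for i in range(sub_size)]
R = torch.sparse_coo_tensor(torch.tensor(indices).t(),
                            torch.ones(sub_size), (sub_size, domain_size))
x = torch.arange(5.).unsqueeze(1)
print(torch.sparse.mm(R, x).squeeze())  # tensor([3., 4.])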
Example #4
def pullback(domains, f=None, fmap=None):
    """
    Pullback matrix of a map f: A -> B between domain keys.
    """
    A, B = domains
    f = f if callable(f) else lambda x: x
    fmap = fmap if callable(fmap) else lambda ca: lambda x: x
    indices = [ij for ca in A\
                  for ij in pull(ca, B.get(f(ca.key)), fmap(ca))]
    return sparse.matrix([A.size, B.size], indices)
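Sketch of a pullback matrix, assuming scalar fibers and a toy map f between keys; the pullback acts on fields over B by precomposition, (f* u)_a = u_{f(a)}:

import torch

A, B = [0, 1, 2], ['x', 'y']
f = {0: 'x', 1: 'y', 2: 'x'}
indices = [[i, B.index(f[a])] for i, a in enumerate(A)]
P = torch.sparse_coo_tensor(torch.tensor(indices).t(),
                            torch.ones(len(A)), (len(A), len(B)))
u = torch.tensor([[10.], [20.]])         # field on B
print(torch.sparse.mm(P, u).squeeze())   # tensor([10., 20., 10.])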
Example #5
def Condition(K):
    """ Conditioning map from K[0] onto the microstate sheaf K.microstates. """
    E = K.microstates
    indices = []

    for Ea in K[0]:
        a = Ea.key[-1].list()
        for i, ai in enumerate(a):
            Ei = E[ai]
            # send a point p of the region Ea to the state of its i-th variable
            fi = lambda p: Ei.shape.index(Ea.shape.coords(p)[i])
            indices += op.push(Ei, Ea, fi)

    mat = sparse.matrix([E.size, K[0].size], indices)
    conditioning = topos.Linear([K[0], E], mat)
    return conditioning
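The indices pushed above send each point of a region to the state of one of its variables, so the assembled matrix sums a joint distribution on the region to its one-variable marginals. A standalone numeric sketch for a single region on two binary variables (op.push and the fiber classes are taken on faith; everything below is illustrative):

import itertools
import torch

region_shape = (2, 2)
points = list(itertools.product(*[range(n) for n in region_shape]))
i = 0                                           # condition on variable 0
indices = [[p[i], k] for k, p in enumerate(points)]
M = torch.sparse_coo_tensor(torch.tensor(indices).t(),
                            torch.ones(len(points)), (2, len(points)))
joint = torch.tensor([[.1], [.2], [.3], [.4]])  # p(x0, x1), flattened
print(torch.sparse.mm(M, joint).squeeze())      # p(x0) ≈ tensor([0.3000, 0.7000])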
Example #6
    def lift(cls, N, f):
        """ Lift a map f: f.src -> f.tgt to N parallel copies, acting independently on each copy. """
        src, tgt = cls(N, f.src), cls(N, f.tgt)

        if isinstance(f, topos.Linear):
            data = f.data.coalesce()
            idx = data.indices().t()
            val = data.values()

            # offset the sparse indices of f by (b, a) for each of the N blocks
            a, b = f.src.size, f.tgt.size
            ba = torch.tensor([b, a])[None, :]
            indices = torch.cat([idx + n * ba for n in range(N)])
            values = val.repeat(N)
            mat = sparse.matrix([N * b, N * a], indices, values)

            return topos.Linear([src, tgt], mat, f"[{f.name}]")

        if isinstance(f, topos.Functional):

            def liftf(xs):
                # apply f to each of the N components of xs independently
                ys = torch.cat(
                    [f(f.src.field(x)).data for x in xs.data.view([N, -1])])
                return tgt.field(ys)

            return topos.Functional([src, tgt], liftf, f'[{f.name}]')
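A self-contained version of the sparse block-diagonal construction in the Linear branch, assuming a toy 2x3 matrix repeated N = 2 times:

import torch

f = torch.tensor([[1., 0., 2.],
                  [0., 3., 0.]]).to_sparse().coalesce()
idx, val = f.indices().t(), f.values()
b, a = f.shape
N = 2
ba = torch.tensor([b, a])[None, :]
indices = torch.cat([idx + n * ba for n in range(N)])
values = val.repeat(N)
lifted = torch.sparse_coo_tensor(indices.t(), values, (N * b, N * a))
print(lifted.to_dense())  # two copies of f along the diagonal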
Example #7
    def __init__(self, keys, shape=None, degree=None, ftype=Fiber):
        """
        Create a sheaf from a dictionary of fiber shapes.
        """
        self.trivial = (shape is None) and not isinstance(keys, dict)

        super().__init__(keys, shape, degree, ftype)

        #--- Trivialise sheaf ---
        if not hasattr(self, 'scalars'):
            if self.trivial:
                self.scalars = self
            elif isinstance(keys, dict):
                self.scalars = self.__class__(keys.keys(), degree=degree)
            else:
                self.scalars = self.__class__(keys)

        #--- From/to scalars ---
        src, J = self.scalars, from_scalar(self)
        extend = Linear([src, self], J, "J")
        sums = Linear([self, src], J.t(), "\u03a3")
        if not self.trivial:
            sizes = extend @ sums @ self.ones()
            means = (1 / sizes) * sums

        #   =   =   Statistics  =   =   =

        #--- Energies / log-likelihoods ---
        _ln = self.map(lambda d: -torch.log(d), "(-ln)")
        _lnT = self.scalars.map(lambda d: -torch.log(d))
        #--- Gibbs states / densities ---
        exp_ = self.map(lambda d: torch.exp(-d), "(e-)")
        #--- Free energy ---
        freenrj = _lnT @ sums @ exp_
        #--- Normalisation ---
        norm = Functional([self], lambda f: f / sums(f), "(1 / \u03a3)")
        gibbs = (norm @ exp_).rename("(e- / \u03a3 e-)")

        #--- Local Fourier transforms
        N = self.size
        FT, iFT = {}, {}
        for a, Ea in self.items():
            if Ea.shape not in FT:
                FT[Ea.shape] = sparse.Fourier(Ea.shape)
                iFT[Ea.shape] = sparse.iFourier(Ea.shape)
        if N:
            FTs = [(Ea, FT[Ea.shape]) for a, Ea in self.items()]
            iFTs = [(Ea, iFT[Ea.shape]) for a, Ea in self.items()]

            ij = torch.cat([(Fa[0] + Ea.begin).t() for Ea, Fa in FTs])
            ji = torch.cat([(Fa[0] + Ea.begin).t() for Ea, Fa in iFTs])
            Fij = torch.cat([Fa[1] for Ea, Fa in FTs])
            Fji = torch.cat([Fa[1] for Ea, Fa in iFTs])

            fft = sparse.matrix([N, N], ij, Fij)
            ifft = sparse.matrix([N, N], ji, Fji)
        else:
            fft = sparse.matrix([0, 0], [])
            ifft = sparse.matrix([0, 0], [])

        self.maps = {
            "id": Linear([self], eye(self.size)),
            "_ln": _ln,
            "exp_": exp_,
            "freenrj": freenrj,
            "normalise": norm,
            "gibbs": gibbs
        }
        if not self.trivial:
            self.maps |= {
                "extend": extend,
                "sums": sums,
                "means": means,
                "fft": Linear([self], fft, name="Fourier"),
                "ifft": Linear([self], ifft, name="Fourier*")
            }
        for k, fk in self.maps.items():
            setattr(self, k, fk)
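Sketch of the fiberwise assembly used for the local Fourier transforms above: each fiber contributes a block whose indices are offset by the fiber's begin index before concatenation into one sparse operator (toy dense blocks stand in for sparse.Fourier):

import torch

blocks = [(0, torch.tensor([[1., 1.], [1., -1.]])),      # (begin, block)
          (2, torch.tensor([[1., 1., 1.],
                            [1., -1., 0.],
                            [1., 0., -1.]]))]
N = sum(B.shape[0] for _, B in blocks)
ij, vals = [], []
for begin, B in blocks:
    nz = B.nonzero()
    ij.append(nz + begin)                  # shift block indices into place
    vals.append(B[nz[:, 0], nz[:, 1]])
F = torch.sparse_coo_tensor(torch.cat(ij).t(), torch.cat(vals), (N, N))
print(F.to_dense())                        # block-diagonal operator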
Example #8
    def __init__(self, K, shape=2, degree=-1, sort=1, free=True):
        """ Simplicial complex on the nerve of a hypergraph K. """

        #--- Compute Nerve ---
        K = Hypergraph(K) if not isinstance(K, Hypergraph) else K
        self.hypergraph = K
        nerve = K.nerve(degree, sort=sort)
        self.rank = len(nerve) - 1

        #--- Scalar Fields ---
        self.trivial = (shape is None)
        if not self.trivial:
            N  = [Simplicial(Nk, None, degree=k)\
                           for k, Nk in enumerate(nerve)]
            self.scalars = Nerve(*N)

        #--- Local microstates ---
        if isinstance(shape, int):
            E = lambda i: shape
        elif callable(shape):
            E = shape
        elif isinstance(shape, dict):
            E = lambda i: shape[i]

        self.microstates = Sheaf({i: [E(i)] for i in K.vertices().list()})

        self.vertices = self.microstates.scalars

        #--- Nerve fibers ---
        NE = lambda c: Shape(*[E(i) for i in c[-1].list()])
        nerve = [Simplicial(Nk, NE, degree=k)\
                       for k, Nk in enumerate(nerve)]

        super().__init__(*nerve)

        #--- Effective Energy gradient ---

        if self.rank >= 1:
            d0 = coface(self, 0, 0)
            d1 = coface(self, 0, 1)

            def Deff(U):
                return d0 @ U + torch.log(d1 @ torch.exp(-U))

            self.Deff = Functional.map([self[0], self[1]], Deff, "\u018a")

        #--- Spectral decomposition

        Z = Nerve(*(
            Simplicial({a: [n - 1 for n in Ea.shape]
                        for a, Ea in Kd.items()}) for Kd in self.grades))

        self.cocycles = Z

        #--- Cocycle representation (for measures) ---

        resZ = []
        for Zd, Kd in zip(Z.grades, self.grades):
            nat = lambda a: embed_interaction(Zd[a.key], Kd[a.key])
            resZ += [Kd.pull(Zd, None, "Res Z", nat)]
        ResZ = GradedLinear([self, Z], resZ, 0, "Res Z")
        self.res_Z = ResZ
        self.to_cocycle = ResZ @ self.fft
        self.from_cocycle = self.cozeta @ self.ifft @ ResZ.t()

        #--- Coboundary representation (for potentials) ---

        Is = []
        pairs = zeta_chains(self, self.rank)
        for Kd, Zd, pairs_d in zip(self.grades, Z.grades, pairs):
            ij = []
            for a, b in pairs_d:
                Zb, Ka = Zd[b], Kd[a]
                ij += interaction(Zb, Ka)
            mat = sparse.matrix([Zd.size, Kd.size], ij)
            Is += [Linear([Kd, Zd], mat, name="I") @ Kd.fft]

        self.interaction = GradedLinear([self, Z], Is, name="I")
        self.from_interaction = self.ifft @ self.res_Z.t()
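Numeric sketch of the effective energy gradient defined earlier in this example, Deff(U) = d0 @ U + log(d1 @ exp(-U)), with toy dense matrices standing in for the two coface maps d0 and d1:

import torch

d0 = torch.tensor([[1., 0., 0.],
                   [0., 1., 0.]])
d1 = torch.tensor([[0., 1., 0.],
                   [0., 0., 1.]])
U = torch.tensor([1.0, 2.0, 0.5])        # potential on degree-0 cells

def Deff(U):
    return d0 @ U + torch.log(d1 @ torch.exp(-U))

print(Deff(U))   # degree-1 field: tensor([-1.0000, 1.5000])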