Example #1
    def forward(self, rep):
        """
        Linearly mix a representation.

        Parameters
        ----------
        rep : :obj:`list` of :obj:`torch.Tensor`
            Representation to mix.

        Returns
        -------
        rep : :obj:`list` of :obj:`torch.Tensor`
            Mixed representation.
        """
        if GTau.from_rep(rep) != self.tau_in:
            raise ValueError('Tau of input rep does not match initialized tau!'
                             ' rep: {} tau: {}'.format(GTau.from_rep(rep), self.tau_in))

        return g_torch.mix(self.weights, rep)
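A usage sketch: the mix is a per-key matrix multiplication over the channel dimension. Below is a minimal standalone imitation in plain PyTorch, assuming each fragment rep[key] has shape (..., channels_in, dim) and a weight of shape (channels_out, channels_in); toy_mix and all shapes here are hypothetical, not this library's API.

import torch

def toy_mix(weights, rep):
    # matmul broadcasts over leading batch dimensions, so each weight
    # matrix mixes only the channel axis of its fragment.
    return {key: torch.matmul(weights[key], part) for key, part in rep.items()}

rep = {(1, 1): torch.randn(4, 3, 4)}    # batch 4, 3 channels, dim (1+1)*(1+1)
weights = {(1, 1): torch.randn(2, 3)}   # mix 3 input channels down to 2
mixed = toy_mix(weights, rep)
print(mixed[(1, 1)].shape)              # torch.Size([4, 2, 4])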
Example #2
from math import inf

import torch

# GTau, GVec, and complex_kron_product are provided by the surrounding
# library; only the standard imports needed by this snippet are shown.
def cg_product(cg_dict,
               rep1,
               rep2,
               maxdim=inf,
               aggregate=False,
               ignore_check=False):
    """
    Explicit function to calculate the Clebsch-Gordan product.
    See the documentation for CGProduct for more information.

    rep1 : list of :obj:`torch.Tensors`
        First :obj:`GVector` in the CG product
    rep2 : list of :obj:`torch.Tensors`
        First :obj:`GVector` in the CG product
    maxdim : :obj:`int`, optional
        Minimum weight to include in CG Product
    aggregate : :obj:`bool`, optional
        Apply an "aggregation" operation, or a pointwise convolution
        with a :obj:`GVector` as a filter.
    cg_dict : :obj:`CGDict`, optional
        Specify a Clebsch-Gordan dictionary. If not specified, one will be
        generated automatically at runtime based upon maxdim.
    ignore_check : :obj:`bool`
        Ignore GVec initialization check. Necessary for current implementation
        of :obj:`zonal_functions`. Use with caution.
    """
    tau1 = GTau.from_rep(rep1)
    tau2 = GTau.from_rep(rep2)

    assert tau1.channels and (
        tau1.channels == tau2.channels
    ), 'The number of fragments must be the same for each part! {} {}'.format(
        tau1, tau2)

    maxk1 = max({key[0] for key in rep1.keys()})
    maxn1 = max({key[1] for key in rep1.keys()})
    maxk2 = max({key[0] for key in rep2.keys()})
    maxn2 = max({key[1] for key in rep2.keys()})
    maxDim = min(max(maxk1 + maxk2, maxn1 + maxn2) + 1, maxdim)

    if (cg_dict.maxdim < maxDim) or (cg_dict.maxdim < max(
            maxk1, maxn1, maxk2, maxn2)):
        raise ValueError(
            'CG Dictionary maxdim ({}) not sufficiently large for (maxdim, L1, L2) = ({} {} {})'
            .format(cg_dict.maxdim, maxDim, max(maxk1, maxn1),
                    max(maxk2, maxn2)))
    assert (
        cg_dict.transpose), 'This operation uses transposed CG coefficients!'

    new_rep = {}

    for (k1, n1), irrep1 in rep1.items():
        for (k2, n2), irrep2 in rep2.items():
            if (max(k1, n1, k2, n2) > maxDim - 1
                    or irrep1.shape[-2] == 0 or irrep2.shape[-2] == 0):
                continue
            # cg_mat, aka H, is initially a dictionary {(k,n):rectangular matrix},
            # which when flattened/stacked over keys becomes an orthogonal square matrix
            # we create a sorted list of keys first and then stack the rectangular matrices over keys
            cg_mat_keys = [
                (k, n)
                for k in range(abs(k1 - k2), min(maxdim, k1 + k2 + 1), 2)
                for n in range(abs(n1 - n2), min(maxdim, n1 + n2 + 1), 2)
            ]
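            # Example: (k1, n1) = (k2, n2) = (1, 1) with a large maxdim gives
            # cg_mat_keys = [(0, 0), (0, 2), (2, 0), (2, 2)].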
            cg_mat = torch.cat(
                [cg_dict[((k1, n1), (k2, n2))][key] for key in cg_mat_keys],
                -2)
            # Pairwise tensor multiply parts, loop over atom parts accumulating each.
            irrep_prod = complex_kron_product(irrep1,
                                              irrep2,
                                              aggregate=aggregate)
            # Multiply by the CG matrix, effectively turning the product into stacked irreps. Channels are preserved
            # Have to add a dummy index because matmul acts over the last two dimensions, so the vector dimension on the right needs to be -2
            cg_decomp = torch.squeeze(
                torch.matmul(cg_mat, torch.unsqueeze(irrep_prod, -1)), -1)
            # Split the result into a list of separate irreps
            split = [(k + 1) * (n + 1) for (k, n) in cg_mat_keys]
            cg_decomp = torch.split(cg_decomp, split, dim=-1)
            # Add the irreps to the dictionary entries, first keeping the channel dimension as a list

            for idx, key in enumerate(cg_mat_keys):
                new_rep.setdefault(key, [])
                new_rep[key].append(cg_decomp[idx])
    # at the end concatenate over the channel dimension back into torch tensors

    new_rep = {key: torch.cat(val, dim=-2) for key, val in new_rep.items()}

    # TODO: Rewrite so ignore_check not necessary
    return GVec(new_rep, ignore_check=ignore_check)
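A sketch of the decompose-and-split step at the heart of the loop above: multiply the flattened tensor product by a square change-of-basis matrix, then cut the result into per-key blocks. The snippet below uses a random orthogonal matrix in place of real Clebsch-Gordan coefficients, and the keys are invented, so it only illustrates the shape bookkeeping, not the actual decomposition.

import torch

keys = [(0, 0), (1, 1)]
split_sizes = [(k + 1) * (n + 1) for (k, n) in keys]   # [1, 4]
total = sum(split_sizes)                               # 5

irrep_prod = torch.randn(2, 3, total)                  # stand-in for the kron product
basis = torch.linalg.qr(torch.randn(total, total)).Q   # random orthogonal stand-in for cg_mat

# Add a dummy index so matmul treats the last axis as a column vector,
# exactly as in cg_product above, then split along the irrep dimension.
decomp = torch.matmul(basis, irrep_prod.unsqueeze(-1)).squeeze(-1)
blocks = torch.split(decomp, split_sizes, dim=-1)
print([b.shape for b in blocks])   # [torch.Size([2, 3, 1]), torch.Size([2, 3, 4])]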