Example #1
    def set_taus(self, tau1=None, tau2=None):
        self._tau1 = GTau(tau1) if tau1 else None
        self._tau2 = GTau(tau2) if tau2 else None

        if self._tau1 and self._tau2:
            if not self.tau1.channels or (self.tau1.channels !=
                                          self.tau2.channels):
                raise ValueError(
                    'The number of fragments must be the same for each part! '
                    '{} {}'.format(self.tau1, self.tau2))
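A hedged usage sketch of the check above (illustrative only: it assumes GTau accepts a dict mapping (k, n) keys to multiplicities and exposes a .channels property that is None when the multiplicities are not uniform, which is how the code reads; `layer` is a hypothetical object carrying this method):

layer.set_taus(tau1={(0, 0): 4, (1, 1): 4}, tau2={(2, 2): 4})  # OK: 4 channels everywhere
layer.set_taus(tau1={(0, 0): 4}, tau2={(0, 0): 3})             # raises ValueError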
Example #2
    def __init__(self, tau_in, tau_out, real=False, weight_init='randn', gain=1, device=None, dtype=None):
        super().__init__(device=device, dtype=dtype)
        tau_in = GTau(tau_in)

        # Remove any irreps with zero multiplicity.
        tau_in = {key: val for key, val in tau_in.items() if val}
        # Allow the output tau to be given as a single integer, interpreted as
        # a uniform number of output channels for every key present in tau_in.
        if isinstance(tau_out, int):
            tau_out = {key: tau_out for key, val in tau_in.items() if val}

        self.tau_in = GTau(tau_in)
        self.tau_out = GTau(tau_out)
        self.real = real

        if weight_init == 'randn':
            weights = GWeight.randn(self.tau_in, self.tau_out, device=device, dtype=dtype)
        elif weight_init == 'rand':
            weights = GWeight.rand(self.tau_in, self.tau_out, device=device, dtype=dtype)
            # Map uniform samples from [0, 1) to [-1, 1).
            weights = 2 * weights - 1
        else:
            raise NotImplementedError('weight_init can only be randn or rand for now')

        # Multiply by gain, normalized per key by the larger weight dimension
        # and, on diagonal keys (k, k), by an extra factor of 10**k.
        gain = GScalar({key: torch.tensor(
            [gain / max(shape) / (10 ** key[0] if key[0] == key[1] else 1), 0],
            device=device, dtype=dtype).view([2, 1, 1])
            for key, shape in weights.shapes.items()})
        weights = gain * weights

        self.weights = weights.as_parameter()
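As a quick check of the arithmetic in the gain line above (assuming weights.shapes maps each key (k, n) to the 2D shape of its weight block, which is how it is used there):

# key (2, 2) with shape (8, 4):  scale = gain / 8 / 10**2  (diagonal key: extra 10**k factor)
# key (1, 0) with shape (8, 4):  scale = gain / 8          (off-diagonal key: no extra factor)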
Example #3
    def forward(self, rep):
        """
        Linearly mix a representation.

        Parameters
        ----------
        rep : :obj:`list` of :obj:`torch.Tensor`
            Representation to mix.

        Returns
        -------
        rep : :obj:`list` of :obj:`torch.Tensor`
            Mixed representation.
        """
        if GTau.from_rep(rep) != self.tau_in:
            raise ValueError('Tau of input rep does not match initialized tau!'
                             ' rep: {} tau: {}'.format(GTau.from_rep(rep), self.tau_in))

        return g_torch.mix(self.weights, rep)
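Taken together with the constructor in Example #2, usage looks roughly like this sketch (illustrative names; it assumes `rep` is a dict-like representation for which GTau.from_rep yields the tau it was built with):

# Map every key of a 3-channel input rep to 5 output channels.
mix = MixReps(tau_in={(0, 0): 3, (1, 1): 3}, tau_out=5)
mixed = mix(rep)  # raises ValueError unless GTau.from_rep(rep) == mix.tau_in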
Example #4
    def __init__(self, taus_in, maxdim=None):
        super().__init__()

        self.taus_in = taus_in = [GTau(tau) for tau in taus_in if tau]

        if maxdim is None:
            # Default to one more than the largest weight appearing in any input key.
            maxdim = max(k for tau in taus_in for key in tau.keys() for k in key) + 1
        self.maxdim = maxdim

        self.tau_out = {}
        for tau in taus_in:
            for key, val in tau.items():
                if val > 0:
                    if max(key) <= maxdim - 1:
                        self.tau_out.setdefault(key, 0)
                        self.tau_out[key] += val
        self.tau_out = GTau(self.tau_out)

        self.all_keys = list(self.tau_out.keys())
Example #5
    def __init__(self, taus_in, tau_out, maxdim=None,
                 real=False, weight_init='randn', gain=1,
                 device=None, dtype=None):
        super().__init__(device=device, dtype=dtype)

        self.cat_reps = CatReps(taus_in, maxdim=maxdim)
        self.mix_reps = MixReps(self.cat_reps.tau, tau_out,
                                real=real, weight_init=weight_init, gain=gain,
                                device=device, dtype=dtype)

        self.taus_in = taus_in
        self.tau_out = GTau(self.mix_reps.tau)
Example #6
def cg_product_tau(tau1, tau2, maxdim=inf):
    """
    Calulate output multiplicity of the CG Product of two G Vectors
    given the multiplicty of two input G Vectors.

    Parameters
    ----------
    tau1 : :class:`list` of :class:`int`, :class:`GTau`.
        Multiplicity of first representation.

    tau2 : :class:`list` of :class:`int`, :class:`GTau`.
        Multiplicity of second representation.

    maxdim : :class:`int`
        Largest weight to include in CG Product.

    Returns
    -------
    tau : :class:`GTau`
        Multiplicity of output representation.

    """
    tau1 = GTau(tau1)
    tau2 = GTau(tau2)
    tau = {}

    for (k1, n1) in tau1.keys():
        for (k2, n2) in tau2.keys():
            if max(k1, n1, k2, n2) >= maxdim:
                continue
            kmin, kmax = abs(k1 - k2), min(k1 + k2, maxdim - 1)
            nmin, nmax = abs(n1 - n2), min(n1 + n2, maxdim - 1)
            for k in range(kmin, kmax + 1, 2):
                for n in range(nmin, nmax + 1, 2):
                    tau.setdefault((k, n), 0)
                    tau[(k, n)] += tau1[(k1, n1)] * tau2[(k2, n2)]

    return GTau(tau)
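A worked instance of the selection rule implemented above: in each slot the output weight steps by 2 from |k1 - k2| to min(k1 + k2, maxdim - 1), and multiplicities multiply:

tau = cg_product_tau({(1, 1): 2}, {(1, 1): 3})
# k and n each range over {0, 2}, so:
# tau == GTau({(0, 0): 6, (0, 2): 6, (2, 0): 6, (2, 2): 6})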
Example #7
from lgn.models.autotest.lgn_tests import _gen_rot
import torch  # required below for torch.device / torch.float (missing from this excerpt)
from lgn.g_lib import GTau  # used below; import path assumed from the lgn package layout
from colorama import Fore, Back, Style

device = torch.device('cpu')
dtype = torch.float
"""
Script checking covariance of the CGProduct of two GVec's. Can specify batch size and representation types of the vectors below. 
Simply run this file with no arguments to see the results.
"""

##### INPUTS

nbatch = 1  # number of batches (will be equal for both representations)
natoms = 1  # number of atoms in each batch (will be equal for both representations)
nchan = 1  # number of channels in each irrep of each representation (must be uniform because of our restrictions)
tau1 = GTau({(1, 1): nchan})  # the representation types of the two vectors to be multiplied
tau2 = GTau({(1, 1): nchan})
aggregate = True  # whether to aggregate: creates rep1 with batch dimensions (nbatch, natoms, natoms)
                  # and rep2 with (nbatch, natoms), then sums over one pair of atom indices

(alpha, beta, gamma) = (1 + 2j, 1 + 3j, 1 + 1j)  # complex Euler angles to rotate by

accuracy = 1e-4  # absolute error up to which answers should match

############################################################## TEST

maxk1 = max({key[0] for key in tau1.keys()})
maxn1 = max({key[1] for key in tau1.keys()})
maxk2 = max({key[0] for key in tau2.keys()})
maxn2 = max({key[1] for key in tau2.keys()})
maxdim = max(maxk1 + maxk2, maxn1 + maxn2) + 1
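With the inputs above this evaluates to maxdim = 3: maxk1 = maxn1 = maxk2 = maxn2 = 1, so max(1 + 1, 1 + 1) + 1 = 3, the smallest CG dictionary size that can hold the product of two (1, 1) irreps.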
Example #8
    def tau(self):
        return GTau({(0, 0): self.channels_out})
Example #9
    def __init__(self,
                 maxdim,
                 max_zf,
                 num_cg_levels,
                 num_channels,
                 weight_init,
                 level_gain,
                 num_basis_fn,
                 top,
                 input,
                 num_mpnn_layers,
                 activation='leakyrelu',
                 pmu_in=False,
                 add_beams=False,
                 scale=1,
                 full_scalars=False,
                 mlp=True,
                 mlp_depth=None,
                 mlp_width=None,
                 device=torch.device('cpu'),
                 dtype=None,
                 cg_dict=None):

        logging.info('Initializing network!')
        level_gain = expand_var_list(level_gain, num_cg_levels)

        maxdim = expand_var_list(maxdim, num_cg_levels)
        max_zf = expand_var_list(max_zf, num_cg_levels)
        num_channels = expand_var_list(num_channels, num_cg_levels + 1)

        logging.info('maxdim: {}'.format(maxdim))
        logging.info('max_zf: {}'.format(max_zf))
        logging.info('num_channels: {}'.format(num_channels))

        # maxdim and max_zf are per-level lists at this point; take the max over both.
        super().__init__(maxdim=max(maxdim + max_zf),
                         device=device,
                         dtype=dtype,
                         cg_dict=cg_dict)
        device, dtype, cg_dict = self.device, self.dtype, self.cg_dict

        logging.info('CGdict maxdim: {}'.format(cg_dict.maxdim))

        self.num_cg_levels = num_cg_levels
        self.num_channels = num_channels
        self.scale = scale
        self.full_scalars = full_scalars
        self.pmu_in = pmu_in

        # Set up spherical harmonics
        if pmu_in:
            self.zonal_fns_in = ZonalFunctions(max(max_zf),
                                               device=device,
                                               dtype=dtype,
                                               cg_dict=cg_dict)
        self.zonal_fns = ZonalFunctionsRel(max(max_zf),
                                           device=device,
                                           dtype=dtype,
                                           cg_dict=cg_dict)

        # Set up position functions, now independent of spherical harmonics
        self.rad_funcs = RadialFilters(max_zf,
                                       num_basis_fn,
                                       num_channels,
                                       num_cg_levels,
                                       device=self.device,
                                       dtype=self.dtype)
        tau_pos = self.rad_funcs.tau

        if num_cg_levels:
            if add_beams:
                num_scalars_in = 2
            else:
                num_scalars_in = 1
        else:
            num_scalars_in = 202  # the second number should match the number of atoms (including beams)

        num_scalars_out = num_channels[0]

        if not pmu_in:
            self.input_func_atom = InputLinear(num_scalars_in,
                                               num_scalars_out,
                                               device=self.device,
                                               dtype=self.dtype)
        else:
            self.input_func_atom = MixReps(
                GTau({(0, 0): num_scalars_in,
                      **{(l, l): 1 for l in range(1, max_zf[0] + 1)}}),
                GTau({(l, l): num_scalars_out for l in range(max_zf[0] + 1)}),
                device=self.device,
                dtype=self.dtype)

        tau_in_atom = self.input_func_atom.tau

        self.lgn_cg = LGNCG(maxdim,
                            max_zf,
                            tau_in_atom,
                            tau_pos,
                            num_cg_levels,
                            num_channels,
                            level_gain,
                            weight_init,
                            mlp=mlp,
                            mlp_depth=mlp_depth,
                            mlp_width=mlp_width,
                            activation=activation,
                            device=self.device,
                            dtype=self.dtype,
                            cg_dict=self.cg_dict)

        tau_cg_levels_atom = self.lgn_cg.tau_levels_atom

        self.get_scalars_atom = GetScalarsAtom(tau_cg_levels_atom,
                                               device=self.device,
                                               dtype=self.dtype)

        num_scalars_atom = self.get_scalars_atom.num_scalars

        if top.lower().startswith('lin'):
            self.output_layer_atom = OutputLinear(num_scalars_atom,
                                                  bias=True,
                                                  device=self.device,
                                                  dtype=self.dtype)
        elif top.lower().startswith('pmlp'):
            self.output_layer_atom = OutputPMLP(num_scalars_atom,
                                                num_mixed=mlp_width,
                                                device=self.device,
                                                dtype=self.dtype)
        else:
            # Without this branch, output_layer_atom would be silently undefined.
            raise ValueError('top must start with "lin" or "pmlp"; got {}'.format(top))

        logging.info('Model initialized. Number of parameters: {}'.format(
            sum(p.nelement() for p in self.parameters())))
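For reference, expand_var_list is relied on above to broadcast scalar hyperparameters to per-level lists; here is a minimal sketch of the behavior those calls assume (an assumption inferred from usage, not the package's actual implementation):

def expand_var_list(var, length):
    # Repeat a scalar `length` times; pass lists/tuples through (assumed pre-sized).
    return list(var) if isinstance(var, (list, tuple)) else [var] * length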
Example #10
class CatReps(Module):
    """
    Module to concanteate a list of reps. Specify input type for error checking
    and to allow network to fit into main architecture.

    Parameters
    ----------
    taus_in : :obj:`list` of :obj:`GTau` or compatible.
        List of taus of input reps.
    maxdim : :obj:`bool`, optional
        Maximum weight to include in concatenation.
    """
    def __init__(self, taus_in, maxdim=None):
        super().__init__()

        self.taus_in = taus_in = [GTau(tau) for tau in taus_in if tau]

        if maxdim is None:
            # Default to one more than the largest weight appearing in any input key.
            maxdim = max(k for tau in taus_in for key in tau.keys() for k in key) + 1
        self.maxdim = maxdim

        self.tau_out = {}
        for tau in taus_in:
            for key, val in tau.items():
                if val > 0:
                    if max(key) <= maxdim - 1:
                        self.tau_out.setdefault(key, 0)
                        self.tau_out[key] += val
        self.tau_out = GTau(self.tau_out)

        self.all_keys = list(self.tau_out.keys())

    def forward(self, reps):
        """
        Concatenate a list of reps

        Parameters
        ----------
        reps : :obj:`list` of :obj:`GTensor` subclasses
            List of representations to concatenate.

        Returns
        -------
        reps_cat : :obj:`list` of :obj:`torch.Tensor`
        """
        # Drop Nones
        reps = [rep for rep in reps if rep is not None]

        # Error checking
        reps_taus_in = [rep.tau for rep in reps]
        if reps_taus_in != self.taus_in:
            raise ValueError('Tau of input reps does not match predefined version! '
                             'got: {} expected: {}'.format(reps_taus_in, self.taus_in))

        if self.maxdim is not None:
            reps = [rep.truncate(self.maxdim) for rep in reps]

        return g_torch.cat(reps)

    @property
    def tau(self):
        return self.tau_out
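A hedged usage sketch of CatReps (rep_a and rep_b are illustrative GVec-like inputs whose .tau values match the taus given at construction):

cat = CatReps([GTau({(0, 0): 2}), GTau({(0, 0): 3, (1, 1): 1})])
# maxdim defaults to 2 here, and cat.tau == GTau({(0, 0): 5, (1, 1): 1})
reps_cat = cat([rep_a, rep_b])  # concatenates along the channel dimension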
Example #11
    def tau(self):
        return GTau([])
Example #12
def cg_product(cg_dict,
               rep1,
               rep2,
               maxdim=inf,
               aggregate=False,
               ignore_check=False):
    """
    Explicit function to calculate the Clebsch-Gordan product.
    See the documentation for CGProduct for more information.

    rep1 : list of :obj:`torch.Tensor`
        First :obj:`GVector` in the CG product
    rep2 : list of :obj:`torch.Tensor`
        Second :obj:`GVector` in the CG product
    maxdim : :obj:`int`, optional
        Maximum weight to include in the CG product
    aggregate : :obj:`bool`, optional
        Apply an "aggregation" operation, or a pointwise convolution
        with a :obj:`GVector` as a filter.
    cg_dict : :obj:`CGDict`, optional
        Specify a Clebsch-Gordan dictionary. If not specified, one will be
        generated automatically at runtime based upon maxdim.
    ignore_check : :obj:`bool`
        Ignore GVec initialization check. Necessary for current implementation
        of :obj:`zonal_functions`. Use with caution.
    """
    tau1 = GTau.from_rep(rep1)
    tau2 = GTau.from_rep(rep2)

    assert tau1.channels and (
        tau1.channels == tau2.channels
    ), 'The number of fragments must be the same for each part! {} {}'.format(
        tau1, tau2)

    maxk1 = max({key[0] for key in rep1.keys()})
    maxn1 = max({key[1] for key in rep1.keys()})
    maxk2 = max({key[0] for key in rep2.keys()})
    maxn2 = max({key[1] for key in rep2.keys()})
    maxDim = min(max(maxk1 + maxk2, maxn1 + maxn2) + 1, maxdim)

    if (cg_dict.maxdim < maxDim) or (cg_dict.maxdim < max(
            maxk1, maxn1, maxk2, maxn2)):
        raise ValueError(
            'CG Dictionary maxdim ({}) not sufficiently large for (maxdim, L1, L2) = ({} {} {})'
            .format(cg_dict.maxdim, maxdim,
                    max(maxk1, maxn1), max(maxk2, maxn2)))
    assert (
        cg_dict.transpose), 'This operation uses transposed CG coefficients!'

    new_rep = {}

    for (k1, n1), irrep1 in rep1.items():
        for (k2, n2), irrep2 in rep2.items():
            if (max(k1, n1, k2, n2) > maxDim - 1
                    or irrep1.shape[-2] == 0 or irrep2.shape[-2] == 0):
                continue
            # cg_mat, aka H, is initially a dictionary {(k,n):rectangular matrix},
            # which when flattened/stacked over keys becomes an orthogonal square matrix
            # we create a sorted list of keys first and then stack the rectangular matrices over keys
            cg_mat_keys = [
                (k, n)
                for k in range(abs(k1 - k2), min(maxdim, k1 + k2 + 1), 2)
                for n in range(abs(n1 - n2), min(maxdim, n1 + n2 + 1), 2)
            ]
            cg_mat = torch.cat(
                [cg_dict[((k1, n1), (k2, n2))][key] for key in cg_mat_keys],
                -2)
            # Pairwise tensor multiply parts, loop over atom parts accumulating each.
            irrep_prod = complex_kron_product(irrep1,
                                              irrep2,
                                              aggregate=aggregate)
            # Multiply by the CG matrix, effectively turning the product into stacked irreps. Channels are preserved
            # Have to add a dummy index because matmul acts over the last two dimensions, so the vector dimension on the right needs to be -2
            cg_decomp = torch.squeeze(
                torch.matmul(cg_mat, torch.unsqueeze(irrep_prod, -1)), -1)
            # Split the result into a list of separate irreps
            split = [(k + 1) * (n + 1) for (k, n) in cg_mat_keys]
            cg_decomp = torch.split(cg_decomp, split, dim=-1)
            # Add the irreps to the dictionary entries, first keeping the channel dimension as a list

            for idx, key in enumerate(cg_mat_keys):
                new_rep.setdefault(key, [])
                new_rep[key].append(cg_decomp[idx])
    # at the end concatenate over the channel dimension back into torch tensors

    new_rep = {key: torch.cat(val, dim=-2) for key, val in new_rep.items()}

    # TODO: Rewrite so ignore_check not necessary
    return GVec(new_rep, ignore_check=ignore_check)
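A call sketch tying this back to cg_product_tau in Example #6 (names as in the covariance script above; illustrative, not a complete runnable script):

prod = cg_product(cg_dict, rep1, rep2, maxdim=3, aggregate=False)
# The output multiplicities should match the tau-level computation:
# GTau.from_rep(prod) == cg_product_tau(GTau.from_rep(rep1), GTau.from_rep(rep2), maxdim=3)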