Example No. 1
def check_S3_quadint_equals_numint(l=1, m=1, n=1, b=10):
    # Create grids on the sphere
    x = S3.meshgrid(b=b, grid_type='SOFT')
    x = np.c_[x[0][..., None], x[1][..., None], x[2][..., None]]

    # Compute quadrature weights
    w = S3.quadrature_weights(b=b, grid_type='SOFT')

    # Define a polynomial function, to be evaluated at one point or at an array of points
    def f1(alpha, beta, gamma):
        df = wigner_D_function(l=l, m=m, n=n, alpha=alpha, beta=beta, gamma=gamma)
        return df * df.conj()

    def f1a(xs):
        d = np.zeros(xs.shape[:-1])
        for i in range(d.shape[0]):
            for j in range(d.shape[1]):
                for k in range(d.shape[2]):
                    # D * conj(D) = |D|^2 is real, so store the real part
                    d[i, j, k] = f1(xs[i, j, k, 0], xs[i, j, k, 1], xs[i, j, k, 2]).real
        return d

    # Obtain the "true" value of the integral of the function over the sphere, using scipy's numerical integration
    # routines
    i1 = S3.integrate(f1, normalize=True)

    # Compute the integral using the quadrature formulae
    i1_w = S3.integrate_quad(f1a(x), grid_type='SOFT', normalize=True, w=w)

    # Check error
    print(b, l, m, n, 'results:', i1_w, i1, 'diff:', np.abs(i1_w - i1))
    assert np.isclose(np.abs(i1_w - i1), 0.0)
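
# A minimal driver for the check above (a sketch; assumes lie_learn and numpy
# provide the names used in the function body):
import numpy as np
import lie_learn.spaces.S3 as S3
from lie_learn.representations.SO3.wigner_d import wigner_D_function

for l in range(3):
    for m in range(-l, l + 1):
        check_S3_quadint_equals_numint(l=l, m=m, n=0, b=10)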
Example No. 2
def _setup_so3_fft(b, nl, weighted):
    from lie_learn.representations.SO3.wigner_d import wigner_d_matrix
    import lie_learn.spaces.S3 as S3
    import numpy as np
    import logging

    betas = (np.arange(2 * b) + 0.5) / (2 * b) * np.pi
    w = S3.quadrature_weights(b)
    assert len(w) == len(betas)

    logging.getLogger("trainer").info("Compute Wigner: b=%d nbeta=%d nl=%d nspec=%d", b, len(betas), nl, nl ** 2)

    dss = []
    for i, beta in enumerate(betas):  # avoid shadowing the bandwidth b
        ds = []
        for l in range(nl):
            d = wigner_d_matrix(l, beta,
                                field='complex', normalization='quantum', order='centered', condon_shortley='cs')
            d = d.reshape(((2 * l + 1) ** 2,))

            if weighted:
                d *= w[i]
            else:
                d *= 2 * l + 1

            ds.append(d)  # d: [m * n]
        ds = np.concatenate(ds)  # [l * m * n]
        dss.append(ds)
    dss = np.stack(dss)  # [beta, l * m * n]
    return dss
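
# Usage sketch for the helper above (hypothetical values; requires lie_learn):
# precompute the (optionally quadrature-weighted) Wigner-d samples.
dss = _setup_so3_fft(b=8, nl=8, weighted=True)
# one row per beta sample, one column per flattened (l, m, n) coefficient:
# dss.shape == (2 * 8, sum((2 * l + 1) ** 2 for l in range(8))) == (16, 680)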
Example No. 3
def so3_integrate(x):
    """
    Integrate a signal on SO(3) using the Haar measure
    """
    import tensorflow as tf
    import lie_learn.spaces.S3 as S3

    # x has shape [..., beta, alpha, gamma] = (..., 2b, 2b, 2b)
    assert x.shape[-1] == x.shape[-2]
    assert x.shape[-2] == x.shape[-3]

    b = x.shape[-1] // 2

    # Assign "w" here directly instead of having a separate function with
    # GPU usage
    w = tf.cast(S3.quadrature_weights(b), tf.float32)

    if isinstance(x, tf.Variable):
        w = tf.Variable(w)

    # Sum over the gamma and alpha axes
    x = tf.reduce_sum(x, axis=-1)
    x = tf.reduce_sum(x, axis=-1)

    # Weighted sum over beta using the quadrature weights
    sz = tf.shape(x)
    x = tf.reshape(x, (-1, 2 * b))
    w = tf.reshape(w, (2 * b, 1))
    x = tf.squeeze(tf.matmul(x, w), axis=-1)
    x = tf.reshape(x, sz[:-1])

    return x
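
# Usage sketch (assumes TensorFlow 2.x and lie_learn are installed):
import tensorflow as tf
b = 4
x = tf.random.normal((2 * b, 2 * b, 2 * b))  # [beta, alpha, gamma]
val = so3_integrate(x)  # 0-d tensor holding the integral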
Example No. 4
def naive_conv(l1=1, m1=1, l2=1, m2=1, g_parameterization='EA313'):
    f1 = lambda t, p: sh(l=l1,
                         m=m1,
                         theta=t,
                         phi=p,
                         field='real',
                         normalization='quantum',
                         condon_shortley=True)
    f2 = lambda t, p: sh(l=l2,
                         m=m2,
                         theta=t,
                         phi=p,
                         field='real',
                         normalization='quantum',
                         condon_shortley=True)

    theta, phi = S2.meshgrid(b=3, grid_type='Gauss-Legendre')
    f1_grid = f1(theta, phi)
    f2_grid = f2(theta, phi)

    alpha, beta, gamma = S3.meshgrid(b=3,
                                     grid_type='SOFT')  # TODO check convention

    f12_grid = np.zeros_like(alpha)
    for i in range(alpha.shape[0]):
        for j in range(alpha.shape[1]):
            for k in range(alpha.shape[2]):
                f12_grid[i, j, k] = naive_S2_conv_v2(
                    f1, f2, alpha[i, j, k], beta[i, j, k], gamma[i, j, k],
                    g_parameterization)
                print(i, j, k, f12_grid[i, j, k])

    return f1_grid, f2_grid, f12_grid
Example No. 5
def cal_haar_measure_weight(b):
    import torch
    import lie_learn.spaces.S3 as S3
    w = torch.tensor(S3.quadrature_weights(b))

    if constant.IS_GPU:  # project-specific configuration flag
        return w.cuda()
    else:
        return w
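
# Usage sketch (hypothetical; `constant.IS_GPU` is a project-specific flag):
w = cal_haar_measure_weight(b=6)  # tensor of shape [2b] = [12]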
Example No. 6
    def __init__(self,
                 res=None,
                 lmax=None,
                 normalization='component',
                 lmax_in=None):
        """
        :param res: resolution of the input as a tuple (beta resolution, alpha resolution)
        :param lmax: maximum l of the output
        :param normalization: either 'norm', 'component', 'none' or custom
        :param lmax_in: maximum l of the input of ToS2Grid in order to be the inverse
        """
        super().__init__()

        assert normalization in ['norm', 'component', 'none'] or torch.is_tensor(normalization), \
            "normalization needs to be 'norm', 'component', 'none' or a tensor"

        if isinstance(res, int) or res is None:
            lmax, res_beta, res_alpha = complete_lmax_res(lmax, res, None)
        else:
            lmax, res_beta, res_alpha = complete_lmax_res(lmax, *res)

        if lmax_in is None:
            lmax_in = lmax

        betas, alphas, shb, sha = spherical_harmonics_s2_grid(
            lmax, res_beta, res_alpha)

        with torch_default_dtype(torch.float64):
            # normalize such that it is the inverse of ToS2Grid
            if normalization == 'component':
                n = math.sqrt(4 * math.pi) * torch.tensor(
                    [math.sqrt(2 * l + 1) for l in range(lmax + 1)]
                ) * math.sqrt(lmax_in + 1)
            if normalization == 'norm':
                n = math.sqrt(4 * math.pi) * torch.ones(lmax + 1) * math.sqrt(lmax_in + 1)
            if normalization == 'none':
                n = 4 * math.pi * torch.ones(lmax + 1)
            if torch.is_tensor(normalization):
                n = normalization.to(dtype=torch.float64)
            m = rsh.spherical_harmonics_expand_matrix(range(lmax + 1))  # [l, m, i]
            assert res_beta % 2 == 0
            qw = torch.tensor(S3.quadrature_weights(res_beta // 2)) * res_beta ** 2 / res_alpha  # [b]
        shb = torch.einsum('lmj,bj,lmi,l,b->mbi', m, shb, m, n, qw)  # [m, b, i]

        self.register_buffer('alphas', alphas)
        self.register_buffer('betas', betas)
        self.register_buffer('sha', sha)
        self.register_buffer('shb', shb)
        self.to(torch.get_default_dtype())
Example No. 7
def setup_so3_integrate(b, cuda_device):
    import lie_learn.spaces.S3 as S3

    w = S3.quadrature_weights(b)  # (2b) [beta]

    w = torch.FloatTensor(w)

    if cuda_device is not None:
        w = w.cuda(cuda_device)

    return w
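
# Usage sketch (assumes lie_learn and PyTorch; pass None to stay on the CPU):
w = setup_so3_integrate(b=6, cuda_device=None)  # [2b] beta quadrature weights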
Example No. 8
def test_so3_rfft(b_in, b_out, device):
    x = torch.randn(2 * b_in, 2 * b_in, 2 * b_in, dtype=torch.float, device=device)  # [beta, alpha, gamma]

    from s2cnn.soft.so3_fft import so3_rfft
    y1 = so3_rfft(x, b_out=b_out)

    from s2cnn import so3_rft, so3_soft_grid
    import lie_learn.spaces.S3 as S3

    # so3_rft computes a non-weighted Fourier transform, so weight the input first
    weights = torch.tensor(S3.quadrature_weights(b_in), dtype=torch.float, device=device)
    x2 = torch.einsum("bac,b->bac", x, weights)

    y2 = so3_rft(x2.view(-1), b_out, so3_soft_grid(b_in))
    assert (y1 - y2).abs().max().item() < 1e-4 * y1.abs().mean().item() 
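
# Running the test directly (a sketch; requires s2cnn, lie_learn and PyTorch):
test_so3_rfft(b_in=6, b_out=6, device="cpu")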
Example No. 9
    def __init__(self, L_max, d=None, w=None, L2_normalized=True,
                 field='complex', normalization='quantum', order='centered', condon_shortley='cs'):

        super().__init__()

        if d is None:
            self.d = setup_d_transform(
                b=L_max + 1, L2_normalized=L2_normalized,
                field=field, normalization=normalization,
                order=order, condon_shortley=condon_shortley)
        else:
            self.d = d

        if w is None:
            self.w = S3.quadrature_weights(b=L_max + 1)
        else:
            self.w = w

        self.wd = weigh_wigner_d(self.d, self.w)
Example No. 10
def s2_integrate(x):
    """
    Integrate a signal on S2 using the quadrature weights
    """
    b = x.size(-2) // 2
    w = torch.tensor(S3.quadrature_weights(b),
                     dtype=torch.float32,
                     device=x.device)

    x = torch.sum(x, dim=-1).squeeze(-1)

    sz = x.size()
    x = x.view(-1, 2 * b)
    w = w.view(2 * b, 1)
    x = torch.mm(x, w).squeeze(-1)
    x = x.view(*sz[:-1])
    return x
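
# Usage sketch (assumes lie_learn and PyTorch): integrate a batch of signals
# sampled on a (2b, 2b) [beta, alpha] grid.
import torch
b = 5
x = torch.randn(10, 2 * b, 2 * b)  # [batch, beta, alpha]
val = s2_integrate(x)  # shape [batch]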
Example No. 11
def compare_naive_and_spectral_conv():

    f1 = lambda t, p: sh(l=2,
                         m=1,
                         theta=t,
                         phi=p,
                         field='real',
                         normalization='quantum',
                         condon_shortley=True)
    f2 = lambda t, p: sh(l=2,
                         m=1,
                         theta=t,
                         phi=p,
                         field='real',
                         normalization='quantum',
                         condon_shortley=True)

    theta, phi = S2.meshgrid(b=4, grid_type='Gauss-Legendre')
    f1_grid = f1(theta, phi)
    f2_grid = f2(theta, phi)

    alpha, beta, gamma = S3.meshgrid(b=4,
                                     grid_type='SOFT')  # TODO check convention

    f12_grid_spectral = spectral_S2_conv(f1_grid,
                                         f2_grid,
                                         s2_fft=None,
                                         so3_fft=None)

    f12_grid = np.zeros_like(alpha)
    for i in range(alpha.shape[0]):
        for j in range(alpha.shape[1]):
            for k in range(alpha.shape[2]):
                f12_grid[i, j, k] = naive_S2_conv(
                    f1, f2, alpha[i, j, k], beta[i, j, k], gamma[i, j, k])
                print(i, j, k, f12_grid[i, j, k])

    return f1_grid, f2_grid, f12_grid, f12_grid_spectral
Example No. 12
    def __init__(self, L_max, field='complex', normalization='quantum', order='centered', condon_shortley='cs'):

        super().__init__()
        # TODO allow user to specify the grid (now using SOFT implicitly)

        # Explicitly construct the Wigner-D matrices evaluated at each point in a grid in SO(3)
        self.D = []
        b = L_max + 1
        for l in range(b):
            self.D.append(np.zeros((2 * b, 2 * b, 2 * b, 2 * l + 1, 2 * l + 1),
                                   dtype=complex if field == 'complex' else float))

            for j1 in range(2 * b):
                alpha = 2 * np.pi * j1 / (2. * b)
                for k in range(2 * b):
                    beta = np.pi * (2 * k + 1) / (4. * b)
                    for j2 in range(2 * b):
                        gamma = 2 * np.pi * j2 / (2. * b)
                        self.D[-1][j1, k, j2, :, :] = wigner_D_matrix(l, alpha, beta, gamma,
                                                                      field, normalization, order, condon_shortley)

        # Compute quadrature weights
        self.w = S3.quadrature_weights(b=b, grid_type='SOFT')

        # Stack D into a single Fourier matrix
        # The first axis corresponds to the spatial samples.
        # The spatial grid has shape (2b, 2b, 2b), so this axis has length (2b)^3.
        # The second axis of this matrix has length sum_{l=0}^L_max (2l+1)^2,
        # which corresponds to all the spectral coefficients flattened into a vector.
        # (normally these are stored as matrices D^l of shape (2l+1)x(2l+1))
        self.F = np.hstack([self.D[l].reshape((2 * b) ** 3, (2 * l + 1) ** 2) for l in range(b)])

        # For the IFFT / synthesis transform, we need to weight the order-l Fourier coefficients by (2l + 1)
        # Here we precompute these coefficients.
        ls = [[l] * (2 * l + 1) ** 2 for l in range(b)]
        ls = np.array([ll for sublist in ls for ll in sublist])  # (0,) + 9 * (1,) + 25 * (2,) + ...
        self.l_weights = 2 * ls + 1
Example No. 13
def _setup_so3_integrate(b, device_type, device_index):
    import lie_learn.spaces.S3 as S3

    return torch.tensor(S3.quadrature_weights(b), dtype=torch.float32, device=torch.device(device_type, device_index))  # (2b) [beta]  # pylint: disable=E1102
Example No. 14
'''
Compare so3_ft with so3_fft
'''
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

b_in, b_out = 6, 6  # bandwidth
# random input data to be Fourier transformed
x = torch.randn(2 * b_in, 2 * b_in, 2 * b_in, dtype=torch.float,
                device=device)  # [beta, alpha, gamma]

# Fast version
from s2cnn.soft.so3_fft import so3_rfft

y1 = so3_rfft(x, b_out=b_out)

# Equivalent computation using the naive transform
from s2cnn import so3_rft, so3_soft_grid
import lie_learn.spaces.S3 as S3

# so3_rft computes a non-weighted Fourier transform, so weight the input first
weights = torch.tensor(S3.quadrature_weights(b_in),
                       dtype=torch.float,
                       device=device)
x = torch.einsum("bac,b->bac", x, weights)

y2 = so3_rft(x.view(-1), b_out, so3_soft_grid(b_in))

# Compare values
assert (y1 - y2).abs().max().item() < 1e-4 * y1.abs().mean().item()
Example No. 15
'''
Compare so3_ft with so3_fft
'''
import torch


b_in, b_out = 6, 6  # bandwidth
# random input data to be Fourier transformed
x = torch.randn(2 * b_in, 2 * b_in, 2 * b_in, dtype=torch.float, device="cuda")  # [beta, alpha, gamma]


# Fast version
from s2cnn.soft.gpu.so3_fft import so3_rfft

y1 = so3_rfft(x, b_out=b_out)


# Equivalent computation using the naive transform
from s2cnn import so3_rft, so3_soft_grid
import lie_learn.spaces.S3 as S3

# so3_rft computes a non-weighted Fourier transform, so weight the input first
weights = torch.tensor(S3.quadrature_weights(b_in), dtype=torch.float, device="cuda")
x = torch.einsum("bac,b->bac", x, weights)

y2 = so3_rft(x.view(-1), b_out, so3_soft_grid(b_in))


# Compare values
assert (y1 - y2).abs().max().item() < 1e-4 * y1.abs().mean().item()
Example No. 16
def weighted_d(b):
    d = setup_d_transform(b, L2_normalized=False)
    w = S3.quadrature_weights(b, grid_type='SOFT')
    return weigh_wigner_d(d, w)
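
# Usage sketch (assumes setup_d_transform and weigh_wigner_d come from the same
# lie_learn-based module used in Example No. 9):
wd = weighted_d(b=8)  # quadrature-weighted Wigner-d samples for bandwidth 8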