Example #1
def test_heterosoap():
    import torch
    from theforce.descriptor.cutoff import PolyCut
    from theforce.descriptor.soap import RealSeriesSoap
    # HeteroSoap is assumed to be defined in the enclosing module.

    xyz = (torch.rand(10, 3) - 0.5) * 5
    xyz.requires_grad = True
    s = HeteroSoap(7, 5, PolyCut(8.0), [10, 18], flatten=False)
    numbers = torch.tensor(4 * [10] + 6 * [18])
    p, dp = s(xyz, numbers)
    p.sum().backward()
    print('fits gradients calculated by autograd: {}'.format(
        xyz.grad.allclose(dp.sum(dim=(0, 1, 2, 3, 4)))))

    ss = RealSeriesSoap(7, 5, PolyCut(8.0))
    pp, dpp = ss(xyz[:4])
    print('HeteroSoap == RealSeriesSoap: {}'.format(pp.allclose(p[0, 0])))
    print('HeteroSoap == RealSeriesSoap: {}'.format(
        dpp.allclose(dp[0, 0, :, :, :, :4])))
    pp, dpp = ss(xyz[4:])
    print('HeteroSoap == RealSeriesSoap: {}'.format(pp.allclose(p[1, 1])))
    print('HeteroSoap == RealSeriesSoap: {}'.format(
        dpp.allclose(dp[1, 1, :, :, :, 4:])))

    # reshape
    s = HeteroSoap(2, 2, PolyCut(8.0), [10, 18], atomic_unit=1., flatten=True)
    p, dp = s(xyz, numbers)
    print('checking dimensions: dim={}, shape={}, grad-shape={}'.format(
        s.dim, p.shape, dp.shape))
Example #2
def test_multisoap():
    import torch
    from theforce.descriptor.cutoff import PolyCut
    from theforce.descriptor.soap import RealSeriesSoap
    # TailoredSoap, MultiSoap, ScaledSoap, and NormalizedSoap are assumed
    # to be defined in the enclosing module.
    xyz = torch.tensor([[0.175, 0.884, -0.87, 0.354, -0.082, 3.1],
                        [-0.791, 0.116, 0.19, -0.832, 0.184, 0.],
                        [0.387, 0.761, 0.655, -0.528, 0.973, 0.]]).t()
    xyz = xyz * 3
    cutoff = 3.0 * 3
    xyz.requires_grad = True

    soaps = [
        TailoredSoap(RealSeriesSoap(2, 2, PolyCut(cutoff))),
        TailoredSoap(RealSeriesSoap(3, 2, PolyCut(cutoff)))
    ]
    ms = NormalizedSoap(ScaledSoap(MultiSoap(soaps)))

    masks = [xyz[:, 0] >= 0., xyz[:, 0] < 0.]
    a, b = ms(xyz, masks)
    a.sum().backward()
    err = (xyz.grad - b.sum(dim=0)).abs().max()
    test = xyz.grad.allclose(b.sum(dim=0))
    assert ms.dim == a.size(0)
    assert ms.state == eval(ms.state).state
    print('MultiSoap: grads are consistent with autograd: {} ({})'.format(
        test, err))
Example #3
def __init__(self, lmax, cutoff):
    # method extracted from a class; assumes torch, pi (from math), Ylm, and
    # PolyCut are available in the enclosing module
    self.ylm = Ylm(lmax)
    self.radial = PolyCut(cutoff)
    one = torch.ones(lmax + 1, lmax + 1)
    # Yr is 1 on the diagonal and 2 below it; Yi is 2 above the diagonal
    # (presumably doubling the off-diagonal contributions)
    self.Yr = 2 * torch.tril(one) - torch.eye(lmax + 1)
    self.Yi = 2 * torch.triu(one, diagonal=1)
    # 4*pi/(2l+1) prefactors for l = 0, ..., lmax
    self.coeff = 4 * pi / torch.arange(0, lmax + 1).mul(2).add(1.)
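
For reference, a standalone sketch (not from the source) showing the values these tensors take for lmax=2; Yr is 1 on the diagonal and 2 below it, Yi is 2 above it, and coeff holds the 4*pi/(2l+1) prefactors:

import torch
from math import pi

lmax = 2
one = torch.ones(lmax + 1, lmax + 1)
print(2 * torch.tril(one) - torch.eye(lmax + 1))  # [[1,0,0],[2,1,0],[2,2,1]]
print(2 * torch.triu(one, diagonal=1))            # [[0,2,2],[0,0,2],[0,0,0]]
print(4 * pi / torch.arange(0, lmax + 1).mul(2).add(1.))  # [4pi, 4pi/3, 4pi/5]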
Example #4
def example():
    import torch
    from theforce.descriptor.cutoff import PolyCut
    # SeriesSoap is assumed to be defined in the enclosing module.

    lengthscale = 2.
    cutoff = 8.
    xyz = torch.tensor([[1., 0, 0], [-1., 0, 0], [0, 1., 0], [0, -1., 0],
                        [0, 0, 1.], [0, 0, -1.]]) * lengthscale
    xyz.requires_grad = True
    s = SeriesSoap(2, 2, PolyCut(cutoff), normalize=True)
    p, dp = s(xyz)
    print(p)
Example #5
def test_speed(N=100):
    import time
    import torch
    from theforce.descriptor.cutoff import PolyCut
    # AbsSeriesSoap is assumed to be defined in the enclosing module.
    s = AbsSeriesSoap(5, 5, PolyCut(3.0))
    start = time.time()
    for _ in range(N):
        xyz = torch.rand(30, 3)
        p = s(xyz)
    finish = time.time()
    delta = (finish - start) / N
    print("speed of {}: {} sec".format(s.state, delta))
Example #6
def test_validity():
    import torch
    from theforce.descriptor.cutoff import PolyCut
    # AbsSeriesSoap and SeriesSoap are assumed to be defined in the
    # enclosing module.

    xyz = torch.tensor([[0.175, 0.884, -0.87, 0.354, -0.082, 3.1],
                        [-0.791, 0.116, 0.19, -0.832, 0.184, 0.],
                        [0.387, 0.761, 0.655, -0.528, 0.973, 0.]]).t()
    xyz.requires_grad = True

    target = torch.tensor([[[0.36174603, 0.39013356, 0.43448023],
                            [0.39013356, 0.42074877, 0.46857549],
                            [0.43448023, 0.46857549, 0.5218387]],
                           [[0.2906253, 0.30558356, 0.33600938],
                            [0.30558356, 0.3246583, 0.36077952],
                            [0.33600938, 0.36077952, 0.40524778]],
                           [[0.16241845, 0.18307552, 0.20443194],
                            [0.18307552, 0.22340802, 0.26811937],
                            [0.20443194, 0.26811937, 0.34109511]]])

    s = AbsSeriesSoap(2, 2, PolyCut(3.0))
    p, dp = s(xyz)
    p = p.permute(2, 0, 1)
    print('fits pre-calculated values: {}'.format(p.allclose(target)))

    p.sum().backward()
    print('fits gradients calculated by autograd: {}'.format(
        xyz.grad.allclose(dp.sum(dim=(0, 1, 2)))))

    # test with normalization turned on
    s = SeriesSoap(3, 7, PolyCut(3.0), normalize=True)
    xyz.grad *= 0
    p, dp = s(xyz)
    p.sum().backward()
    print('fits gradients calculated by autograd (normalize=True): {}'.format(
        xyz.grad.allclose(dp.sum(dim=0))))

    assert s.state == eval(s.state).state

    # test if works with empty tensors
    s(torch.rand(0, 3))
Example #7
def test_units():
    import torch
    from theforce.descriptor.cutoff import PolyCut
    # SeriesSoap is assumed to be defined in the enclosing module.
    xyz = torch.tensor([[0.175, 0.884, -0.87, 0.354, -0.082, 3.1],
                        [-0.791, 0.116, 0.19, -0.832, 0.184, 0.],
                        [0.387, 0.761, 0.655, -0.528, 0.973, 0.]]).t()
    xyz = xyz * 3
    cutoff = 3.0 * 3
    xyz.requires_grad = True

    s = SeriesSoap(3, 3, PolyCut(cutoff), normalize=True)
    p, dp = s(xyz)
    p.sum().backward()
    print('grads are consistent with larger length scale: {}'.format(
        xyz.grad.allclose(dp.sum(dim=0))))
Example #8
def default_kernel(numbers, cutoff=6., au=None, exponent=4, lmax=3, nmax=3, noise=0.01):
    from theforce.regression.gppotential import GaussianProcessPotential
    from theforce.similarity.heterosoap import HeterogeneousSoapKernel as SOAP
    from theforce.descriptor.cutoff import PolyCut
    from theforce.regression.kernel import White, Positive, DotProd
    from theforce.util.util import date
    # default_au is assumed to be defined in the enclosing module; au, if
    # given, must be a callable mapping an atomic number to an atomic unit.
    kerns = [SOAP(DotProd()**exponent, a, numbers, lmax, nmax,
                  PolyCut(cutoff), atomic_unit=au(a) if au else default_au(a))
             for a in numbers]
    gp = GaussianProcessPotential(
        kerns, noise=White(signal=noise, requires_grad=False))
    with open('gp.chp', 'a') as f:
        f.write(f'# {date()}\n{gp}\n')
    return gp
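
A minimal call sketch (not from the source); the atomic numbers and the au callable are illustrative, and appending the checkpoint to gp.chp is a side effect of the call:

gp = default_kernel([10, 18], cutoff=6.0, au=lambda a: 1.0)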
Example #9
def example_optim():
    import torch
    from theforce.descriptor.cutoff import PolyCut
    # Product and ParamedRepulsiveCore are assumed to be defined in the
    # enclosing module.
    cut = 1.63
    d = torch.linspace(0.1, cut*1.3, 50).view(-1, 1)
    Y = (3.7/d**2.4)*(1-d/cut)**2
    fac = Product(ParamedRepulsiveCore(), PolyCut(cut))
    optimizer = torch.optim.Adam([{'params': fac.params}], lr=0.5)

    for _ in range(1000):
        optimizer.zero_grad()
        a, b = fac(d)
        loss = ((a-Y)**2).sum()
        loss.backward()
        optimizer.step()

    print(fac.state)
    print(fac.__class__.__name__)
Example #10
def get_lj_terms(numbers, cutoff, default_order=0.01):
    # requires itertools plus Param, Positive, PairPot, Pow, and PolyCut
    # from the enclosing package
    # create one A and one B parameter per (unordered) pair of species
    pairs = ([(a, b) for a, b in itertools.combinations(numbers, 2)] +
             [(a, a) for a in numbers])
    A = {
        pair: Param(Positive, default_order, 'A_{}_{}'.format(*pair))
        for pair in pairs
    }
    B = {
        pair: Param(Positive, default_order, 'B_{}_{}'.format(*pair))
        for pair in pairs
    }
    terms = sum([
        PairPot(*pair,
                PolyCut(cutoff) * (A[pair] * Pow(n=-12) - B[pair] * Pow(n=-6)))
        for pair in pairs
    ])
    return terms
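
Each term above is a smoothly cut-off 12-6 Lennard-Jones form, roughly A/r^12 - B/r^6 multiplied by PolyCut. A minimal call sketch (not from the source; atomic numbers are illustrative):

terms = get_lj_terms([10, 18], cutoff=6.0)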
Example #11
def test_UniversalSoap():
    import torch
    from theforce.descriptor.cutoff import PolyCut
    from theforce.descriptor.soap import RealSeriesSoap
    # UniversalSoap is assumed to be defined in the enclosing module.

    xyz = (torch.rand(10, 3) - 0.5) * 5
    xyz.requires_grad = True
    s = UniversalSoap(3, 3, PolyCut(8.0), flatten=True)
    numbers = torch.tensor(4 * [10] + 6 * [18])
    # test grad
    p, dp = s(xyz, numbers, grad=True)
    torch.sparse.sum(p).backward()
    print('fits gradients calculated by autograd: {}'.format(
        xyz.grad.allclose(torch.sparse.sum(dp, dim=(0, 1, 2)))))

    # test non-overlapping
    numbers = torch.tensor(4 * [11] + 6 * [19])
    pp = s(xyz, numbers, grad=False)
    print(torch.sparse.sum(p * pp).isclose(torch.tensor(0.0)))
Example #12
def test_realseriessoap():
    import torch
    from theforce.descriptor.cutoff import PolyCut
    from theforce.descriptor.soap import RealSeriesSoap
    # TailoredSoap and NormalizedSoap are assumed to be defined in the
    # enclosing module.
    xyz = torch.tensor([[0.175, 0.884, -0.87, 0.354, -0.082, 3.1],
                        [-0.791, 0.116, 0.19, -0.832, 0.184, 0.],
                        [0.387, 0.761, 0.655, -0.528, 0.973, 0.]]).t()
    xyz = xyz * 3
    cutoff = 3.0 * 3
    xyz.requires_grad = True

    s = NormalizedSoap(
        TailoredSoap(RealSeriesSoap(2, 2, PolyCut(cutoff), atomic_unit=1.5)))

    p, dp = s(xyz)
    p.sum().backward()
    test_grad = xyz.grad.allclose(dp.sum(dim=0))
    err_grad = (xyz.grad - dp.sum(dim=0)).abs().max()
    print('RealSeriesSoap: grads are consistent with autograd: {} ({})'.format(
        test_grad, err_grad))
    assert eval(s.state).state == s.state
Example #13
def test_SubSeSoap():
    import torch
    from theforce.descriptor.cutoff import PolyCut
    from theforce.descriptor.soap import RealSeriesSoap
    # SubSeSoap is assumed to be defined in the enclosing module;
    # RealSeriesSoap is likely needed when eval-ing s.state below.

    xyz = (torch.rand(10, 3) - 0.5) * 5
    xyz.requires_grad = True
    radii = {10: 0.8, 11: 1., 18: 1.2, 19: 1.4}
    s = SubSeSoap(3, 3, PolyCut(8.0), [10, 18], radii=radii, flatten=True)
    numbers = torch.tensor(4 * [10] + 6 * [18])
    print(eval(s.state))

    # test grad
    p, dp = s(xyz, numbers, grad=True)
    p.sum().backward()
    print('fits gradients calculated by autograd: {}'.format(
        xyz.grad.allclose(dp.sum(dim=0))))

    # test non-overlapping
    numbers = torch.tensor(4 * [11] + 6 * [19])
    pp = s(xyz, numbers, grad=False)
    t = torch.sum(p * pp).isclose(torch.tensor(0.0))
    print(f'non-overlapping: {t}')
Example #14
def get_coulomb_terms(numbers, cutoff, setting={}, default_order=0.01):
    """
    If a setting is passed, it should be a dictionary of the form
    {atomic_number: c}, where c specifies the constraint and, optionally,
    the initial value. Acceptable constraints are:
    '+': positive
    '-': negative
    'r': real (no constraint)
    'f': fixed
    ------------------------------------------------------
    examples (see also the usage sketch after this function):
    c = '+' -> "positive" constraint
    c = ('+', 1.) -> "positive" constraint, initial value 1
    c = ('r', 1.) -> no constraint, initial value 1
    c = ('f', 1.) -> fix the charge at the value 1
    """
    # requires itertools plus Param, Positive, Negative, Real, PairPot,
    # Pow, and PolyCut from the enclosing package
    # initialize charges
    charges = {}
    for a in numbers:
        try:
            # constraint and initial value
            c = setting[a]
            if isinstance(c, str):
                if c == '+' or c == 'r':
                    ini = default_order
                elif c == '-':
                    ini = -default_order
                elif c == 'f':
                    raise RuntimeError('f (=fixed) constraint needs a value')
                else:
                    raise RuntimeError('unknown constraint {}'.format(c))
            else:
                c, ini = c
            # class of constraint
            if c == '+':
                _cls = Positive
                rg = True
            elif c == '-':
                _cls = Negative
                rg = True
            elif c == 'r':
                _cls = Real
                rg = True
            elif c == 'f':
                _cls = Real
                rg = False
            else:
                raise RuntimeError('unknown constraint {}'.format(c))
            # create charge
            charges[a] = Param(_cls, ini, 'q_{}'.format(a), rg=rg)
        except KeyError:
            charges[a] = Param(Real, default_order, 'q_{}'.format(a), rg=True)
    # create terms
    pairs = ([(a, b) for a, b in itertools.combinations(numbers, 2)] +
             [(a, a) for a in numbers])

    terms = sum([
        PairPot(
            *pair,
            PolyCut(cutoff) * charges[pair[0]] * charges[pair[1]] * Pow(n=-1))
        for pair in pairs
    ])
    return terms
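
A minimal usage sketch (not from the source); the atomic numbers and values below only illustrate the setting format described in the docstring:

setting = {
    1: '+',          # positive charge, default initial value
    8: ('-', -0.4),  # negative charge, initial value -0.4
    11: ('f', 1.0),  # charge fixed at 1.0 (not optimized)
}
terms = get_coulomb_terms([1, 8, 11], cutoff=6.0, setting=setting)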
Example #15
def get_kernel(params):
    import os
    import itertools
    from theforce.regression.gppotential import GaussianProcessPotential
    from theforce.similarity.pair import PairKernel
    from theforce.similarity.soap import SoapKernel, NormedSoapKernel
    from theforce.similarity.heterosoap import HeterogeneousSoapKernel
    from theforce.regression.stationary import RBF
    from theforce.descriptor.cutoff import PolyCut
    from theforce.regression.kernel import White, Positive, DotProd, Normed, Mul, Pow, Add
    from torch import tensor
    # several names imported above (tensor, SoapKernel, Normed, Mul, Pow, Add)
    # are not used directly; they are presumably kept in scope so that eval()
    # on a saved checkpoint line can resolve them

    # Gaussian Process
    if params['path_gp_chp'] and params['use_gp_chp'] and os.path.isfile(
            params['path_gp_chp']):
        with open(params['path_gp_chp'], 'r') as f:
            gp = eval(f.readlines()[-1])
            kerns = gp.kern.kernels

        # log
        if params['path_log']:
            with open(params['path_log'], 'a') as log:
                log.write('path_gp_chp: {} (read)\n'.format(
                    params['path_gp_chp']))
    else:

        # log
        if params['path_log']:
            with open(params['path_log'], 'a') as log:
                log.write('pairkernel: {}\nsoapkernel: {}\n'.format(
                    params['pairkernel'], params['soapkernel']))

        kerns = []
        if params['pairkernel']:
            pairs = (
                [(a, b)
                 for a, b in itertools.combinations(params['numbers'], 2)] +
                [(a, a) for a in params['numbers']])
            kerns += [
                PairKernel(RBF(), a, b, factor=PolyCut(params['cutoff']))
                for a, b in pairs
            ]
        if params['soapkernel']:
            if params['heterosoap']:
                SOAP = HeterogeneousSoapKernel
            else:
                SOAP = NormedSoapKernel
            kerns += [
                SOAP(Positive(1.0, requires_grad=True) *
                     DotProd()**params['exponent'],
                     atomic_number,
                     params['numbers'],
                     params['lmax'],
                     params['nmax'],
                     PolyCut(params['cutoff']),
                     atomic_unit=params['atomic_unit'])
                for atomic_number in params['numbers']
            ]
            # log
            if params['path_log']:
                with open(params['path_log'], 'a') as log:
                    log.write(
                        'lmax: {}\nnmax: {}\nexponent: {}\natomic_unit: {}\n'.
                        format(params['lmax'], params['nmax'],
                               params['exponent'], params['atomic_unit']))

        gp = GaussianProcessPotential(kerns,
                                      noise=White(
                                          signal=params['noise'],
                                          requires_grad=params['noisegrad']))

        if params['path_gp_chp']:
            gp.to_file(params['path_gp_chp'], flag='created', mode='w')

            # log
            if params['path_log']:
                with open(params['path_log'], 'a') as log:
                    log.write('path_gp_chp: {} (write)\n'.format(
                        params['path_gp_chp']))

    return gp
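
For reference, a minimal params sketch (not from the source) listing every key that get_kernel reads; the values are illustrative, not defaults:

params = {
    'path_gp_chp': 'gp.chp',  # checkpoint path, or None to skip
    'use_gp_chp': False,      # if True and the file exists, read gp from it
    'path_log': None,         # optional log file
    'pairkernel': True,       # add a PairKernel per species pair
    'soapkernel': True,       # add a SOAP kernel per species
    'heterosoap': True,       # HeterogeneousSoapKernel vs NormedSoapKernel
    'numbers': [10, 18],      # atomic numbers
    'cutoff': 6.0,
    'exponent': 4,
    'lmax': 3,
    'nmax': 3,
    'atomic_unit': None,
    'noise': 0.01,
    'noisegrad': False,
}
gp = get_kernel(params)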