Example no. 1
# Imports needed by this snippet; the paths assume the PauliNet-era deepqmc API.
import copy

import torch

from deepqmc import Molecule, train
from deepqmc.wf import PauliNet


def test_invariance_to_subbatch_size(tmp_path):
    batch_size = 1000

    # For determinism we work on CPU and compute the Hartree-Fock solution once.
    mol = Molecule.from_name('LiH')
    net = PauliNet.from_hf(mol, cas=(4, 2), conf_limit=2).cpu()

    state = copy.deepcopy(net.state_dict())
    params_orig = copy.deepcopy(list(net.parameters()))

    def get_total_gradient_norm(subbatch_size: int):
        torch.manual_seed(0)
        net.load_state_dict(state)

        train(
            net,
            n_steps=1,
            batch_size=batch_size,
            epoch_size=1,
            optimizer='SGD',  # plain SGD: Adam's adaptive scaling would mask a wrongly scaled loss/gradient
            equilibrate=False,
            workdir=tmp_path,
            fit_kwargs={'subbatch_size': subbatch_size},
            sampler_kwargs={
                'sample_size': batch_size,
                'n_discard': 0,
                'n_decorrelate': 0,
                'n_first_certain': 0,
            },
        )

        # After a single SGD step from the same initial state, the parameter
        # change is proportional to the gradient, so its norm serves as a proxy.
        params = list(net.parameters())
        return sum((p1 - p2).norm() for p1, p2 in zip(params, params_orig))

    grad_norm_1 = get_total_gradient_norm(subbatch_size=50)
    grad_norm_2 = get_total_gradient_norm(subbatch_size=1000)

    # We accept relative variation up to a factor of 5. If the loss/gradients
    # scaled with the subbatch size instead, the expected ratio would be
    # 1000 / 50 = 20.
    assert torch.isclose(grad_norm_1, grad_norm_2, rtol=5.0)
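The invariance this test probes is that of gradient accumulation: averaging a loss over subbatches, weighted by each subbatch's share of the full batch, reproduces the full-batch gradient. Below is a minimal self-contained sketch of that property in plain PyTorch, using a hypothetical quadratic loss rather than the deepqmc fitting loop:

import torch

torch.manual_seed(0)
x = torch.randn(1000, 4)
w = torch.randn(4, requires_grad=True)

# full-batch gradient of the mean loss
loss_full = ((x @ w) ** 2).mean()
g_full, = torch.autograd.grad(loss_full, w)

# accumulate subbatch gradients, weighting each subbatch mean by its share of the batch
g_acc = torch.zeros_like(w)
for chunk in x.split(50):
    loss = ((chunk @ w) ** 2).mean() * (len(chunk) / len(x))
    g_acc += torch.autograd.grad(loss, w)[0]

assert torch.allclose(g_full, g_acc, atol=1e-6)

With this weighting the accumulated gradient does not depend on the subbatch size, which is the invariance the test above checks (up to the loose factor it allows).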
Example no. 2
    @classmethod
    def from_pyscf(
        cls,
        mf,
        *,
        init_weights=True,
        freeze_mos=True,
        freeze_confs=False,
        conf_cutoff=1e-2,
        conf_limit=None,
        **kwargs,
    ):
        r"""Construct a :class:`PauliNet` instance from a finished PySCF_ calculation.

        Args:
            mf (:class:`pyscf.scf.hf.RHF` | :class:`pyscf.mcscf.mc1step.CASSCF`):
                restricted (multireference) HF calculation
            init_weights (bool): whether molecular orbital coefficients and
                configuration coefficients are initialized from the HF calculation
            freeze_mos (bool): whether the MO coefficients are frozen for
                gradient optimization
            freeze_confs (bool): whether the configuration coefficients are
                frozen for gradient optimization
            conf_cutoff (float): determinants with a linear coefficient above
                this threshold (in absolute value) are included in the
                determinant expansion
            conf_limit (int): if given, at maximum the given number of configurations
                with the largest linear coefficients are used in the ansatz
            kwargs: all other arguments are passed to the :class:`PauliNet`
                constructor

        .. _PySCF: http://pyscf.org
        """
        assert not (set(kwargs) & {'n_configurations', 'n_orbitals'})
        n_up, n_down = mf.mol.nelec
        if hasattr(mf, 'fcisolver'):
            if conf_limit:
                conf_cutoff = max(
                    np.sort(abs(mf.ci.flatten()))[-conf_limit:][0] - 1e-10,
                    conf_cutoff)
            # If coefficients right at the cutoff are degenerate, the first
            # pass can return more than conf_limit configurations; the second
            # pass with a slightly larger tolerance discards the degenerate ones.
            for tol in [conf_cutoff, conf_cutoff + 2e-10]:
                conf_coeff, *confs = zip(*mf.fcisolver.large_ci(
                    mf.ci, mf.ncas, mf.nelecas, tol=tol, return_strs=False))
                if not conf_limit or len(conf_coeff) <= conf_limit:
                    break
            else:
                raise AssertionError(
                    'could not truncate the expansion to conf_limit configurations')
            ns_dbl = n_up - mf.nelecas[0], n_down - mf.nelecas[1]
            conf_coeff = torch.tensor(conf_coeff)
            confs = [[
                torch.arange(n_dbl,
                             dtype=torch.long).expand(len(conf_coeff), -1),
                torch.tensor(cfs, dtype=torch.long) + n_dbl,
            ] for n_dbl, cfs in zip(ns_dbl, confs)]
            confs = [torch.cat(cfs, dim=-1) for cfs in confs]
            confs = torch.cat(confs, dim=-1)
            kwargs['n_configurations'] = len(confs)
            kwargs['n_orbitals'] = confs.max().item() + 1
        else:
            confs = None
        mol = Molecule(
            mf.mol.atom_coords().astype('float32'),
            mf.mol.atom_charges(),
            mf.mol.charge,
            mf.mol.spin,
        )
        basis = GTOBasis.from_pyscf(mf.mol)
        wf = cls(mol, basis, **kwargs)
        if init_weights:
            wf.mo.init_from_pyscf(mf, freeze_mos=freeze_mos)  # initialize MOs from the HF solution
            if confs is not None:
                wf.confs.detach().copy_(confs)
                if len(confs) > 1:
                    wf.conf_coeff.weight.detach().copy_(conf_coeff)
                if freeze_confs:
                    wf.conf_coeff.weight.requires_grad_(False)
        return wf
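A usage sketch for from_pyscf, assuming the PauliNet-era deepqmc import path and standard PySCF APIs; the geometry, basis, and active space below are illustrative only:

from pyscf import gto, mcscf, scf

from deepqmc.wf import PauliNet  # import path assumes the PauliNet-era deepqmc package

pyscf_mol = gto.M(atom='Li 0 0 0; H 0 0 3.02', unit='bohr', basis='6-311g')
mf = scf.RHF(pyscf_mol).run()        # restricted HF
mc = mcscf.CASSCF(mf, 4, 2).run()    # small CAS on top of it (illustrative)
wf = PauliNet.from_pyscf(mc, conf_limit=2, freeze_mos=True)

Passing the bare RHF object instead of the CASSCF one yields a single-determinant ansatz, since the configuration branch is only entered when the object has an fcisolver attribute.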
Example no. 3
# Imports needed by this snippet; the paths assume the PauliNet-era deepqmc API.
from deepqmc import Molecule, evaluate, train
from deepqmc.wf import PauliNet


def test_simple_example(tmp_path):
    mol = Molecule.from_name('LiH')
    net = PauliNet.from_hf(mol, cas=(4, 2), pauli_kwargs={'conf_limit': 2})
    chkpts = []
    train(
        net,
        n_steps=3,
        batch_size=5,
        save_every=2,
        epoch_size=3,
        equilibrate=1,
        chkpts=chkpts,
        workdir=tmp_path,
        fit_kwargs={'subbatch_size': 5},
        sampler_kwargs={
            'sample_size': 15,
            'n_discard': 0,
            'n_decorrelate': 0,
            'n_first_certain': 0,
        },
    )
    train(
        net,
        n_steps=1,
        batch_size=5,
        epoch_size=1,
        state=chkpts[-1][1],
        equilibrate=False,
        fit_kwargs={'subbatch_size': 5},
        sampler_kwargs={
            'sample_size': 5,
            'n_discard': 0,
            'n_decorrelate': 0,
            'n_first_certain': 0,
        },
    )
    evaluate(
        net,
        n_steps=1,
        sample_size=5,
        log_dict={},
        sample_kwargs={
            'equilibrate': 1,
            'block_size': 1
        },
        sampler_kwargs={
            'n_decorrelate': 0,
            'n_first_certain': 0
        },
    )
    evaluate(
        net,
        n_steps=1,
        workdir=tmp_path,
        sample_size=5,
        sample_kwargs={
            'equilibrate': False,
            'block_size': 1
        },
        sampler_kwargs={
            'n_decorrelate': 0,
            'n_first_certain': 0
        },
    )
Example no. 4
def mol():
    return Molecule.from_name('LiH')
Example no. 5
def mol():
    # H2 anion with maximal spin polarization: three electrons, all spin-up
    mol = Molecule.from_name('H2')
    mol.charge = -1
    mol.spin = 3
    return mol
Example no. 6
    @classmethod
    def from_pyscf(
        cls,
        mf,
        *,
        init_weights=True,
        freeze_mos=True,
        freeze_confs=False,
        conf_cutoff=1e-2,
        conf_limit=None,
        conf_strs=None,
        **kwargs,
    ):
        r"""Construct a :class:`PauliNet` instance from a finished PySCF_ calculation.

        Args:
            mf (:class:`pyscf.scf.hf.RHF` | :class:`pyscf.mcscf.mc1step.CASSCF`):
                restricted (multireference) HF calculation
            init_weights (bool): whether molecular orbital coefficients and
                configuration coefficients are initialized from the HF calculation
            freeze_mos (bool): whether the MO coefficients are frozen for
                gradient optimization
            freeze_confs (bool): whether the configuration coefficients are
                frozen for gradient optimization
            conf_cutoff (float): determinants with a linear coefficient above
                this threshold (in absolute value) are included in the
                determinant expansion
            conf_limit (int): if given, at maximum the given number of configurations
                with the largest linear coefficients are used in the ansatz
            conf_strs ([str]): if given, only the configurations with these labels
                are used in the ansatz; cannot be combined with *conf_limit*
            kwargs: all other arguments are passed to the :class:`PauliNet`
                constructor

        .. _PySCF: http://pyscf.org
        """
        assert not (set(kwargs) & {'n_configurations', 'n_orbitals'})
        assert not conf_strs or not conf_limit
        n_up, n_down = mf.mol.nelec
        if hasattr(mf, 'fcisolver'):
            confs = confs_from_mc(mf)
            if conf_limit:
                # If the last kept coefficient is degenerate with the first
                # discarded one, drop it as well so the truncation is unambiguous.
                if conf_limit < len(confs) and abs(
                        confs[conf_limit - 1][1] - confs[conf_limit][1]) < 1e-10:
                    conf_limit -= 1
                confs = confs[:conf_limit]
            if conf_strs:
                confs = {c[0]: c for c in confs}
                confs = [confs[s] for s in conf_strs]
            if not conf_limit and not conf_strs:
                confs = [c for c in confs if abs(c[1]) >= conf_cutoff]
                assert confs
            conf_strs, conf_coeff, confs = zip(*confs)
            conf_coeff = torch.tensor(conf_coeff)
            confs = torch.tensor(confs)
            log.info(f'Will use {len(confs)} electron configurations')
            kwargs['n_configurations'] = len(confs)
            kwargs['n_orbitals'] = confs.max().item() + 1
        else:
            confs = None
            conf_strs = None
        mol = Molecule(
            mf.mol.atom_coords().astype('float32'),
            mf.mol.atom_charges(),
            mf.mol.charge,
            mf.mol.spin,
        )
        basis = GTOBasis.from_pyscf(mf.mol)
        wf = cls(mol, basis, **kwargs)
        wf.conf_strs = conf_strs
        if init_weights:
            wf.mo.init_from_pyscf(mf, freeze_mos=freeze_mos)
            if confs is not None:
                wf.confs.detach().copy_(confs)
                if len(confs) > 1:
                    wf.conf_coeff.weight.detach().copy_(conf_coeff)
                if freeze_confs:
                    wf.conf_coeff.weight.requires_grad_(False)
        return wf
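This variant also records the selected configuration labels on the wave function (wf.conf_strs), so the same determinant expansion can be reproduced later. A hedged sketch, reusing the hypothetical mc object from the sketch after Example no. 2:

# let the constructor pick the two largest configurations ...
wf = PauliNet.from_pyscf(mc, conf_limit=2)
print(wf.conf_strs)

# ... and rebuild an ansatz with exactly those configurations later
wf_same = PauliNet.from_pyscf(mc, conf_strs=wf.conf_strs)

conf_strs and conf_limit cannot be combined (the assertion at the top of the method enforces this), so the second call passes only conf_strs.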