Example #1
def eval_configs(data, cutoffs):
    """
  Evaluate values using the 
  regularization we have constructed
  """

    mol, wf, to_opt, freeze = wavefunction()
    eacc = EnergyAccumulator(mol)
    transform = LinearTransform(wf.parameters, to_opt, freeze)

    print("data loaded")
    r2 = data['distance_squared']
    weight = data['weight_total']

    d = {}
    for cutoff in list(cutoffs):
        node_cut = r2 < cutoff**2
        print(cutoff, node_cut.sum())

        # Polynomial regularization f_eps(r) = 9(r/eps)^2 - 15(r/eps)^4 + 7(r/eps)^6,
        # applied to dpH only for configurations inside the nodal cutoff (r < eps)
        c = 7. / (cutoff**6)
        b = -15. / (cutoff**4)
        a = 9. / (cutoff**2)

        l2 = r2[node_cut]
        dpH = np.copy(data['dpH'])
        dpH[node_cut] *= a * l2 + b * l2**2 + c * l2**3

        hist, bin_edges = np.histogram(np.log10(np.abs(dpH)[np.abs(dpH) > 0]),
                                       bins=200,
                                       density=True,
                                       weights=weight[np.abs(dpH) > 0])
        # hist has one fewer entry than bin_edges; pad with None so both columns fit one DataFrame
        d['hist' + str(cutoff)] = list(np.log10(hist)) + [None]
        d['bins' + str(cutoff)] = bin_edges
    df = pd.DataFrame(d)
    df.to_pickle('histogram.pickle')
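A minimal usage sketch, not part of the original listing: eval_configs expects the DataFrame written by collectconfigs in Example #3 (columns 'dpH', 'weight_total', 'distance_squared'); the file name and cutoff values here are illustrative assumptions.

import numpy as np
import pandas as pd

data = pd.read_pickle('dpH.pickle')                 #assumed name of the collectconfigs dump file
eval_configs(data, cutoffs=np.logspace(-3, -1, 5))  #illustrative cutoff values (Bohr)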
Example #2
def viznode(node_coords, node_grad, cutoffs, vizfile='viznode.pdf'):
    from wavefunction import wavefunction
    mol, wf, to_opt, freeze = wavefunction()
    eacc = EnergyAccumulator(mol)
    transform = LinearTransform(wf.parameters, to_opt, freeze)

    #Generate the distances we want
    x = np.arange(np.sqrt(1 / 0.01), 1001)
    x = 1. / x**2
    x = np.append(x, np.linspace(0.01, 0.12, 100))
    x = np.append(x, np.array([0, 1e-15, 1e-12, 1e-8] + [-y for y in x]))
    x = np.sort(x)

    coord_path = np.einsum('ij,l->lij', node_grad[0], x) + node_coords[0]
    coord_path = OpenConfigs(coord_path)

    #Move across the node in this path
    val = wf.recompute(coord_path)
    fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(3, 6), sharex=True)
    for k, cutoff in enumerate([1e-8] + cutoffs):
        pgrad = PGradTransform_new(eacc,
                                   transform,
                                   nodal_cutoff=np.array([cutoff]))
        d = pgrad(coord_path, wf)

        total = d['total']
        dpH = np.array(d['dpH'])[:, 0, 0]

        # Black curve marks the near-zero reference cutoff; default color cycle otherwise
        style = 'k-' if cutoff == 1e-8 else '-'
        ax[0].plot(x,
                   dpH * (val[0] * np.exp(val[1]))**2,
                   style,
                   label=r'$10^{' + str(int(np.log10(cutoff))) + '}$')
        ax[1].plot(x, np.log10(dpH**2 * (val[0] * np.exp(val[1]))**2), style)

    ax[0].set_ylabel(r'$E_L\frac{\partial_p \Psi}{\Psi} f_\epsilon |\Psi|^2$')
    ax[1].set_ylabel(
        r'log$_{10}((E_L\frac{\partial_p \Psi}{\Psi})^2 f_\epsilon^2|\Psi|^2)$'
    )
    ax[1].set_xlabel(r'$l$ (Bohr)')
    ax[0].set_xlim((-max(x) - 0.02, max(x) + 0.02))
    ax[1].set_xlim((-max(x) - 0.02, max(x) + 0.02))
    ax[0].legend(loc='best', title=r'$\epsilon$ (Bohr)')
    plt.savefig(vizfile, bbox_inches='tight')
    plt.close()
Example #3
def collectconfigs(n, dump_file):
    """
  Collect all the configurations from genconfig
  into a single place
  """
    dpH_total = []
    weight_total = []
    logweight_total = []
    distance_squared = []

    mol, wf, to_opt, freeze = wavefunction()

    eacc = EnergyAccumulator(mol)
    transform = LinearTransform(wf.parameters, to_opt, freeze)
    pgrad_bare = PGradTransform_new(eacc, transform, 1e-20)

    for i in range(1, n + 1):
        print(i)
        coords = OpenConfigs(pd.read_pickle('vmc/coords' + str(i) + '.pickle'))
        df = pd.read_json('vmc/evals' + str(i) + '.json')

        wf.recompute(coords)
        print("Recomputed")
        node_cut, r2 = pgrad_bare._node_cut(coords, wf)
        print("nodes cut")

        dpH = df['dpH'].values
        wfval = df['wfval'].values
        wfpval = df['wfpval'].values

        logweight = 2 * (wfval - wfpval)
        weight = np.exp(logweight)
        print(i, weight[weight == 0])

        dpH_total += list(dpH)
        weight_total += list(weight)
        logweight_total += list(logweight)
        distance_squared += list(r2)
        print(i, np.array(weight_total)[np.array(weight_total) == 0])

        # Checkpoint: overwrite the dump file with everything collected so far
        df = pd.DataFrame({
            'dpH': dpH_total,
            'weight_total': weight_total,
            'logweight_total': logweight_total,
            'distance_squared': distance_squared
        })
        df.to_pickle(dump_file)
    return df
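A hedged driver sketch, not from the original source, showing how collectconfigs pairs with genconfigs from Example #5; the file count and dump-file name are illustrative assumptions.

genconfigs(50)                          #writes vmc/coords<i>.pickle and vmc/evals<i>.json for i = 1..50
df = collectconfigs(50, 'dpH.pickle')   #gathers them into a single pickle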
Example #4
def locatenode(df, scale):
  """
  Pinpoint a node, scale variable scales wf value
  so that minimization algorithm can converge
  """

  from scipy.optimize import minimize_scalar
  mol, wf, to_opt, freeze = wavefunction() 
  
  ind = np.argsort(-df['dpH'].values)
  node_coords_0 = np.array(df.iloc[ind[0]]['configs']) #Pretty close!

  #Get the gradient
  def wfgrad(coords, wf, mol):
    nelec = mol.nelec[0] + mol.nelec[1]
    val = wf.recompute(coords)
    grad = []
    for e in range(nelec):
      node_grad = wf.gradient(e, coords.electron(e)) * np.exp(val[1]) * val[0]
      grad.append(node_grad)
    grad = np.array(grad)
    grad /= np.linalg.norm(grad.ravel())
    return np.rollaxis(grad, -1) 

  #Value function along that gradient
  def wvfal(x, wf, gradient):
      node_coords = OpenConfigs(node_coords_0 + gradient * x)
      val = wf.recompute(node_coords)
      return np.exp(2 * val[1])/scale #Scaling for minimization 

  #Minimize function 
  node_coords = node_coords_0

  #Take a single line-minimization step along the gradient toward the node
  for i in range(1):
    val = wf.recompute(OpenConfigs(node_coords))
    grad = wfgrad(OpenConfigs(node_coords), wf, mol)
    print("Wfval: ", np.exp(val[1])*val[0])

    res = minimize_scalar(lambda x: wvfal(x, wf, grad), bracket = [-0.1, 0.1], tol = 1e-16)
    print("x: ",res.x)

    #Update the coordinates along the gradient; minimize_scalar returns a scalar x
    node_coords += grad * res.x
    val = wf.recompute(OpenConfigs(node_coords))
    print("Wfval post: ", np.exp(val[1])*val[0])

  return node_coords, grad
Example #5
def genconfigs(n):
    """
  Generate configurations and weights corresponding
  to the highest determinant in MD expansion
  """

    import os
    os.makedirs('vmc', exist_ok=True)

    mol, mf, mc, wf, to_opt, freeze = wavefunction(return_mf=True)
    #Sample from the wave function which we're taking pderiv relative to
    mf.mo_coeff = mc.mo_coeff
    mf.mo_occ *= 0
    mf.mo_occ[wf.wf1._det_occup[0][-1]] = 2
    wfp = PySCFSlaterUHF(mol, mf)

    #Lots of configurations
    coords = pyqmc.initial_guess(mol, 100000)

    eacc = EnergyAccumulator(mol)
    transform = LinearTransform(wf.parameters, to_opt, freeze)
    pgrad_bare = PGradTransform(eacc, transform, 0)

    #Lots of steps
    warmup = 10
    for i in range(n + warmup + 1):
        df, coords = vmc(wfp, coords, nsteps=1)

        print(i)
        if (i > warmup):
            coords.configs.dump('vmc/coords' + str(i - warmup) + '.pickle')

            val = wf.recompute(coords)
            valp = wfp.value()

            d = pgrad_bare(coords, wf)

            data = {
                'dpH': np.array(d['dpH'])[:, -1],
                'dppsi': np.array(d['dppsi'])[:, -1],
                'en': np.array(d['total']),
                "wfval": val[1],
                "wfpval": valp[1]
            }
            pd.DataFrame(data).to_json('vmc/evals' + str(i - warmup) + '.json')
    return -1
Example #6
def sweepelectron():
  """
  Sweep an electron across the molecule
  to find a guess for the nodal position
  """
  import copy
  import numpy as np
  import pandas as pd
  import pyqmc
  from pyqmc.accumulators import EnergyAccumulator, LinearTransform, PGradTransform
  from pyqmc.coord import OpenConfigs  #assumed import path for OpenConfigs
  from wavefunction import wavefunction
  
  #Generate wave function and bare parameter gradient objects
  mol, wf, to_opt, freeze = wavefunction() 
  eacc = EnergyAccumulator(mol)
  transform = LinearTransform(wf.parameters, to_opt, freeze)
  pgrad = PGradTransform(eacc, transform, 1e-20)

  #Initial coords
  configs = pyqmc.initial_guess(mol, 1).configs[:,:,:]

  #Sweep electron e along coordinate dim
  full_df = None
  e = 3 #Electron to sweep
  dim = 1 #Coordinate to vary
  
  for i in np.linspace(0, 20, 200):
    new_configs = copy.deepcopy(configs)
    new_configs[:,e,dim] += i
    shifted_configs = OpenConfigs(new_configs)
    wfval = wf.recompute(shifted_configs)
    d = pgrad(shifted_configs, wf)
    small_df = pd.DataFrame({
      'ke':[d['ke'][0]],
      'total':[d['total'][0]],
      'dppsi':[d['dppsi'][0][0]],
      'dpH'  :[d['dpH'][0][0]],
      'wfval':[wfval[0][0]*np.exp(wfval[1][0])],
      'ycoord': i,
      'configs':[copy.deepcopy(new_configs)],
    })
    if(full_df is None): full_df = small_df
    else: full_df = pd.concat((full_df, small_df), axis=0)
  
  return full_df.reset_index()
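A hedged sketch, not from the original source, of how Examples #6, #4, and #2 appear to chain together; the scale and cutoff values are illustrative assumptions.

df = sweepelectron()                                          #coarse scan across the node (Example #6)
node_coords, node_grad = locatenode(df, scale=1e-40)          #refine the nodal position (Example #4)
viznode(node_coords, node_grad, cutoffs=[1e-5, 1e-3, 1e-2])   #plot the regularized estimator (Example #2)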
Example #7
import numpy as np
import matplotlib.pyplot as plt
import wavefunction as wf
import matrix_product_state as mps
import DEnFG as denfg

k = 6
n = 4
alphabet = 2
t_max = 20
error = 1e-6
psi = np.array([[[[1., 1.], [2., 0.5 + 3.j]], [[4., 8.j], [-1.j, 2.]]], [[[4., 1.j], [2., 1.j]], [[3.j, 4.], [2., 1.j]]]])
#psi = np.array([[[[1., 0.], [0., 0.j]], [[0., 0.j], [0.j, 0.]]], [[[1.j, 0.j], [0., 0.j]], [[0.j, 0.], [0., 0.j]]]])

wf = wf.wavefunction()  #rebinds the module alias 'wf' to a wavefunction instance
wf.addwf(psi, n, alphabet)

z = np.array([[1, 0], [0, -1]])

expectation_z0_wf = wf.SingleSpinMeasurement(0, z)
expectation_z1_wf = wf.SingleSpinMeasurement(1, z)
expectation_z2_wf = wf.SingleSpinMeasurement(2, z)
expectation_z3_wf = wf.SingleSpinMeasurement(3, z)

expectation_z0_mps = []
expectation_z1_mps = []
expectation_z2_mps = []
expectation_z3_mps = []

expectation_z0_graph = []
expectation_z1_graph = []
Example #8
import numpy as np
import pyqmc
from pyqmc.dasktools import distvmc
from dask.distributed import Client, LocalCluster
from pyqmc.accumulators import EnergyAccumulator, LinearTransform
from pyqmc_regr import PGradTransform_new
from wavefunction import wavefunction

if __name__ == '__main__':
    nconfig_per_core = 100
    ncore = 20
    nsteps = 2000000

    cutoffs = list(np.logspace(-8, -1, 20)) + list([
        0.05, 0.075, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.20
    ])
    cutoffs = np.sort(cutoffs)
    mol, mf, mc, wf, to_opt, freeze = wavefunction(return_mf=True)
    wf.parameters['wf1det_coeff'] *= 0
    wf.parameters['wf1det_coeff'][[0, 10]] = 1. / np.sqrt(2.)

    eacc = EnergyAccumulator(mol)
    transform = LinearTransform(wf.parameters, to_opt, freeze)
    pgrad = PGradTransform_new(eacc, transform, np.array(cutoffs))

    #Client
    cluster = LocalCluster(n_workers=ncore, threads_per_worker=1)
    client = Client(cluster)

    distvmc(wf,
            pyqmc.initial_guess(mol, nconfig_per_core * ncore),
            client=client,
            accumulators={"pgrad": pgrad},
Example #9
def integratenode(node_coords,
                  node_grad,
                  vizfile='integratenode.pdf',
                  integ_range=1e-1,
                  poly=1e-2,
                  max_cutoff=0.1):
    from wavefunction import wavefunction
    import scipy.integrate as integrate
    from numpy.polynomial import polynomial
    mol, wf, to_opt, freeze = wavefunction()

    eacc = EnergyAccumulator(mol)
    transform = LinearTransform(wf.parameters, to_opt, freeze)
    pgrad_bare = PGradTransform(eacc, transform, 1e-15)
    #Integrate biases and variances

    biases = []
    biases_err = []
    variances = []
    cutoffs = list(np.logspace(-6, -1, 20)) + [0.05, 0.075]
    '''
  normalization = integrate.quad(lambda x: psi2(x, node_coords, node_grad, wf), -integ_range, integ_range, epsabs = 1e-15, epsrel = 1e-15)
  for cutoff in cutoffs:
    print(cutoff)
    pgrad = PGradTransform_new(eacc, transform, nodal_cutoff = np.array([cutoff]))
    bias = integrate.quad(lambda x: dpH(x, pgrad, pgrad_bare, node_coords, node_grad, wf)/1e-40, -cutoff, cutoff, epsabs = 1e-15, epsrel = 1e-15, points=[0])
    variance = integrate.quad(lambda x: dpH2(x, pgrad, node_coords, node_grad, wf),  -cutoff, cutoff, epsabs = 1e-15, epsrel = 1e-15, points=[0])
    variance += integrate.quad(lambda x: dpH2(x, pgrad, node_coords, node_grad, wf),  -integ_range + cutoff, integ_range - cutoff, epsabs = 1e-15, epsrel = 1e-15)
    biases.append(bias[0]*1e-40/normalization[0])
    variances.append(variance[0]/normalization[0])
  df = pd.DataFrame({'cutoff': cutoffs, 'bias': biases, 'variance': variances})
  df.to_pickle('integratenode.pickle')
  '''
    df = pd.read_pickle('integratenode.pickle')
    #Fit theory curves and visualize
    ind = np.argsort(df['cutoff'])

    ind = ind[df['cutoff'].iloc[ind] <= max_cutoff]

    fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(3, 6), sharex=True)
    x = df['cutoff'].iloc[ind]
    y = np.abs(df['bias']).iloc[ind]
    y = y[2:]
    x = x[2:]

    p = polynomial.polyfit(x, y, [3])
    print("Fit for bias ", p)
    xfit = np.linspace(min(x), max(x[x < poly]), 1000)
    fit = p[3] * (xfit)**3
    ax[0].plot(np.log10(x), np.log10(y), 'o')
    ax[0].plot(np.log10(xfit), np.log10(fit), '--')
    ax[0].set_ylabel(r'log$_{10}$(Bias)')

    x = df['cutoff'].iloc[ind]
    y = df['variance'].iloc[ind]
    y = y[2:]
    x = x[2:]
    x = np.log10(x)
    y = np.log10(y)
    poly = np.log10(poly)
    p = polynomial.polyfit(x[x < poly], y[x < poly], [1, 0])
    print("Fit for variance ", p)
    xfit = np.logspace(min(x), max(x[x <= poly]), 1000)
    fit = p[0] + p[1] * np.log10(xfit)
    ax[1].plot(x, y, 'o')
    ax[1].plot(np.log10(xfit), fit, '--')
    ax[1].set_xlabel(r'$log_{10}(\epsilon/$Bohr$)$')
    ax[1].set_ylabel(r'log$_{10}$(Variance)')
    #ax[1].set_xlim((-3.2, 2.3))
    #ax[1].set_xticks(np.arange(-3,3))

    plt.savefig(vizfile, bbox_inches='tight')
    plt.close()