Example no. 1
def write_script_output(fn_h5, grp_name, results, args):
    '''Store the output of a script in an HDF5 file

       **Arguments:**

       fn_h5
            The HDF5 filename

       grp_name
            The name of the group where the results will be stored.

       results
            A dictionary with results.

       args
            The results of the command line parser. All arguments are stored
            as attributes in the HDF5 output file.
    '''
    with LockedH5File(fn_h5) as f:
        # Store results
        grp = f.require_group(grp_name)
        for key in grp.keys():
            del grp[key]
        dump_h5(grp, results)

        # Store command-line arguments
        store_args(args, grp)

        if log.do_medium:
            log('Results written to %s:%s' % (fn_h5, grp_name))
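A minimal usage sketch for the function above, assuming write_script_output and its HORTON helpers (LockedH5File, dump_h5, store_args) are importable; the file name, group name, and argument names below are made up for illustration.

import argparse

# Hypothetical inputs: a results dictionary and a parsed command-line namespace.
args = argparse.Namespace(scheme='b', lmax=3)
results = {'charges': [0.31, -0.31]}
# Clears and (re)fills the group 'some_script' in results.h5 and stores the
# command-line arguments as attributes.
write_script_output('results.h5', 'some_script', results, args)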
Example no. 2
 def _log_init(self):
     '''Write a summary of the wavefunction to the screen logger'''
     if log.do_medium:
         log('Initialized: %s' % self)
         if self.occ_model is not None:
             self.occ_model.log()
         log.blank()
Example no. 3
def guess_core_hamiltonian(overlap, core, *orbs):
    '''Guess the orbitals by diagonalizing a core Hamiltonian.

    Parameters
    ----------
    overlap : np.ndarray, shape=(nbasis, nbasis), dtype=float
        The overlap operator.
    core : np.ndarray, shape=(nbasis, nbasis), dtype=float
        The core Hamiltonian. Any operator that resembles a Fock operator is fine.
        Usually, one adds the kinetic energy and nuclear attraction integrals.
    orb1, orb2, ... : Orbitals
        A list of Orbitals objects (output arguments)

    This method only modifies the expansion coefficients and the orbital energies.
    '''
    if log.do_medium:
        log('Performing a core Hamiltonian guess.')
        log.blank()

    if len(orbs) == 0:
        raise TypeError('At least one set of orbitals is required.')

    # Compute orbitals.
    orbs[0].from_fock(core, overlap)
    # Copy to other Orbitals objects.
    for i in xrange(1, len(orbs)):
        orbs[i].coeffs[:] = orbs[0].coeffs
        orbs[i].energies[:] = orbs[0].energies
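For context, the linear algebra behind a core Hamiltonian guess is a generalized symmetric eigenvalue problem in the overlap metric. The sketch below is an illustrative stand-in for what Orbitals.from_fock has to solve, not HORTON's implementation.

from scipy.linalg import eigh

def core_guess(core, overlap):
    # Solve core @ C = overlap @ C @ diag(energies) for coefficients and energies.
    energies, coeffs = eigh(core, overlap)
    return coeffs, energies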
Example no. 4
def write_script_output(fn_h5, grp_name, results, args):
    '''Store the output of a script in an HDF5 file

       **Arguments:**

       fn_h5
            The HDF5 filename

       grp_name
            The name of the group where the results will be stored.

       results
            A dictionary with results.

       args
            The results of the command line parser. All arguments are stored
            as attributes in the HDF5 output file.
    '''
    with LockedH5File(fn_h5) as f:
        # Store results
        grp = f.require_group(grp_name)
        for key in grp.keys():
            del grp[key]
        dump_h5(grp, results)

        # Store command-line arguments
        store_args(args, grp)

        if log.do_medium:
            log('Results written to %s:%s' % (fn_h5, grp_name))
Example no. 5
    def eval_proatom(self, index, output, grid=None):
        # Greedy version of eval_proatom
        output[:] = self.get_constant(index, grid)

        # Load the pro-atom parameters and this atom's slice of the basis.
        propars = self._cache.load('propars')
        begin = self.hebasis.get_atom_begin(index)
        nbasis = self.hebasis.get_atom_nbasis(index)

        for j in xrange(nbasis):
            coeff = propars[j+begin]
            if coeff != 0.0:
                output += coeff*self.get_basis(index, j, grid)

        # correct if the proatom is negative in some parts
        if output.min() < 0:
            # use a dedicated grid for this part, which is needed in case of local+greedy.
            pop_grid = self.get_grid(index)
            if grid is None or pop_grid == grid:
                pop_output = output
            else:
                assert grid is self.grid
                pop_output = self.to_atomic_grid(index, output)
            pop_before = pop_grid.integrate(pop_output)
            # clipping is done on the original array
            np.clip(output, 1e-100, np.inf, out=output)
            pop_after = pop_grid.integrate(pop_output)
            error = pop_before - pop_after
            if abs(error) > 1e-5:
                if log.do_medium:
                    log('Lost %.1e electrons in proatom %i' % (error, index))
        output += 1e-100
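The clipping step above, summarized in isolation: weights stands in for whatever quadrature weights pop_grid.integrate applies, so this is a sketch of the bookkeeping rather than HORTON's grid machinery.

import numpy as np

def clip_negative_density(density, weights, threshold=1e-5):
    # Integrate before and after clipping to see how many electrons are lost.
    pop_before = np.dot(weights, density)
    np.clip(density, 1e-100, np.inf, out=density)  # clip in place, as above
    pop_after = np.dot(weights, density)
    error = pop_before - pop_after
    if abs(error) > threshold:
        print('Lost %.1e electrons' % error)
    return density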
Example no. 6
    def do_density_decomposition(self):
        if not self.local:
            if log.do_warning:
                log.warn(
                    'Skipping density decomposition because no local grids were found.'
                )
            return

        for index in xrange(self.natom):
            atgrid = self.get_grid(index)
            assert isinstance(atgrid, AtomicGrid)
            key = ('density_decomposition', index)
            if key not in self.cache:
                moldens = self.get_moldens(index)
                self.do_partitioning()
                if log.do_medium:
                    log('Computing density decomposition for atom %i' % index)
                at_weights = self.cache.load('at_weights', index)
                splines = atgrid.get_spherical_decomposition(moldens,
                                                             at_weights,
                                                             lmax=self.lmax)
                density_decomposition = dict(
                    ('spline_%05i' % j, spline)
                    for j, spline in enumerate(splines))
                self.cache.dump(key, density_decomposition, tags='o')
Example no. 7
    def do_hartree_decomposition(self):
        if not self.local:
            if log.do_warning:
                log.warn(
                    'Skipping hartree decomposition because no local grids were found.'
                )
            return

        for index in xrange(self.natom):
            key = ('hartree_decomposition', index)
            if key not in self.cache:
                self.do_density_decomposition()
                if log.do_medium:
                    log('Computing hartree decomposition for atom %i' % index)
                density_decomposition = self.cache.load(
                    'density_decomposition', index)
                rho_splines = [
                    spline for foo, spline in sorted(
                        density_decomposition.iteritems())
                ]
                v_splines = solve_poisson_becke(rho_splines)
                hartree_decomposition = dict(
                    ('spline_%05i' % j, spline)
                    for j, spline in enumerate(v_splines))
                self.cache.dump(key, hartree_decomposition, tags='o')
Example no. 8
 def eval_spline(self, index, spline, output, grid, label='noname'):
     center = self.coordinates[index]
     if log.do_debug:
         number = self.numbers[index]
         log('  Evaluating spline (%s) for atom %i (n=%i) on %i grid points'
             % (label, index, number, grid.size))
     grid.eval_spline(spline, center, output)
Example no. 9
    def update_at_weights(self):
        if log.do_medium:
            log('Computing Becke weights.')

        # The list of radii is constructed to be as close as possible to
        # the original values used by Becke.
        radii = []
        for number in self.numbers:
            if number == 1:
                radius = 0.35*angstrom # exception defined in Becke's paper
            else:
                radius = periodic[number].becke_radius
                if radius is None: # for cases not covered by Bragg-Slater
                    radius = periodic[number].cov_radius
            radii.append(radius)
        radii = np.array(radii)

        # Actual work
        pb = log.progress(self.natom)
        for index in xrange(self.natom):
            grid = self.get_grid(index)
            at_weights = self.cache.load('at_weights', index, alloc=grid.shape)[0]
            at_weights[:] = 1
            becke_helper_atom(grid.points, at_weights, radii, self.coordinates, index, self._k)
            pb()
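The heavy lifting in becke_helper_atom is the iterated switching function from Becke's 1988 paper. The textbook form is sketched below for reference; it is not HORTON's C implementation and it omits the atomic-size adjustment of the hyperbolic coordinate mu.

def becke_switch(mu, k=3):
    # Apply the smoothing polynomial f(mu) = 1.5*mu - 0.5*mu**3 k times, then map
    # the result to a cell function that is 1 near this atom and 0 near its neighbour.
    f = mu
    for _ in range(k):
        f = 1.5 * f - 0.5 * f ** 3
    return 0.5 * (1.0 - f)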
Example no. 10
    def update_at_weights(self):
        if log.do_medium:
            log('Computing Becke weights.')

        # The list of radii is constructed to be as close as possible to
        # the original values used by Becke.
        radii = []
        for number in self.numbers:
            if number == 1:
                radius = 0.35*angstrom # exception defined in Becke's paper
            else:
                radius = periodic[number].becke_radius
                if radius is None: # for cases not covered by Bragg-Slater
                    radius = periodic[number].cov_radius
            radii.append(radius)
        radii = np.array(radii)

        # Actual work
        pb = log.progress(self.natom)
        for index in xrange(self.natom):
            grid = self.get_grid(index)
            at_weights = self.cache.load('at_weights', index, alloc=grid.shape)[0]
            at_weights[:] = 1
            becke_helper_atom(grid.points, at_weights, radii, self.coordinates, index, self._k)
            pb()
Example no. 11
    def fix_proatom_rho(self, index, rho, deriv):
        '''Check if the radial density for the proatom is correct and fix as needed.

           **Arguments:**

           index
                The atom for which this proatom rho is created.

           rho
                The radial density

           deriv
                the derivative of the radial density or None.
        '''
        rgrid = self.get_rgrid(index)

        # Check for negative parts
        original = rgrid.integrate(rho)
        if rho.min() < 0:
            rho[rho < 0] = 0.0
            deriv = None
            error = rgrid.integrate(rho) - original
            if log.do_medium:
                log('                    Pro-atom not positive everywhere. Lost %.1e electrons'
                    % error)

        return rho, deriv
Example no. 12
def guess_core_hamiltonian(overlap, core, *orbs):
    '''Guess the orbitals by diagonalizing a core Hamiltonian.

    Parameters
    ----------
    overlap : np.ndarray, shape=(nbasis, nbasis), dtype=float
        The overlap operator.
    core : np.ndarray, shape=(nbasis, nbasis), dtype=float
        The core Hamiltonian. Any operator that resembles a Fock operator is fine.
        Usually, one adds the kinetic energy and nuclear attraction integrals.
    orb1, orb2, ... : Orbitals
        A list of Orbitals objects (output arguments)

    This method only modifies the expansion coefficients and the orbital energies.
    '''
    if log.do_medium:
        log('Performing a core Hamiltonian guess.')
        log.blank()

    if len(orbs) == 0:
        raise TypeError('At least one set of orbitals is required.')

    # Compute orbitals.
    orbs[0].from_fock(core, overlap)
    # Copy to other Orbitals objects.
    for i in xrange(1, len(orbs)):
        orbs[i].coeffs[:] = orbs[0].coeffs
        orbs[i].energies[:] = orbs[0].energies
Example no. 13
    def fix_proatom_rho(self, index, rho, deriv):
        '''Check if the radial density for the proatom is correct and fix as needed.

           **Arguments:**

           index
                The atom for which this proatom rho is created.

           rho
                The radial density

           deriv
                the derivative of the radial density or None.
        '''
        rgrid = self.get_rgrid(index)

        # Check for negative parts
        original = rgrid.integrate(rho)
        if rho.min() < 0:
            rho[rho < 0] = 0.0
            deriv = None
            error = rgrid.integrate(rho) - original
            if log.do_medium:
                log('                    Pro-atom not positive everywhere. Lost %.1e electrons' % error)

        return rho, deriv
Example no. 14
    def swap_orbitals(self, swaps):
        """Change the order of the orbitals using pair-exchange.

        Parameters
        ----------
        swaps : np.ndarray, shape=(m, 2), dtype=int
            An integer numpy array with two columns where every row corresponds to one
            swap.

        The attributes ``energies`` and ``occupations`` are also reordered.
        """
        if not (swaps.ndim == 2 and swaps.shape[1] == 2
                and issubclass(swaps.dtype.type, np.integer)):
            raise TypeError('The argument swaps has the wrong shape/type.')
        for iswap in range(len(swaps)):
            index0, index1 = swaps[iswap]
            if log.do_medium:
                log('  Swapping orbitals %i and %i' % (index0, index1))
            tmp = self.coeffs[:, index0].copy()
            self.coeffs[:, index0] = self.coeffs[:, index1]
            self.coeffs[:, index1] = tmp
            self.energies[index0], self.energies[index1] =\
                self.energies[index1], self.energies[index0]
            self.occupations[index0], self.occupations[index1] =\
                self.occupations[index1], self.occupations[index0]
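A hypothetical call for the method above; orb stands for any Orbitals instance and the indices are made up.

import numpy as np

# One row per pair exchange: swap orbitals 3 and 5, then 7 and 8, in a single call.
swaps = np.array([[3, 5], [7, 8]])
orb.swap_orbitals(swaps)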
Example no. 15
 def _log_init(self):
     '''Write a summary of the wavefunction to the screen logger'''
     if log.do_medium:
         log('Initialized: %s' % self)
         if self.occ_model is not None:
             self.occ_model.log()
         log.blank()
Example no. 16
 def _init_log_base(self):
     if log.do_medium:
         log('Performing a density-based AIM analysis with a wavefunction as input.')
         log.deflist([
             ('Molecular grid', self._grid),
             ('Using local grids', self._local),
         ])
Example no. 17
    def eval_proatom(self, index, output, grid=None):
        # Greedy version of eval_proatom
        output[:] = self.get_constant(index, grid)

        # Load the pro-atom parameters and this atom's slice of the basis.
        propars = self._cache.load('propars')
        begin = self.hebasis.get_atom_begin(index)
        nbasis = self.hebasis.get_atom_nbasis(index)

        for j in xrange(nbasis):
            coeff = propars[j + begin]
            if coeff != 0.0:
                output += coeff * self.get_basis(index, j, grid)

        # correct if the proatom is negative in some parts
        if output.min() < 0:
            # use a dedicated grid for this part, which is needed in case of local+greedy.
            pop_grid = self.get_grid(index)
            if grid is None or pop_grid == grid:
                pop_output = output
            else:
                assert grid is self.grid
                pop_output = self.to_atomic_grid(index, output)
            pop_before = pop_grid.integrate(pop_output)
            # clipping is done on the original array
            np.clip(output, 1e-100, np.inf, out=output)
            pop_after = pop_grid.integrate(pop_output)
            error = pop_before - pop_after
            if abs(error) > 1e-5:
                if log.do_medium:
                    log('Lost %.1e electrons in proatom %i' % (error, index))
        output += 1e-100
Example no. 18
 def _init_log_base(self):
     if log.do_medium:
         log('Performing a density-based AIM analysis with a wavefunction as input.')
         log.deflist([
             ('Molecular grid', self._grid),
             ('Using local grids', self._local),
         ])
Example no. 19
    def do_partitioning(self):
        # Perform one general check in the beginning to avoid recomputation
        new = any(('at_weights', i) not in self.cache for i in xrange(self.natom))
        new |= 'niter' not in self.cache
        new |= 'change' not in self.cache
        if new:
            propars = self._init_propars()
            if log.do_medium:
                log.hline()
                log('Iteration       Change')
                log.hline()

            counter = 0
            change = 1e100

            while True:
                counter += 1

                # Update the parameters that determine the pro-atoms.
                old_propars = propars.copy()
                self._update_propars()

                # Check for convergence
                change = self.compute_change(propars, old_propars)
                if log.do_medium:
                    log('%9i   %10.5e' % (counter, change))
                if change < self._threshold or counter >= self._maxiter:
                    break

            if log.do_medium:
                log.hline()

            self._finalize_propars()
            self.cache.dump('niter', counter, tags='o')
            self.cache.dump('change', change, tags='o')
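Stripped of caching and logging, the convergence loop above follows the usual fixed-point pattern sketched here; the names are placeholders, not HORTON API.

def converge(step, threshold=1e-6, maxiter=500):
    # step() updates the pro-atom parameters and returns the change with respect
    # to the previous iteration; stop when the change is small or maxiter is hit.
    counter, change = 0, float('inf')
    while change >= threshold and counter < maxiter:
        counter += 1
        change = step()
    return counter, change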
Example no. 20
 def log(self):
     log('Occupation model: %s' % self)
     log.deflist([
         ('nalpha', self.nalpha),
         ('nbeta', self.nbeta),
         ('temperature', self.temperature),
         ('eps', self.eps),
     ])
Example no. 21
 def eval_spline(self, index, spline, output, grid=None, label='noname'):
     center = self.system.coordinates[index]
     if grid is None:
         grid = self.get_grid(index)
     if log.do_debug:
         number = self.system.numbers[index]
         log('  Evaluating spline (%s) for atom %i (n=%i) on %i grid points' % (label, index, number, grid.size))
     grid.eval_spline(spline, center, output)
Example no. 22
 def do_charges(self):
     charges, new = self._cache.load('charges', alloc=self.natom, tags='o')
     if new:
         self.do_populations()
         populations = self._cache.load('populations')
         if log.do_medium:
             log('Computing atomic charges.')
         charges[:] = self.numbers - populations
Example no. 23
 def do_charges(self):
     charges, new = self._cache.load('charges', alloc=self.natom, tags='o')
     if new:
         self.do_populations()
         populations = self._cache.load('populations')
         if log.do_medium:
             log('Computing atomic charges.')
         charges[:] = self.numbers - populations
Example no. 24
 def log(self):
     log('Occupation model: %s' % self)
     log.deflist([
         ('nalpha', self.nalpha),
         ('nbeta', self.nbeta),
         ('temperature', self.temperature),
         ('eps', self.eps),
     ])
Example no. 25
 def _log_init(self):
     if log.do_medium:
         log('Initialized: %s' % self)
         log.deflist([
             ('Numbers', self._rgrid_map.keys()),
             ('Records', self._map.keys()),
         ])
         log.blank()
Example no. 26
 def _log_init(self):
     if log.do_medium:
         log('Initialized: %s' % self)
         log.deflist([
             ('Numbers', self._rgrid_map.keys()),
             ('Records', self._map.keys()),
         ])
         log.blank()
Example no. 27
    def do_moments(self):
        if log.do_medium:
            log('Computing cartesian and pure AIM multipoles and radial AIM moments.')

        ncart = get_ncart_cumul(self.lmax)
        cartesian_multipoles, new1 = self._cache.load(
            'cartesian_multipoles',
            alloc=(self._system.natom, ncart),
            tags='o')

        npure = get_npure_cumul(self.lmax)
        pure_multipoles, new1 = self._cache.load('pure_multipoles',
                                                 alloc=(self._system.natom,
                                                        npure),
                                                 tags='o')

        nrad = self.lmax + 1
        radial_moments, new2 = self._cache.load('radial_moments',
                                                alloc=(self._system.natom,
                                                       nrad),
                                                tags='o')

        if new1 or new2:
            self.do_partitioning()
            for i in xrange(self._system.natom):
                # 1) Define a 'window' of the integration grid for this atom
                center = self._system.coordinates[i]
                grid = self.get_grid(i)

                # 2) Compute the AIM
                aim = self.get_moldens(i) * self.cache.load('at_weights', i)

                # 3) Compute weight corrections (TODO: needs to be assessed!)
                wcor = self.get_wcor(i)

                # 4) Compute Cartesian multipole moments
                # The minus sign is present to account for the negative electron
                # charge.
                cartesian_multipoles[i] = -grid.integrate(
                    aim, wcor, center=center, lmax=self.lmax, mtype=1)
                cartesian_multipoles[i, 0] += self.system.pseudo_numbers[i]

                # 5) Compute Pure multipole moments
                # The minus sign is present to account for the negative electron
                # charge.
                pure_multipoles[i] = -grid.integrate(
                    aim, wcor, center=center, lmax=self.lmax, mtype=2)
                pure_multipoles[i, 0] += self.system.pseudo_numbers[i]

                # 6) Compute Radial moments
                # For the radial moments, it is not common to put a minus sign
                # for the negative electron charge.
                radial_moments[i] = grid.integrate(aim,
                                                   wcor,
                                                   center=center,
                                                   lmax=self.lmax,
                                                   mtype=3)
Example no. 28
    def _update_propars_atom(self, index):
        # Prepare some things
        charges = self._cache.load('charges', alloc=self.natom, tags='o')[0]
        begin = self.hebasis.get_atom_begin(index)
        nbasis = self.hebasis.get_atom_nbasis(index)

        # Compute charge and delta aim density
        charge, delta_aim = self._get_charge_and_delta_aim(index)
        charges[index] = charge

        # Preliminary check
        if charges[index] > nbasis:
            raise RuntimeError('The charge on atom %i becomes too positive: %f > %i. (infeasible)' % (index, charges[index], nbasis))

        # Define the least-squares system
        A, B, C = self._get_he_system(index, delta_aim)

        # preconditioning
        scales = np.sqrt(np.diag(A))
        A = A/scales/scales.reshape(-1,1)
        B /= scales

        # resymmetrize A due to potential round-off errors after rescaling
        # (minor thing)
        A = 0.5*(A + A.T)

        # Find solution
        #    constraint for total population of pro-atom
        qp_r = np.array([np.ones(nbasis)/scales])
        qp_s = np.array([-charges[index]])
        #    inequality constraints to keep coefficients larger than -1 or 0.
        lower_bounds = np.zeros(nbasis)
        for j0 in xrange(nbasis):
            lower_bounds[j0] = self.hebasis.get_lower_bound(index, j0)*scales[j0]
        #    call the quadratic solver with modified b due to non-zero lower bound
        qp_a = A
        qp_b = B - np.dot(A, lower_bounds)
        qp_s -= np.dot(qp_r, lower_bounds)
        qps = QPSolver(qp_a, qp_b, qp_r, qp_s)
        qp_x = qps.find_brute()[1]
        # convert back to atom_pars
        atom_propars = qp_x + lower_bounds

        rrms = np.dot(np.dot(A, atom_propars) - 2*B, atom_propars)/C + 1
        if rrms > 0:
            rrmsd = np.sqrt(rrms)
        else:
            rrmsd = -0.01

        #    correct for scales
        atom_propars /= scales

        if log.do_high:
            log('            %10i (%.0f%%):&%s' % (index, rrmsd*100, ' '.join('% 6.3f' % c for c in atom_propars)))

        self.cache.load('propars')[begin:begin+nbasis] = atom_propars
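The preconditioning and bound handling above amount to a change of variables: after dividing out sqrt(diag(A)), the lower bounds are absorbed by solving for y = x - lb >= 0, which only shifts the linear term and the equality right-hand side. The sketch below spells out that shift; it is illustrative and does not call HORTON's QPSolver.

import numpy as np

def shift_lower_bounds(A, B, R, s, lb):
    # minimize 0.5*x^T A x - B^T x  subject to  R x = s  and  x >= lb
    # becomes, with x = y + lb and y >= 0:
    qp_b = B - A.dot(lb)   # new linear term
    qp_s = s - R.dot(lb)   # new equality right-hand side
    return qp_b, qp_s      # solve the nonnegative problem for y, then x = y + lb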
Example no. 29
def guess_hamiltonian_core(system):
    if log.do_medium:
        log('Performing a hamiltonian core guess.')
        log.blank()
    if isinstance(system.wfn, RestrictedWFN):
        guess_hamiltonian_core_cs(system)
    elif isinstance(system.wfn, UnrestrictedWFN):
        guess_hamiltonian_core_os(system)
    else:
        raise NotImplementedError
Example no. 30
def guess_hamiltonian_core(system):
    if log.do_medium:
        log('Performing a hamiltonian core guess.')
        log.blank()
    if isinstance(system.wfn, RestrictedWFN):
        guess_hamiltonian_core_cs(system)
    elif isinstance(system.wfn, UnrestrictedWFN):
        guess_hamiltonian_core_os(system)
    else:
        raise NotImplementedError
Example no. 31
 def _init_log_base(self):
     if log.do_medium:
         log('Performing a density-based AIM analysis with a wavefunction as input.')
         log.deflist([
             ('Molecular grid', self._grid),
             ('System', self._system),
             ('Using local grids', self._local),
             ('Epsilon density:', self.epsilon),
             ('Compute expensive properties (slow)', self._slow),
         ])
Example no. 32
 def do_populations(self):
     populations, new = self.cache.load('populations', alloc=self.natom, tags='o')
     if new:
         self.do_partitioning()
         pseudo_populations = self.cache.load('pseudo_populations', alloc=self.natom, tags='o')[0]
         if log.do_medium:
             log('Computing atomic populations.')
         for i in xrange(self.natom):
             pseudo_populations[i] = self.compute_pseudo_population(i)
         populations[:] = pseudo_populations
         populations += self.numbers - self.pseudo_numbers
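The correction in the last two lines above restores the core electrons that a pseudopotential removed: each integrated pseudo-population is shifted by the difference between the element's nuclear charge and its pseudo charge. Restated with placeholder names:

def full_population(pseudo_population, number, pseudo_number):
    # e.g. a carbon atom with a 4-electron pseudopotential: number=6, pseudo_number=4
    return pseudo_population + (number - pseudo_number)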
Example no. 33
 def _init_log_base(self):
     if log.do_medium:
         log('Performing a density-based AIM analysis with a wavefunction as input.')
         log.deflist([
             ('Molecular grid', self._grid),
             ('System', self._system),
             ('Using local grids', self._local),
             ('Epsilon density:', self.epsilon),
             ('Compute expensive properties (slow)', self._slow),
         ])
Example no. 34
 def do_populations(self):
     populations, new = self.cache.load('populations', alloc=self.natom, tags='o')
     if new:
         self.do_partitioning()
         pseudo_populations = self.cache.load('pseudo_populations', alloc=self.natom, tags='o')[0]
         if log.do_medium:
             log('Computing atomic populations.')
         for i in xrange(self.natom):
             pseudo_populations[i] = self.compute_pseudo_population(i)
         populations[:] = pseudo_populations
         populations += self.numbers - self.pseudo_numbers
Example no. 35
 def _log_init(self):
     if log.do_medium:
         log('Initialized: %s' % self)
         log.deflist([
             ('Size', self.size),
             ('Switching function', 'k=%i' % self._k),
         ])
         log.blank()
     # Cite reference
     log.cite('becke1988_multicenter', 'the multicenter integration scheme used for the molecular integration grid')
     log.cite('cordero2008', 'the covalent radii used for the Becke-Lebedev molecular integration grid')
Example no. 36
 def _log_init(self):
     if log.do_medium:
         log('Initialized: %s' % self)
         log.deflist([
             ('Size', self.size),
             ('Switching function', 'k=%i' % self._k),
         ])
         log.blank()
     # Cite reference
     biblio.cite('becke1988_multicenter', 'the multicenter integration scheme used for the molecular integration grid')
     biblio.cite('cordero2008', 'the covalent radii used for the Becke-Lebedev molecular integration grid')
Example no. 37
 def _init_log_base(self):
     if log.do_medium:
         log('Performing a density-based AIM analysis with a cube file as input.')
         log.deflist([
             ('Uniform Integration Grid', self.grid),
             ('Grid shape', self.grid.shape),
             ('Using local grids', self._local),
             ('Mean spacing', '%10.5e' % (self.grid.get_grid_cell().volume**(1.0/3.0))),
             ('Weight corr. numbers', ' '.join(str(n) for n in self.wcor_numbers)),
             ('Weight corr. max rcut', '%10.5f' % self._wcor_rcut_max),
             ('Weight corr. rcond', '%10.5e' % self._wcor_rcond),
         ])
Example no. 38
 def do_spin_charges(self):
     if self._spindens is not None:
         spin_charges, new = self._cache.load('spin_charges', alloc=self.natom, tags='o')
         self.do_partitioning()
         if log.do_medium:
             log('Computing atomic spin charges.')
         for index in xrange(self.natom):
             grid = self.get_grid(index)
             spindens = self.get_spindens(index)
             at_weights = self.cache.load('at_weights', index)
             wcor = self.get_wcor(index)
             spin_charges[index] = grid.integrate(at_weights, spindens, wcor)
Example no. 39
def guess_core_hamiltonian(overlap, *args, **kwargs):
    '''Guess the orbitals by diagonalizing a core Hamiltonian

       **Arguments:**

       overlap
            The overlap operator.

       core1, core2, ...
            A number of operators that add up to the core Hamiltonian. Any set
            of operators whose sum resembles a Fock operator is fine. Usually,
            one passes the kinetic energy and nuclear attraction integrals.

       exp1, exp2, ...
            A list of wavefunction expansion objects (output arguments)

       This method only modifies the expansion coefficients and the orbital
       energies.
    '''
    if len(kwargs) != 0:
        raise TypeError('Unknown keyword arguments: %s' % kwargs.keys())

    if log.do_medium:
        log('Performing a core Hamiltonian guess.')
        log.blank()

    core = []
    exps = []
    for arg in args:
        if isinstance(arg, TwoIndex):
            core.append(arg)
        elif isinstance(arg, Expansion):
            exps.append(arg)
        else:
            raise TypeError('argument of unsupported type: %s' % arg)

    if len(core) == 0:
        raise TypeError(
            'At least one term is needed for the core Hamiltonian.')
    if len(exps) == 0:
        raise TypeError('At least one wavefunction expansion is needed.')

    # Take sum of operators for core hamiltonian
    hamcore = core[0].copy()
    for term in core[1:]:
        hamcore.iadd(term)

    # Compute orbitals.
    exps[0].from_fock(hamcore, overlap)
    # Copy to other expansions.
    for i in xrange(1, len(exps)):
        exps[i].coeffs[:] = exps[0].coeffs
        exps[i].energies[:] = exps[0].energies
Example no. 40
 def write_run_script(self):
     # write the script
     fn_script = 'run_%s.sh' % self.name
     exists = os.path.isfile(fn_script)
     if not exists:
         with open(fn_script, 'w') as f:
             print >> f, self.run_script
         log('Written new:      ', fn_script)
     else:
         log('Not overwriting:  ', fn_script)
     # make the script executable
     os.chmod(fn_script, stat.S_IXUSR | os.stat(fn_script).st_mode)
Example no. 41
 def write_run_script(self):
     # write the script
     fn_script = 'run_%s.sh' % self.name
     exists = os.path.isfile(fn_script)
     if not exists:
         with open(fn_script, 'w') as f:
             print >> f, self.run_script
         log('Written new:      ', fn_script)
     else:
         log('Not overwriting:  ', fn_script)
     # make the script executable
     os.chmod(fn_script, stat.S_IXUSR | os.stat(fn_script).st_mode)
Example no. 42
 def _log_init(self):
     if log.do_high:
         log('Initialized: %s' % self)
         log.deflist([
             ('Size', self.size),
             ('Number of radii', self.nsphere),
             ('Min LL sphere', self._nlls.min()),
             ('Max LL sphere', self._nlls.max()),
             ('Radial Transform', self._rgrid.rtransform.to_string()),
         ])
     # Cite reference
     biblio.cite('lebedev1999', 'the use of Lebedev-Laikov grids (quadrature on a sphere)')
Example no. 43
 def do_spin_charges(self):
     if self._spindens is not None:
         spin_charges, new = self._cache.load('spin_charges', alloc=self.natom, tags='o')
         self.do_partitioning()
         if log.do_medium:
             log('Computing atomic spin charges.')
         for index in xrange(self.natom):
             grid = self.get_grid(index)
             spindens = self.get_spindens(index)
             at_weights = self.cache.load('at_weights', index)
             wcor = self.get_wcor(index)
             spin_charges[index] = grid.integrate(at_weights, spindens, wcor)
Example no. 44
def guess_core_hamiltonian(overlap, *args, **kwargs):
    '''Guess the orbitals by diagonalizing a core Hamiltonian

       **Arguments:**

       overlap
            The overlap operator.

       core1, core2, ...
            A number of operators that add up to the core Hamiltonian. Any set
            of operators whose sum resembles a Fock operator is fine. Usually,
            one passes the kinetic energy and nuclear attraction integrals.

       exp1, exp2, ...
            A list of wavefunction expansion objects (output arguments)

       This method only modifies the expansion coefficients and the orbital
       energies.
    '''
    if len(kwargs) != 0:
        raise TypeError('Unknown keyword arguments: %s' % kwargs.keys())

    if log.do_medium:
        log('Performing a core Hamiltonian guess.')
        log.blank()

    core = []
    exps = []
    for arg in args:
        if isinstance(arg, TwoIndex):
            core.append(arg)
        elif isinstance(arg, Expansion):
            exps.append(arg)
        else:
            raise TypeError('argument of unsupported type: %s' % arg)

    if len(core) == 0:
        raise TypeError('At least one term is needed for the core Hamiltonian.')
    if len(exps) == 0:
        raise TypeError('At least one wavefunction expansion is needed.')

    # Take sum of operators for core hamiltonian
    hamcore = core[0].copy()
    for term in core[1:]:
        hamcore.iadd(term)

    # Compute orbitals.
    exps[0].from_fock(hamcore, overlap)
    # Copy to other expansions.
    for i in xrange(1, len(exps)):
        exps[i].coeffs[:] = exps[0].coeffs
        exps[i].energies[:] = exps[0].energies
Example no. 45
 def _log_init(self):
     if log.do_high:
         log('Initialized: %s' % self)
         log.deflist([
             ('Size', self.size),
             ('Number of radii', self.nsphere),
             ('Min LL sphere', self._nlls.min()),
             ('Max LL sphere', self._nlls.max()),
             ('Radial Transform', self._rgrid.rtransform.to_string()),
             ('1D Integrator', self._rgrid.int1d),
         ])
     # Cite reference
     biblio.cite('lebedev1999', 'the use of Lebedev-Laikov grids (quadrature on a sphere)')
Example no. 46
 def log_energy(self):
     '''Write an overview of the last energy computation on screen'''
     log('Contributions to the energy:')
     log.hline()
     log('                                       Energy term                 Value')
     log.hline()
     for term in self.terms:
         energy = self.system.extra['energy_%s' % term.label]
         log('%50s  %20.12f' % (term.label, energy))
     log('%50s  %20.12f' % ('nn', self.system.extra['energy_nn']))
     log('%50s  %20.12f' % ('total', self.system.extra['energy']))
     log.hline()
     log.blank()
Example no. 47
    def do_dispersion(self):
        if self.lmax < 3:
            if log.do_warning:
                log.warn(
                    'Skipping the computation of dispersion coefficients because lmax=%i<3'
                    % self.lmax)
            return

        if log.do_medium:
            log.cite(
                'tkatchenko2009',
                'the method to evaluate atoms-in-molecules C6 parameters')
            log.cite('chu2004',
                     'the reference C6 parameters of isolated atoms')
            log.cite('yan1996', 'the isolated hydrogen C6 parameter')

        ref_c6s = { # reference C6 values in atomic units
            1: 6.499, 2: 1.42, 3: 1392.0, 4: 227.0, 5: 99.5, 6: 46.6, 7: 24.2,
            8: 15.6, 9: 9.52, 10: 6.20, 11: 1518.0, 12: 626.0, 13: 528.0,
            14: 305.0, 15: 185.0, 16: 134.0, 17: 94.6, 18: 64.2, 19: 3923.0,
            20: 2163.0, 21: 1383.0, 22: 1044.0, 23: 832.0, 24: 602.0, 25: 552.0,
            26: 482.0, 27: 408.0, 28: 373.0, 29: 253.0, 30: 284.0, 31: 498.0,
            32: 354.0, 33: 246.0, 34: 210.0, 35: 162.0, 36: 130.0, 37: 4769.0,
            38: 3175.0, 49: 779.0, 50: 659.0, 51: 492.0, 52: 445.0, 53: 385.0,
        }

        volumes, new_volumes = self._cache.load('volumes',
                                                alloc=self.natom,
                                                tags='o')
        volume_ratios, new_volume_ratios = self._cache.load('volume_ratios',
                                                            alloc=self.natom,
                                                            tags='o')
        c6s, new_c6s = self._cache.load('c6s', alloc=self.natom, tags='o')

        if new_volumes or new_volume_ratios or new_c6s:
            self.do_moments()
            radial_moments = self._cache.load('radial_moments')

            if log.do_medium:
                log('Computing atomic dispersion coefficients.')

            for i in xrange(self.natom):
                n = self.numbers[i]
                volumes[i] = radial_moments[i, 3]
                ref_volume = self.proatomdb.get_record(n, 0).get_moment(3)
                volume_ratios[i] = volumes[i] / ref_volume
                if n in ref_c6s:
                    c6s[i] = (volume_ratios[i])**2 * ref_c6s[n]
                else:
                    c6s[i] = -1  # This is just to indicate that no value is available.
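The ratio-squared scaling in the loop above is the Tkatchenko-Scheffler relation C6_eff = (V_AIM / V_free)**2 * C6_free. A minimal restatement follows; the numbers in the example call are illustrative only.

def effective_c6(aim_volume, free_volume, free_c6):
    # Scale the free-atom C6 by the squared ratio of AIM to free-atom volume.
    return (aim_volume / free_volume) ** 2 * free_c6

print(effective_c6(35.2, 38.1, 46.6))  # a slightly compressed carbon-like atom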
Example no. 48
 def log(self):
     """Write an overview of the last computation on screen."""
     log('Contributions to the energy:')
     log.hline()
     log('                                              term                 Value')
     log.hline()
     for term in self.terms:
         energy = self.cache['energy_%s' % term.label]
         log('%50s  %20.12f' % (term.label, energy))
     for key, energy in self.external.iteritems():
         log('%50s  %20.12f' % (key, energy))
     log('%50s  %20.12f' % ('total', self.cache['energy']))
     log.hline()
     log.blank()
Example no. 49
 def log_energy(self):
     '''Write an overview of the last energy computation on screen'''
     log('Contributions to the energy:')
     log.hline()
     log('                                       Energy term                 Value')
     log.hline()
     for term in self.terms:
         energy = self.system.extra['energy_%s' % term.label]
         log('%50s  %20.12f' % (term.label, energy))
     log('%50s  %20.12f' % ('nn', self.system.extra['energy_nn']))
     log('%50s  %20.12f' % ('total', self.system.extra['energy']))
     log.hline()
     log.blank()
Example no. 50
def _fix_molden_from_buggy_codes(result, filename):
    """Detect errors in the data loaded from a molden/mkl/... file and correct.

       **Argument:**

       result
            A dictionary with the data loaded in the ``load_molden`` function.

       This function can recognize erroneous files created by PSI4 and ORCA. The
       data in the obasis and signs fields will be updated accordingly.
    """
    obasis = result['obasis']
    permutation = result.get('permutation', None)
    if _is_normalized_properly(result['lf'], obasis, permutation,
                               result['exp_alpha'], result.get('exp_beta')):
        # The file is good. No need to change data.
        return
    if log.do_medium:
        log('Detected incorrect normalization of orbitals loaded from a file.')
    # Try to fix it as if it was a file generated by ORCA.
    orca_signs = _get_orca_signs(obasis)
    orca_con_coeffs = _get_fixed_con_coeffs(obasis, 'orca')
    orca_obasis = GOBasis(obasis.centers, obasis.shell_map, obasis.nprims,
                          obasis.shell_types, obasis.alphas, orca_con_coeffs)
    if _is_normalized_properly(result['lf'], orca_obasis,
                               permutation, result['exp_alpha'],
                               result.get('exp_beta'), orca_signs):
        if log.do_medium:
            log('Detected typical ORCA errors in file. Fixing them...')
        result['obasis'] = orca_obasis
        result['signs'] = orca_signs
        return
    # Try to fix it as if it was a file generated by PSI4 (pre 1.0).
    psi4_con_coeffs = _get_fixed_con_coeffs(obasis, 'psi4')
    psi4_obasis = GOBasis(obasis.centers, obasis.shell_map, obasis.nprims,
                          obasis.shell_types, obasis.alphas, psi4_con_coeffs)
    if _is_normalized_properly(result['lf'], psi4_obasis, permutation,
                               result['exp_alpha'], result.get('exp_beta')):
        if log.do_medium:
            log('Detected typical PSI4 errors in file. Fixing them...')
        result['obasis'] = psi4_obasis
        return
    # Last resort: simply renormalize all contractions
    normed_con_coeffs = _normalized_contractions(obasis)
    normed_obasis = GOBasis(obasis.centers, obasis.shell_map, obasis.nprims,
                            obasis.shell_types, obasis.alphas,
                            normed_con_coeffs)
    if _is_normalized_properly(result['lf'], normed_obasis, permutation,
                               result['exp_alpha'], result.get('exp_beta')):
        if log.do_medium:
            log('Detected unnormalized contractions in file. Fixing them...')
        result['obasis'] = normed_obasis
        return

    raise IOError(('Could not correct the data read from %s. The molden or '
                   'mkl file you are trying to load contains errors. Please '
                   'report this problem to [email protected], so he '
                   'can fix it.') % filename)
Example no. 51
 def do_prosplines(self):
     for index in xrange(self.natom):
         # density
         key = ('spline_prodensity', index)
         if key not in self.cache:
             if log.do_medium:
                 log('Storing proatom density spline for atom %i.' % index)
             spline = self.get_proatom_spline(index)
             self.cache.dump(key, spline, tags='o')
         # hartree potential
         key = ('spline_prohartree', index)
         if key not in self.cache:
             if log.do_medium:
                 log('Computing proatom hartree potential spline for atom %i.' % index)
             rho_spline = self.cache.load('spline_prodensity', index)
             v_spline = solve_poisson_becke([rho_spline])[0]
             self.cache.dump(key, v_spline, tags='o')
Example no. 52
def _fix_molden_from_buggy_codes(result, filename):
    """Detect errors in the data loaded from a molden/mkl/... file and correct.

       **Argument:**

       result
            A dictionary with the data loaded in the ``load_molden`` function.

       This function can recognize erroneous files created by PSI4 and ORCA. The
       data in the obasis and signs fields will be updated accordingly.
    """
    obasis = result['obasis']
    permutation = result.get('permutation', None)
    if _is_normalized_properly(result['lf'], obasis, permutation,
                               result['exp_alpha'], result.get('exp_beta')):
        # The file is good. No need to change data.
        return
    if log.do_medium:
        log('Detected incorrect normalization of orbitals loaded from a file.')
    # Try to fix it as if it was a file generated by ORCA.
    orca_signs = _get_orca_signs(obasis)
    orca_con_coeffs = _get_fixed_con_coeffs(obasis, 'orca')
    orca_obasis = GOBasis(obasis.centers, obasis.shell_map, obasis.nprims,
                          obasis.shell_types, obasis.alphas, orca_con_coeffs)
    if _is_normalized_properly(result['lf'], orca_obasis, permutation,
                               result['exp_alpha'], result.get('exp_beta'), orca_signs):
        if log.do_medium:
            log('Detected typical ORCA errors in file. Fixing them...')
        result['obasis'] = orca_obasis
        result['signs'] = orca_signs
        return
    # Try to fix it as if it was a file generated by PSI4 (pre 1.0).
    psi4_con_coeffs = _get_fixed_con_coeffs(obasis, 'psi4')
    psi4_obasis = GOBasis(obasis.centers, obasis.shell_map, obasis.nprims,
                          obasis.shell_types, obasis.alphas, psi4_con_coeffs)
    if _is_normalized_properly(result['lf'], psi4_obasis, permutation,
                               result['exp_alpha'], result.get('exp_beta')):
        if log.do_medium:
            log('Detected typical PSI4 errors in file. Fixing them...')
        result['obasis'] = psi4_obasis
        return
    # Last resort: simply renormalize all contractions
    normed_con_coeffs = _normalized_contractions(obasis)
    normed_obasis = GOBasis(obasis.centers, obasis.shell_map, obasis.nprims,
                            obasis.shell_types, obasis.alphas, normed_con_coeffs)
    if _is_normalized_properly(result['lf'], normed_obasis, permutation,
                               result['exp_alpha'], result.get('exp_beta')):
        if log.do_medium:
            log('Detected unnormalized contractions in file. Fixing them...')
        result['obasis'] = normed_obasis
        return

    raise IOError(('Could not correct the data read from %s. The molden or '
                   'mkl file you are trying to load contains errors. Please '
                   'report this problem to [email protected], so he '
                   'can fix it.') % filename)
Example no. 53
    def do_hartree_decomposition(self):
        if not self.local:
            if log.do_warning:
                log.warn('Skipping hartree decomposition because no local grids were found.')
            return

        for index in xrange(self.natom):
            key = ('hartree_decomposition', index)
            if key not in self.cache:
                self.do_density_decomposition()
                if log.do_medium:
                    log('Computing hartree decomposition for atom %i' % index)
                density_decomposition = self.cache.load('density_decomposition', index)
                rho_splines = [spline for foo, spline in sorted(density_decomposition.iteritems())]
                v_splines = solve_poisson_becke(rho_splines)
                hartree_decomposition = dict(('spline_%05i' % j, spline) for j, spline in enumerate(v_splines))
                self.cache.dump(key, hartree_decomposition, tags='o')
Example no. 54
    def do_dispersion(self):
        if self.lmax < 3:
            if log.do_warning:
                log.warn('Skipping the computation of dispersion coefficients because lmax=%i<3' % self.lmax)
            return

        if log.do_medium:
            log.cite('tkatchenko2009', 'the method to evaluate atoms-in-molecules C6 parameters')
            log.cite('chu2004', 'the reference C6 parameters of isolated atoms')
            log.cite('yan1996', 'the isolated hydrogen C6 parameter')

        ref_c6s = { # reference C6 values in atomic units
            1: 6.499, 2: 1.42, 3: 1392.0, 4: 227.0, 5: 99.5, 6: 46.6, 7: 24.2,
            8: 15.6, 9: 9.52, 10: 6.20, 11: 1518.0, 12: 626.0, 13: 528.0,
            14: 305.0, 15: 185.0, 16: 134.0, 17: 94.6, 18: 64.2, 19: 3923.0,
            20: 2163.0, 21: 1383.0, 22: 1044.0, 23: 832.0, 24: 602.0, 25: 552.0,
            26: 482.0, 27: 408.0, 28: 373.0, 29: 253.0, 30: 284.0, 31: 498.0,
            32: 354.0, 33: 246.0, 34: 210.0, 35: 162.0, 36: 130.0, 37: 4769.0,
            38: 3175.0, 49: 779.0, 50: 659.0, 51: 492.0, 52: 445.0, 53: 385.0,
        }

        volumes, new_volumes = self._cache.load('volumes', alloc=self.natom, tags='o')
        volume_ratios, new_volume_ratios = self._cache.load('volume_ratios', alloc=self.natom, tags='o')
        c6s, new_c6s = self._cache.load('c6s', alloc=self.natom, tags='o')

        if new_volumes or new_volume_ratios or new_c6s:
            self.do_populations()
            self.do_moments()
            radial_moments = self._cache.load('radial_moments')
            populations = self._cache.load('populations')

            if log.do_medium:
                log('Computing atomic dispersion coefficients.')

            for i in xrange(self.natom):
                n = self.numbers[i]
                volumes[i] = radial_moments[i,2]/populations[i]
                ref_volume = self.proatomdb.get_record(n, 0).get_moment(3)/n
                volume_ratios[i] = volumes[i]/ref_volume
                if n in ref_c6s:
                    c6s[i] = (volume_ratios[i])**2*ref_c6s[n]
                else:
                    c6s[i] = -1 # This is just to indicate that no value is available.
Example no. 55
    def do_density_decomposition(self):
        if not self.local:
            if log.do_warning:
                log.warn('Skipping density decomposition because no local grids were found.')
            return

        for index in xrange(self.natom):
            atgrid = self.get_grid(index)
            assert isinstance(atgrid, AtomicGrid)
            key = ('density_decomposition', index)
            if key not in self.cache:
                moldens = self.get_moldens(index)
                self.do_partitioning()
                if log.do_medium:
                    log('Computing density decomposition for atom %i' % index)
                at_weights = self.cache.load('at_weights', index)
                splines = atgrid.get_spherical_decomposition(moldens, at_weights, lmax=self.lmax)
                density_decomposition = dict(('spline_%05i' % j, spline) for j, spline in enumerate(splines))
                self.cache.dump(key, density_decomposition, tags='o')
Example no. 56
    def do_moments(self):
        ncart = get_ncart_cumul(self.lmax)
        cartesian_multipoles, new1 = self._cache.load('cartesian_multipoles', alloc=(self.natom, ncart), tags='o')

        npure = get_npure_cumul(self.lmax)
        pure_multipoles, new1 = self._cache.load('pure_multipoles', alloc=(self.natom, npure), tags='o')

        nrad = self.lmax+1
        radial_moments, new2 = self._cache.load('radial_moments', alloc=(self.natom, nrad), tags='o')

        if new1 or new2:
            self.do_partitioning()
            if log.do_medium:
                log('Computing cartesian and pure AIM multipoles and radial AIM moments.')

            for i in xrange(self.natom):
                # 1) Define a 'window' of the integration grid for this atom
                center = self.coordinates[i]
                grid = self.get_grid(i)

                # 2) Compute the AIM
                aim = self.get_moldens(i)*self.cache.load('at_weights', i)

                # 3) Compute weight corrections
                wcor = self.get_wcor(i)

                # 4) Compute Cartesian multipole moments
                # The minus sign is present to account for the negative electron
                # charge.
                cartesian_multipoles[i] = -grid.integrate(aim, wcor, center=center, lmax=self.lmax, mtype=1)
                cartesian_multipoles[i, 0] += self.pseudo_numbers[i]

                # 5) Compute Pure multipole moments
                # The minus sign is present to account for the negative electron
                # charge.
                pure_multipoles[i] = -grid.integrate(aim, wcor, center=center, lmax=self.lmax, mtype=2)
                pure_multipoles[i, 0] += self.pseudo_numbers[i]

                # 6) Compute Radial moments
                # For the radial moments, it is not common to put a minus sign
                # for the negative electron charge.
                radial_moments[i] = grid.integrate(aim, wcor, center=center, lmax=self.lmax, mtype=3)
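The monopole bookkeeping above, in isolation: the electronic moments enter with a minus sign and the (pseudo) nuclear charge is added to the zeroth moment only, so that entry ends up being the net atomic charge. Placeholder names, not HORTON API.

import numpy as np

def fold_in_nucleus(electronic_moments, pseudo_number):
    # Flip the sign of the electronic moments and add the nuclear charge to the monopole.
    moments = -np.asarray(electronic_moments, dtype=float)
    moments[0] += pseudo_number
    return moments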