def save_weights(fn_cube, sys, ugrid, weights):
    '''Save the weights used for the ESP cost function to a cube file

       **Arguments:**

       fn_cube
            The name of the cube file.

       sys
            A System instance.

       ugrid
            The uniform integration grid.

       weights
            The weights array to be saved.
    '''
    # construct a new system that contains all info for the cube file
    my_sys = System(sys.coordinates, sys.numbers,
                    pseudo_numbers=sys.pseudo_numbers, grid=ugrid)
    my_sys.extra['cube_data'] = weights
    # save to file
    my_sys.to_file(fn_cube)
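# A minimal usage sketch for save_weights above. The file names and the
# all-ones weights array are hypothetical placeholders; numpy is assumed
# to be imported as np, as in the rest of these scripts.
def example_save_weights():
    sys = System.from_file('esp.cube')  # hypothetical input cube file
    weights = np.ones(sys.grid.shape)   # trivial placeholder weights
    save_weights('weights.cube', sys, sys.grid, weights)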
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the system
    sys = System.from_file(args.cube)
    ugrid = sys.grid
    if not isinstance(ugrid, UniformGrid):
        raise TypeError('The specified file does not contain data on a rectangular grid.')
    ugrid.pbc[:] = parse_pbc(args.pbc)
    moldens = sys.extra['cube_data']

    # Reduce the grid if required
    if args.stride > 1 or args.chop > 0:
        moldens, ugrid = reduce_data(moldens, ugrid, args.stride, args.chop)

    # Load the proatomdb and make pro-atoms more compact if that is requested
    proatomdb = ProAtomDB.from_file(args.atoms)
    if args.compact is not None:
        proatomdb.compact(args.compact)
    proatomdb.normalize()

    # Select the partitioning scheme
    CPartClass = cpart_schemes[args.scheme]

    # List of element numbers for which weight corrections are needed:
    wcor_numbers = list(iter_elements(args.wcor))

    # Run the partitioning
    kwargs = dict((key, val) for key, val in vars(args).iteritems()
                  if key in CPartClass.options)
    cpart = CPartClass(sys, ugrid, True, moldens, proatomdb, wcor_numbers,
                       args.wcor_rcut_max, args.wcor_rcond, **kwargs)
    names = cpart.do_all()

    # Do a symmetry analysis if requested.
    if args.symmetry is not None:
        sys_sym = System.from_file(args.symmetry)
        sym = sys_sym.extra.get('symmetry')
        if sym is None:
            raise ValueError('No symmetry information found in %s.' % args.symmetry)
        sys_results = dict((name, cpart[name]) for name in names)
        sym_results = symmetry_analysis(sys, sym, sys_results)
        cpart.cache.dump('symmetry', sym_results)
        names.append('symmetry')
        sys.extra['symmetry'] = sym

    write_part_output(fn_h5, grp_name, cpart, names, args)
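# Sketch of the option-filtering idiom used in main() above: the parsed
# argparse Namespace is converted to a dict and reduced to the keys that
# the partitioning class declares in its 'options' attribute. The
# Namespace contents and the options list below are made up.
def example_filter_scheme_options():
    from argparse import Namespace
    args = Namespace(scheme='h', threshold=1e-8, verbose=True)
    options = ['threshold']  # stand-in for CPartClass.options
    kwargs = dict((key, val) for key, val in vars(args).iteritems()
                  if key in options)
    return kwargs  # {'threshold': 1e-08}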
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the cost function from the HDF5 file
    cost, used_volume = load_cost(args.cost)

    # Find the optimal charges
    results = {}
    results['x'] = cost.solve(args.qtot, args.ridge)
    results['charges'] = results['x'][:cost.natom]

    # Related properties
    results['cost'] = cost.value(results['x'])
    if results['cost'] < 0:
        results['rmsd'] = 0.0
    else:
        results['rmsd'] = (results['cost']/used_volume)**0.5

    # Worst-case properties
    results['cost_worst'] = cost.worst(0.0)
    if results['cost_worst'] < 0:
        results['rmsd_worst'] = 0.0
    else:
        results['rmsd_worst'] = (results['cost_worst']/used_volume)**0.5

    # Write some things on screen
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('RMSD charges:   %10.5e' % np.sqrt((results['charges']**2).mean()))
        log('RMSD ESP:       %10.5e' % results['rmsd'])
        log('Worst RMSD ESP: %10.5e' % results['rmsd_worst'])
        log.hline()

    # Perform a symmetry analysis if requested
    if args.symmetry is not None:
        sys = System.from_file(args.symmetry[0])
        sys_sym = System.from_file(args.symmetry[1])
        sym = sys_sym.extra.get('symmetry')
        if sym is None:
            raise ValueError('No symmetry information found in %s.' % args.symmetry[1])
        sys_results = {'charges': results['charges']}
        sym_results = symmetry_analysis(sys, sym, sys_results)
        results['symmetry'] = sym_results
        sys.extra['symmetry'] = sym

    # Store the results in an HDF5 file
    write_script_output(fn_h5, grp_name, results, args)
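# Minimal numpy sketch of the relation used in main() above between the
# residual cost and the RMSD of the ESP fit: rmsd = sqrt(cost/used_volume),
# clipped to zero when numerical noise makes the cost negative. The
# quadratic form cost(x) = x^T A x - 2 b^T x + c mirrors the shape of the
# ESP cost function (see cost._A elsewhere in these scripts), but the
# numbers below are made up; this is an illustration, not the ESPCost
# implementation.
def example_cost_to_rmsd():
    A = np.array([[2.0, 0.1], [0.1, 1.0]])
    b = np.array([0.3, -0.2])
    c = 0.2
    x = np.linalg.solve(A, b)  # unconstrained minimizer of the quadratic
    cost = np.dot(x, np.dot(A, x)) - 2*np.dot(b, x) + c
    used_volume = 100.0        # placeholder volume
    rmsd = 0.0 if cost < 0 else (cost/used_volume)**0.5
    return rmsd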
def main():
    args = parse_args()
    margin = args.margin*angstrom
    spacing = args.spacing*angstrom
    sys = System.from_file(args.structure)

    # compute the shape tensor
    shape = np.dot(sys.coordinates.transpose(), sys.coordinates)
    # diagonalize to obtain the x, y and z directions
    evals, evecs = np.linalg.eigh(shape)
    axes = evecs.transpose()*spacing

    # compute the origin and the number of repetitions along each axis
    nrep = np.zeros(3, int)
    origin = np.zeros(3, float)
    for i in xrange(3):
        projc = np.dot(sys.coordinates, evecs[:,i])
        nrep[i] = np.ceil((projc.max() - projc.min() + 2*margin)/spacing) + 1
        origin += 0.5*(projc.max() + projc.min() - (nrep[i]-1)*spacing)*evecs[:,i]

    with open(args.output, 'w') as f:
        # the header is written in Bohr, hence the -nrep[0]
        print >> f, '% 5i % 15.10f % 15.10f % 15.10f' % (0, origin[0], origin[1], origin[2])
        print >> f, '% 5i % 15.10f % 15.10f % 15.10f' % (-nrep[0], axes[0,0], axes[0,1], axes[0,2])
        print >> f, '% 5i % 15.10f % 15.10f % 15.10f' % (nrep[1], axes[1,0], axes[1,1], axes[1,2])
        print >> f, '% 5i % 15.10f % 15.10f % 15.10f' % (nrep[2], axes[2,0], axes[2,1], axes[2,2])
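# Sketch of the shape-tensor trick in main() above: the eigenvectors of
# the Gram matrix coordinates^T . coordinates give the principal
# directions of the structure, which become the axes of the cube grid.
# The coordinates below are made up.
def example_principal_axes():
    coords = np.array([[0.0,  0.0, 0.0],
                       [1.5,  0.1, 0.0],
                       [3.0, -0.1, 0.2]])
    shape = np.dot(coords.transpose(), coords)
    evals, evecs = np.linalg.eigh(shape)
    return evecs  # columns are the grid axis directions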
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the system
    sys = System.from_file(args.wfn)

    # Collect the optional arguments for the WPartClass:
    WPartClass = wpart_schemes[args.scheme]
    kwargs = dict((key, val) for key, val in vars(args).iteritems()
                  if key in WPartClass.options)

    # Load the proatomdb
    if args.atoms is not None:
        proatomdb = ProAtomDB.from_file(args.atoms)
        proatomdb.normalize()
        kwargs['proatomdb'] = proatomdb
    else:
        proatomdb = None

    # Run the partitioning
    agspec = AtomicGridSpec(args.grid)
    molgrid = BeckeMolGrid(sys, agspec, mode='only')
    sys.update_grid(molgrid)  # for the grid to be written to the output
    wpart = WPartClass(sys, molgrid, **kwargs)
    names = wpart.do_all()

    write_part_output(fn_h5, grp_name, wpart, names, args)
def write_random_lta_cube(dn, fn_cube):
    # Build a small uniform grid over a scaled LTA cell and fill it with
    # random data, then write the result to a cube file.
    sys = System.from_file(context.get_fn('test/lta_gulp.cif'))
    ugrid = UniformGrid(np.zeros(3, float), sys.cell.rvecs*0.1,
                        np.array([10, 10, 10]), np.array([1, 1, 1]))
    cube_data = np.random.uniform(0, 1, ugrid.shape)
    sys.update_grid(ugrid)
    sys.extra['cube_data'] = cube_data
    sys.to_file(os.path.join(dn, fn_cube))
    return sys
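# Hedged usage sketch for write_random_lta_cube above: write a random cube
# file into a temporary directory and load it back. tempfile and shutil
# are standard library; everything else relies on the function above.
def example_roundtrip_random_cube():
    import shutil
    import tempfile
    dn = tempfile.mkdtemp('horton')
    try:
        sys0 = write_random_lta_cube(dn, 'random.cube')
        sys1 = System.from_file(os.path.join(dn, 'random.cube'))
        assert sys1.extra['cube_data'].shape == sys0.extra['cube_data'].shape
    finally:
        shutil.rmtree(dn)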
def load_atom(self, dn_mult, ext):
    fn = '%s/atom.%s' % (dn_mult, ext)
    if not os.path.isfile(fn):
        return None, None
    try:
        system = System.from_file(fn)
    except Exception:
        # the file could not be parsed; treat it as missing
        return None, None
    system.extra['energy'] = self._get_energy(system, dn_mult)
    return system, system.extra['energy']
def load_rho(system, fn_cube, ref_ugrid, stride, chop):
    '''Load densities from a file, reduce by stride, chop and check ugrid

       **Arguments:**

       system
            A Horton system object for the current system. This is only
            used to construct the pro-density.

       fn_cube
            The cube file with the electron density.

       ref_ugrid
            A reference ugrid that must match the one from the density
            cube file (after reduction).

       stride
            The reduction factor.

       chop
            The number of slices to chop off the grid in each direction.
    '''
    if fn_cube is None:
        # Load the built-in database of proatoms
        numbers = np.unique(system.numbers)
        proatomdb = ProAtomDB.from_refatoms(numbers, max_kation=0, max_anion=0, agspec='fine')
        # Construct the pro-density
        rho = np.zeros(ref_ugrid.shape)
        for i in xrange(system.natom):
            spline = proatomdb.get_spline(system.numbers[i])
            ref_ugrid.eval_spline(spline, system.coordinates[i], rho)
    else:
        # Load the density from the cube file
        sys = System.from_file(fn_cube)
        rho = sys.extra['cube_data']
        ugrid = sys.grid
        # Reduce the grid size if requested
        if stride > 1:
            rho, ugrid = reduce_data(rho, ugrid, stride, chop)
        # Compare with ref_ugrid (only the shape)
        if (ugrid.shape != ref_ugrid.shape).any():
            raise ValueError('The density file does not contain the same '
                             'amount of information as the potential file.')
    return rho
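# Usage sketch for the fn_cube=None branch of load_rho above: build a
# pro-density from the built-in proatom database on the grid of an
# existing cube file. 'esp.cube' is a hypothetical file name.
def example_load_rho_builtin():
    sys = System.from_file('esp.cube')
    rho = load_rho(sys, None, sys.grid, stride=1, chop=0)
    return rho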
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the system
    if log.do_medium:
        log('Loading potential array')
    sys = System.from_file(args.cube)
    ugrid = sys.grid
    if not isinstance(ugrid, UniformGrid):
        raise TypeError('The specified file does not contain data on a rectangular grid.')
    ugrid.pbc[:] = parse_pbc(args.pbc)  # correct pbc
    esp = sys.extra['cube_data']

    # Reduce the grid if required
    if args.stride > 1:
        esp, ugrid = reduce_data(esp, ugrid, args.stride, args.chop)

    # Fix the sign if requested
    if args.sign:
        esp *= -1

    # Some screen info
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('Number of grid points: %12i' % np.product(ugrid.shape))
        log('Grid shape:               [%8i, %8i, %8i]' % tuple(ugrid.shape))
        log('PBC:                      [%8i, %8i, %8i]' % tuple(ugrid.pbc))
        log.hline()

    # Construct the weights for the ESP cost function.
    wdens = parse_wdens(args.wdens)
    if wdens is not None:
        if log.do_medium:
            log('Loading density array')
        rho = load_rho(sys, wdens[0], ugrid, args.stride, args.chop)
        wdens = (rho,) + wdens[1:]
    if log.do_medium:
        log('Constructing weight function')
    weights = setup_weights(sys, ugrid,
                            dens=wdens,
                            near=parse_wnear(args.wnear),
                            far=parse_wnear(args.wfar))

    # write the weights to a cube file if requested
    if args.wsave is not None:
        if log.do_medium:
            log('Saving weights array')
        save_weights(args.wsave, sys, ugrid, weights)

    # rescale weights such that the cost function is the mean-square-error
    if weights.max() == 0.0:
        raise ValueError('No points with a non-zero weight were found')
    wmax = weights.max()
    wmin = weights.min()
    used_volume = ugrid.integrate(weights)

    # Some screen info
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('Used number of grid points: %12i' % (weights > 0).sum())
        log('Used volume:        %12.5f' % used_volume)
        log('Used volume/atom:   %12.5f' % (used_volume/sys.natom))
        log('Lowest weight:      %12.5e' % wmin)
        log('Highest weight:     %12.5e' % wmax)
        log('Max weight at edge: %12.5f' % max_at_edge(weights, ugrid.pbc))

    # Ewald parameters
    rcut, alpha, gcut = parse_ewald_args(args)

    # Some screen info
    if log.do_medium:
        log('Ewald real cutoff:       %12.5e' % rcut)
        log('Ewald alpha:             %12.5e' % alpha)
        log('Ewald reciprocal cutoff: %12.5e' % gcut)
        log.hline()

    # Construct the cost function
    if log.do_medium:
        log('Setting up cost function (may take a while)')
    cost = ESPCost.from_grid_data(sys, ugrid, esp, weights, rcut, alpha, gcut)

    # Store cost function info
    results = {}
    results['cost'] = cost
    results['used_volume'] = used_volume

    # Store cost function properties
    results['evals'] = np.linalg.eigvalsh(cost._A)
    abs_evals = abs(results['evals'])
    if abs_evals.min() == 0.0:
        results['cn'] = 0.0
    else:
        results['cn'] = abs_evals.max()/abs_evals.min()

    # Report some on-screen info
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('Lowest abs eigenvalue:  %12.5e' % abs_evals.min())
        log('Highest abs eigenvalue: %12.5e' % abs_evals.max())
        log('Condition number:       %12.5e' % results['cn'])
        log.hline()

    # Store the results in an HDF5 file
    write_script_output(fn_h5, grp_name, results, args)
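# Tiny self-contained sketch of the condition-number computation at the
# end of main() above, applied to a made-up symmetric matrix instead of
# the ESP cost matrix cost._A.
def example_condition_number():
    A = np.array([[4.0, 1.0], [1.0, 3.0]])
    abs_evals = abs(np.linalg.eigvalsh(A))
    if abs_evals.min() == 0.0:
        return 0.0
    return abs_evals.max()/abs_evals.min()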
def load_ugrid_coordinates(arg_grid):
    sys = System.from_file(arg_grid)
    return sys.grid, sys.coordinates
def main():
    args = parse_args()
    sys = System.from_file(args.input)
    sys.to_file(args.output)