Example #1
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the system
    sys = System.from_file(args.wfn)

    # Collect the optional keyword arguments for the WPartClass:
    WPartClass = wpart_schemes[args.scheme]
    kwargs = dict((key, val) for key, val in vars(args).iteritems() if key in WPartClass.options)

    # Load the proatomdb
    if args.atoms is not None:
        proatomdb = ProAtomDB.from_file(args.atoms)
        proatomdb.normalize()
        kwargs['proatomdb'] = proatomdb
    else:
        proatomdb = None

    # Run the partitioning
    agspec = AtomicGridSpec(args.grid)
    molgrid = BeckeMolGrid(sys, agspec, mode='only')
    sys.update_grid(molgrid) # for the grid to be written to the output
    wpart = wpart_schemes[args.scheme](sys, molgrid, **kwargs)
    names = wpart.do_all()

    write_part_output(fn_h5, grp_name, wpart, names, args)
Example #2
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the system
    sys = System.from_file(args.cube)
    ugrid = sys.grid
    if not isinstance(ugrid, UniformGrid):
        raise TypeError(
            'The specified file does not contain data on a rectangular grid.')
    ugrid.pbc[:] = parse_pbc(args.pbc)
    moldens = sys.extra['cube_data']

    # Reduce the grid if required
    if args.stride > 1 or args.chop > 0:
        moldens, ugrid = reduce_data(moldens, ugrid, args.stride, args.chop)

    # Load the proatomdb and make pro-atoms more compact if that is requested
    proatomdb = ProAtomDB.from_file(args.atoms)
    if args.compact is not None:
        proatomdb.compact(args.compact)
    proatomdb.normalize()

    # Select the partitioning scheme
    CPartClass = cpart_schemes[args.scheme]

    # List of element numbers for which weight corrections are needed:
    wcor_numbers = list(iter_elements(args.wcor))

    # Run the partitioning
    kwargs = dict((key, val) for key, val in vars(args).iteritems()
                  if key in CPartClass.options)
    cpart = cpart_schemes[args.scheme](sys, ugrid, True, moldens, proatomdb,
                                       wcor_numbers, args.wcor_rcut_max,
                                       args.wcor_rcond, **kwargs)
    names = cpart.do_all()

    # Do a symmetry analysis if requested.
    if args.symmetry is not None:
        sys_sym = System.from_file(args.symmetry)
        sym = sys_sym.extra.get('symmetry')
        if sym is None:
            raise ValueError('No symmetry information found in %s.' %
                             args.symmetry)
        sys_results = dict((name, cpart[name]) for name in names)
        sym_results = symmetry_analysis(sys, sym, sys_results)
        cpart.cache.dump('symmetry', sym_results)
        names.append('symmetry')
        sys.extra['symmetry'] = sym

    write_part_output(fn_h5, grp_name, cpart, names, args)
Example #3
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the cost function from the HDF5 file
    cost, used_volume = load_cost(args.cost)

    # Find the optimal charges
    results = {}
    results['x'] = cost.solve(args.qtot, args.ridge)
    results['charges'] = results['x'][:cost.natom]

    # Related properties
    results['cost'] = cost.value(results['x'])
    if results['cost'] < 0:
        results['rmsd'] = 0.0
    else:
        results['rmsd'] = (results['cost'] / used_volume)**0.5

    # Worst case stuff
    results['cost_worst'] = cost.worst(0.0)
    if results['cost_worst'] < 0:
        results['rmsd_worst'] = 0.0
    else:
        results['rmsd_worst'] = (results['cost_worst'] / used_volume)**0.5

    # Write some things on screen
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('RMSD charges:                  %10.5e' % np.sqrt(
            (results['charges']**2).mean()))
        log('RMSD ESP:                      %10.5e' % results['rmsd'])
        log('Worst RMSD ESP:                %10.5e' % results['rmsd_worst'])
        log.hline()

    # Perform a symmetry analysis if requested
    if args.symmetry is not None:
        mol_pot = IOData.from_file(args.symmetry[0])
        mol_sym = IOData.from_file(args.symmetry[1])
        if not hasattr(mol_sym, 'symmetry'):
            raise ValueError('No symmetry information found in %s.' %
                             args.symmetry[1])
        aim_results = {'charges': results['charges']}
        sym_results = symmetry_analysis(mol_pot.coordinates, mol_pot.cell,
                                        mol_sym.symmetry, aim_results)
        results['symmetry'] = sym_results

    # Store the results in an HDF5 file
    write_script_output(fn_h5, grp_name, results, args)
Example #4
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the cost function from the HDF5 file
    cost, used_volume = load_cost(args.cost)

    # Find the optimal charges
    results = {}
    results['x'] = cost.solve(args.qtot, args.ridge)
    results['charges'] = results['x'][:cost.natom]

    # Related properties
    results['cost'] = cost.value(results['x'])
    if results['cost'] < 0:
        results['rmsd'] = 0.0
    else:
        results['rmsd'] = (results['cost']/used_volume)**0.5

    # Worst case stuff
    results['cost_worst'] = cost.worst(0.0)
    if results['cost_worst'] < 0:
        results['rmsd_worst'] = 0.0
    else:
        results['rmsd_worst'] = (results['cost_worst']/used_volume)**0.5

    # Write some things on screen
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('RMSD charges:                  %10.5e' % np.sqrt((results['charges']**2).mean()))
        log('RMSD ESP:                      %10.5e' % results['rmsd'])
        log('Worst RMSD ESP:                %10.5e' % results['rmsd_worst'])
        log.hline()

    # Perform a symmetry analysis if requested
    if args.symmetry is not None:
        sys = System.from_file(args.symmetry[0])
        sys_sym = System.from_file(args.symmetry[1])
        sym = sys_sym.extra.get('symmetry')
        if sym is None:
            raise ValueError('No symmetry information found in %s.' % args.symmetry[1])
        sys_results = {'charges': results['charges']}
        sym_results = symmetry_analysis(sys, sym, sys_results)
        results['symmetry'] = sym_results
        sys.extra['symmetry'] = sym

    # Store the results in an HDF5 file
    write_script_output(fn_h5, grp_name, results, args)
Example #5
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the system
    sys = System.from_file(args.cube)
    ugrid = sys.grid
    if not isinstance(ugrid, UniformGrid):
        raise TypeError('The specified file does not contain data on a rectangular grid.')
    ugrid.pbc[:] = parse_pbc(args.pbc)
    moldens = sys.extra['cube_data']

    # Reduce the grid if required
    if args.stride > 1 or args.chop > 0:
        moldens, ugrid = reduce_data(moldens, ugrid, args.stride, args.chop)

    # Load the proatomdb and make pro-atoms more compact if that is requested
    proatomdb = ProAtomDB.from_file(args.atoms)
    if args.compact is not None:
        proatomdb.compact(args.compact)
    proatomdb.normalize()

    # Select the partitioning scheme
    CPartClass = cpart_schemes[args.scheme]

    # List of element numbers for which weight corrections are needed:
    wcor_numbers = list(iter_elements(args.wcor))

    # Run the partitioning
    kwargs = dict((key, val) for key, val in vars(args).iteritems() if key in CPartClass.options)
    cpart = cpart_schemes[args.scheme](
        sys, ugrid, True, moldens, proatomdb, wcor_numbers,
        args.wcor_rcut_max, args.wcor_rcond, **kwargs)
    names = cpart.do_all()

    # Do a symmetry analysis if requested.
    if args.symmetry is not None:
        sys_sym = System.from_file(args.symmetry)
        sym = sys_sym.extra.get('symmetry')
        if sym is None:
            raise ValueError('No symmetry information found in %s.' % args.symmetry)
        sys_results = dict((name, cpart[name]) for name in names)
        sym_results = symmetry_analysis(sys, sym, sys_results)
        cpart.cache.dump('symmetry', sym_results)
        names.append('symmetry')
        sys.extra['symmetry'] = sym

    write_part_output(fn_h5, grp_name, cpart, names, args)
Example #6
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the system
    mol = IOData.from_file(args.wfn)

    # Collect the optional keyword arguments for the WPartClass:
    WPartClass = wpart_schemes[args.scheme]
    kwargs = dict((key, val) for key, val in vars(args).iteritems()
                  if key in WPartClass.options)

    # Load the proatomdb
    if args.atoms is not None:
        proatomdb = ProAtomDB.from_file(args.atoms)
        proatomdb.normalize()
        kwargs['proatomdb'] = proatomdb
    else:
        proatomdb = None

    # Run the partitioning
    agspec = AtomicGridSpec(args.grid)
    grid = BeckeMolGrid(mol.coordinates,
                        mol.numbers,
                        mol.pseudo_numbers,
                        agspec,
                        mode='only')
    dm_full = mol.get_dm_full()
    moldens = mol.obasis.compute_grid_density_dm(dm_full,
                                                 grid.points,
                                                 epsilon=args.epsilon)
    dm_spin = mol.get_dm_spin()
    if dm_spin is not None:
        kwargs['spindens'] = mol.obasis.compute_grid_density_dm(
            dm_spin, grid.points, epsilon=args.epsilon)
    wpart = wpart_schemes[args.scheme](mol.coordinates, mol.numbers,
                                       mol.pseudo_numbers, grid, moldens,
                                       **kwargs)
    keys = wpart.do_all()

    if args.slow:
        # ugly hack for the slow analysis involving the AIM overlap operators.
        wpart_slow_analysis(wpart, mol)
        keys = list(wpart.cache.iterkeys(tags='o'))

    write_part_output(fn_h5, grp_name, wpart, keys, args)
Example #7
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the charges from the HDF5 file
    charges = load_charges(args.charges)

    # Load the uniform grid and the coordinates
    ugrid, coordinates = load_ugrid_coordinates(args.grid)
    ugrid.pbc[:] = 1 # enforce 3D periodic

    # Fix total charge if requested
    if args.qtot is not None:
        charges -= (charges.sum() - args.qtot)/len(charges)

    # Store parameters in output
    results = {}
    results['qtot'] = charges.sum()

    # Determine the grid specification
    results['ugrid'] = ugrid

    # Ewald parameters
    rcut, alpha, gcut = parse_ewald_args(args)

    # Some screen info
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('Number of grid points:   %12i' % ugrid.size)
        log('Grid shape:                 [%8i, %8i, %8i]' % tuple(ugrid.shape))
        log('Ewald real cutoff:       %12.5e' % rcut)
        log('Ewald alpha:             %12.5e' % alpha)
        log('Ewald reciprocal cutoff: %12.5e' % gcut)
        log.hline()
        # TODO: add summation ranges
        log('Computing ESP (may take a while)')

    # Allocate and compute ESP grid
    esp = np.zeros(ugrid.shape, float)
    compute_esp_grid_cube(ugrid, esp, coordinates, charges, rcut, alpha, gcut)
    results['esp'] = esp

    # Store the results in an HDF5 file
    write_script_output(fn_h5, grp_name, results, args)
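
The total-charge fix in the example above, `charges -= (charges.sum() - args.qtot)/len(charges)`, spreads the excess charge evenly over all atoms so that the corrected charges sum exactly to the requested total. A minimal standalone sketch with made-up numbers, not taken from any of the scripts:

import numpy as np

charges = np.array([0.41, -0.22, -0.13])  # hypothetical AIM charges; sum is 0.06
qtot = 0.0                                # requested total charge

# Subtract the average excess from every atom; the sum then equals qtot.
charges -= (charges.sum() - qtot) / len(charges)
print(charges.sum())  # ~0.0, up to floating-point rounding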
Example #8
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the charges from the HDF5 file
    charges = load_charges(args.charges)

    # Load the uniform grid and the coordinates
    ugrid, coordinates = load_ugrid_coordinates(args.grid)
    ugrid.pbc[:] = 1  # enforce 3D periodic

    # Fix total charge if requested
    if args.qtot is not None:
        charges -= (charges.sum() - args.qtot) / len(charges)

    # Store parameters in output
    results = {}
    results['qtot'] = charges.sum()

    # Determine the grid specification
    results['ugrid'] = ugrid

    # Ewald parameters
    rcut, alpha, gcut = parse_ewald_args(args)

    # Some screen info
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('Number of grid points:   %12i' % ugrid.size)
        log('Grid shape:                 [%8i, %8i, %8i]' % tuple(ugrid.shape))
        log('Ewald real cutoff:       %12.5e' % rcut)
        log('Ewald alpha:             %12.5e' % alpha)
        log('Ewald reciprocal cutoff: %12.5e' % gcut)
        log.hline()
        # TODO: add summation ranges
        log('Computing ESP (may take a while)')

    # Allocate and compute ESP grid
    esp = np.zeros(ugrid.shape, float)
    compute_esp_grid_cube(ugrid, esp, coordinates, charges, rcut, alpha, gcut)
    results['esp'] = esp

    # Store the results in an HDF5 file
    write_script_output(fn_h5, grp_name, results, args)
Example #9
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the cost function from the HDF5 file
    cost, used_volume = load_cost(args.cost)

    # Load the charges from the HDF5 file
    charges = load_charges(args.charges)

    # Fix total charge if requested
    if args.qtot is not None:
        charges -= (charges.sum() - args.qtot) / len(charges)

    # Store parameters in output
    results = {}
    results['qtot'] = charges.sum()

    # Fitness of the charges
    results['cost'] = cost.value_charges(charges)
    if results['cost'] < 0:
        results['rmsd'] = 0.0
    else:
        results['rmsd'] = (results['cost'] / used_volume)**0.5

    # Worst case stuff
    results['cost_worst'] = cost.worst(0.0)
    if results['cost_worst'] < 0:
        results['rmsd_worst'] = 0.0
    else:
        results['rmsd_worst'] = (results['cost_worst'] / used_volume)**0.5

    # Write some things on screen
    if log.do_medium:
        log('RMSD charges:                  %10.5e' % np.sqrt(
            (charges**2).mean()))
        log('RMSD ESP:                      %10.5e' % results['rmsd'])
        log('Worst RMSD ESP:                %10.5e' % results['rmsd_worst'])
        log.hline()

    # Store the results in an HDF5 file
    write_script_output(fn_h5, grp_name, results, args)
Example #10
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the cost function from the HDF5 file
    cost, used_volume = load_cost(args.cost)

    # Load the charges from the HDF5 file
    charges = load_charges(args.charges)

    # Fix total charge if requested
    if args.qtot is not None:
        charges -= (charges.sum() - args.qtot)/len(charges)

    # Store parameters in output
    results = {}
    results['qtot'] = charges.sum()

    # Fitness of the charges
    results['cost'] = cost.value_charges(charges)
    if results['cost'] < 0:
        results['rmsd'] = 0.0
    else:
        results['rmsd'] = (results['cost']/used_volume)**0.5

    # Worst case stuff
    results['cost_worst'] = cost.worst(0.0)
    if results['cost_worst'] < 0:
        results['rmsd_worst'] = 0.0
    else:
        results['rmsd_worst'] = (results['cost_worst']/used_volume)**0.5

    # Write some things on screen
    if log.do_medium:
        log('RMSD charges:                  %10.5e' % np.sqrt((charges**2).mean()))
        log('RMSD ESP:                      %10.5e' % results['rmsd'])
        log('Worst RMSD ESP:                %10.5e' % results['rmsd_worst'])
        log.hline()

    # Store the results in an HDF5 file
    write_script_output(fn_h5, grp_name, results, args)
Example #11
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the cost function from the HDF5 file
    cost, used_volume = load_cost(args.cost)

    # Find the optimal charges
    results = {}

    # MODIFICATION HERE

    results['x'] = cost.solve(args.qtot, args.ridge)
    results['charges'] = results['x'][:cost.natom]

    # Related properties
    results['cost'] = cost.value(results['x'])
    if results['cost'] < 0:
        results['rmsd'] = 0.0
    else:
        results['rmsd'] = (results['cost'] / used_volume)**0.5

    # Worst case stuff
    results['cost_worst'] = cost.worst(0.0)
    if results['cost_worst'] < 0:
        results['rmsd_worst'] = 0.0
    else:
        results['rmsd_worst'] = (results['cost_worst'] / used_volume)**0.5

    # Write some things on screen
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('RMSD charges:                  %10.5e' % np.sqrt(
            (results['charges']**2).mean()))
        log('RMSD ESP:                      %10.5e' % results['rmsd'])
        log('Worst RMSD ESP:                %10.5e' % results['rmsd_worst'])
        log.hline()

    # Store the results in an HDF5 file
    write_script_output(fn_h5, grp_name, results, args)
Example #12
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the cost function from the HDF5 file
    cost, used_volume = load_cost(args.cost)

    # Find the optimal charges
    results = {}
    results['x'] = cost.solve(args.qtot, args.ridge)
    results['charges'] = results['x'][:cost.natom]

    # Related properties
    results['cost'] = cost.value(results['x'])
    if results['cost'] < 0:
        results['rmsd'] = 0.0
    else:
        results['rmsd'] = (results['cost']/used_volume)**0.5

    # Worst case stuff
    results['cost_worst'] = cost.worst(0.0)
    if results['cost_worst'] < 0:
        results['rmsd_worst'] = 0.0
    else:
        results['rmsd_worst'] = (results['cost_worst']/used_volume)**0.5

    # Write some things on screen
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('RMSD charges:                  %10.5e' % np.sqrt((results['charges']**2).mean()))
        log('RMSD ESP:                      %10.5e' % results['rmsd'])
        log('Worst RMSD ESP:                %10.5e' % results['rmsd_worst'])
        log.hline()

    # Store the results in an HDF5 file
    write_script_output(fn_h5, grp_name, results, args)
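
The cost-to-RMSD step repeated in the examples above divides the ESP cost by the integrated volume of the weight function and takes the square root, treating small negative cost values (round-off noise) as zero. A hedged helper that captures just this conversion; the name rmsd_from_cost is ours, not part of the scripts:

def rmsd_from_cost(cost_value, used_volume):
    """Convert an ESP cost value into an RMSD, as in the examples above.

    Negative cost values, which can arise from numerical round-off, are
    clamped to zero so the square root is always defined.
    """
    if cost_value < 0:
        return 0.0
    return (cost_value / used_volume) ** 0.5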
Example #13
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the system
    mol = IOData.from_file(args.wfn)

    # Collect the optional keyword arguments for the WPartClass:
    WPartClass = wpart_schemes[args.scheme]
    kwargs = dict((key, val) for key, val in vars(args).iteritems() if key in WPartClass.options)

    # Load the proatomdb
    if args.atoms is not None:
        proatomdb = ProAtomDB.from_file(args.atoms)
        proatomdb.normalize()
        kwargs['proatomdb'] = proatomdb
    else:
        proatomdb = None

    # Run the partitioning
    agspec = AtomicGridSpec(args.grid)
    grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers, agspec, mode='only')
    dm_full = mol.get_dm_full()
    moldens = mol.obasis.compute_grid_density_dm(dm_full, grid.points, epsilon=args.epsilon)
    dm_spin = mol.get_dm_spin()
    if dm_spin is not None:
        kwargs['spindens'] = mol.obasis.compute_grid_density_dm(dm_spin, grid.points, epsilon=args.epsilon)
    wpart = wpart_schemes[args.scheme](mol.coordinates, mol.numbers, mol.pseudo_numbers, grid, moldens, **kwargs)
    keys = wpart.do_all()

    if args.slow:
        # ugly hack for the slow analysis involving the AIM overlap operators.
        wpart_slow_analysis(wpart, mol)
        keys = list(wpart.cache.iterkeys(tags='o'))

    write_part_output(fn_h5, grp_name, wpart, keys, args)
Example #14
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.h5, 'h5')
    with LockedH5File(fn_h5, 'r') as fin, open(args.csv, 'w') as fout:
        w = csv.writer(fout)
        w.writerow(['Converted data from %s' % args.h5])
        w.writerow([])
        for name, dset in iter_datasets(fin[grp_name]):
            if len(dset.shape) > 3:
                if log.do_warning:
                    log.warn(
                        'Skipping %s because it has more than three axes.' %
                        name)
            else:
                log('Converting %s' % name)

            w.writerow(['Dataset', name])
            w.writerow(['Shape'] + list(dset.shape))
            if len(dset.shape) == 0:
                w.writerow([dset[()]])
            elif len(dset.shape) == 1:
                for value in dset:
                    w.writerow([value])
            elif len(dset.shape) == 2:
                for row in dset:
                    w.writerow([value for value in row])
            elif len(dset.shape) == 3:
                for array in dset:
                    l = []
                    for col in array.T:
                        for value in col:
                            l.append(value)
                        l.append('')
                    del l[-1]
                    w.writerow(l)
            else:
                w.writerow(['Skipped because ndim=%i>3' % len(dset.shape)])
            w.writerow([])
Example #15
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.h5, 'h5')
    with LockedH5File(fn_h5, 'r') as fin, open(args.csv, 'w') as fout:
        w = csv.writer(fout)
        w.writerow(['Converted data from %s' % args.h5])
        w.writerow([])
        for name, dset in iter_datasets(fin[grp_name]):
            if len(dset.shape) > 3:
                if log.do_warning:
                    log.warn('Skipping %s because it has more than three axes.' % name)
            else:
                log('Converting %s' % name)

            w.writerow(['Dataset', name])
            w.writerow(['Shape'] + list(dset.shape))
            if len(dset.shape) == 0:
                w.writerow([dset[()]])
            elif len(dset.shape) == 1:
                for value in dset:
                    w.writerow([value])
            elif len(dset.shape) == 2:
                for row in dset:
                    w.writerow([value for value in row])
            elif len(dset.shape) == 3:
                for array in dset:
                    l = []
                    for col in array.T:
                        for value in col:
                            l.append(value)
                        l.append('')
                    del l[-1]
                    w.writerow(l)
            else:
                w.writerow(['Skipped because ndim=%i>3' % len(dset.shape)])
            w.writerow([])
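
In the two HDF5-to-CSV examples above, a three-dimensional dataset is written one 2D slice per CSV row: the columns of each slice are concatenated and separated by empty cells. A small standalone sketch of that flattening on a made-up array:

import numpy as np

array = np.arange(12).reshape(3, 4)  # one 2D slice of a hypothetical 3D dataset

row = []
for col in array.T:                  # walk over the columns, as in the loop above
    row.extend(int(value) for value in col)
    row.append('')                   # blank cell between columns
del row[-1]                          # drop the trailing separator
print(row)  # [0, 4, 8, '', 1, 5, 9, '', 2, 6, 10, '', 3, 7, 11]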
Example #16
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the potential data
    if log.do_medium:
        log('Loading potential array')
    mol_pot = IOData.from_file(args.cube)
    if not isinstance(mol_pot.grid, UniformGrid):
        raise TypeError('The specified file does not contain data on a rectangular grid.')
    mol_pot.grid.pbc[:] = parse_pbc(args.pbc) # correct pbc
    esp = mol_pot.cube_data

    # Reduce the grid if required
    if args.stride > 1:
        esp, mol_pot.grid = reduce_data(esp, mol_pot.grid, args.stride, args.chop)

    # Fix sign
    if args.sign:
        esp *= -1

    # Some screen info
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('Number of grid points:   %12i' % np.product(mol_pot.grid.shape))
        log('Grid shape:                 [%8i, %8i, %8i]' % tuple(mol_pot.grid.shape))
        log('PBC:                        [%8i, %8i, %8i]' % tuple(mol_pot.grid.pbc))
        log.hline()

    # Construct the weights for the ESP Cost function.
    wdens = parse_wdens(args.wdens)
    if wdens is not None:
        if log.do_medium:
            log('Loading density array')
        # either the provided density or a built-in prodensity
        rho = load_rho(mol_pot.coordinates, mol_pot.numbers, wdens[0], mol_pot.grid, args.stride, args.chop)
        wdens = (rho,) + wdens[1:]
    if log.do_medium:
        log('Constructing weight function')
    weights = setup_weights(mol_pot.coordinates, mol_pot.numbers, mol_pot.grid,
        dens=wdens,
        near=parse_wnear(args.wnear),
        far=parse_wnear(args.wfar),
    )

    # write the weights to a cube file if requested
    if args.wsave is not None:
        if log.do_medium:
            log('   Saving weights array   ')
        # construct a new data dictionary that contains all info for the cube file
        mol_weights = mol_pot.copy()
        mol_weights.cube_data = weights
        mol_weights.to_file(args.wsave)

    # Check the weights and integrate them; the resulting volume is used later
    # to convert the cost function into a mean-square error.
    if weights.max() == 0.0:
        raise ValueError('No points with a non-zero weight were found')
    wmin = weights.min()
    wmax = weights.max()
    used_volume = mol_pot.grid.integrate(weights)

    # Some screen info
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('Used number of grid points:   %12i' % (weights>0).sum())
        log('Used volume:                      %12.5f' % used_volume)
        log('Used volume/atom:                 %12.5f' % (used_volume/mol_pot.natom))
        log('Lowest weight:                %12.5e' % wmin)
        log('Highest weight:               %12.5e' % wmax)
        log('Max weight at edge:           %12.5f' % max_at_edge(weights, mol_pot.grid.pbc))

    # Ewald parameters
    rcut, alpha, gcut = parse_ewald_args(args)

    # Some screen info
    if log.do_medium:
        log('Ewald real cutoff:       %12.5e' % rcut)
        log('Ewald alpha:             %12.5e' % alpha)
        log('Ewald reciprocal cutoff: %12.5e' % gcut)
        log.hline()

    # Construct the cost function
    if log.do_medium:
        log('Setting up cost function (may take a while)   ')
    cost = ESPCost.from_grid_data(mol_pot.coordinates, mol_pot.grid, esp, weights, rcut, alpha, gcut)

    # Store cost function info
    results = {}
    results['cost'] = cost
    results['used_volume'] = used_volume

    # Store cost function properties
    results['evals'] = np.linalg.eigvalsh(cost._A)
    abs_evals = abs(results['evals'])
    if abs_evals.min() == 0.0:
        results['cn'] = 0.0
    else:
        results['cn'] = abs_evals.max()/abs_evals.min()

    # Report some on-screen info
    if log.do_medium:
        log('Important parameters:')
        log.hline()
        log('Lowest abs eigen value:       %12.5e' % abs_evals.min())
        log('Highest abs eigen value:      %12.5e' % abs_evals.max())
        log('Condition number:             %12.5e' % results['cn'])
        log.hline()

    # Store the results in an HDF5 file
    write_script_output(fn_h5, grp_name, results, args)
Example #17
def load_cost(arg_cost):
    '''Load an ESP cost function given at the command line'''
    fn_h5_in, grp_name_in = parse_h5(arg_cost, 'cost')
    with LockedH5File(fn_h5_in, 'r') as f:
        return ESPCost.from_hdf5(
            f[grp_name_in]['cost']), f[grp_name_in]['used_volume'][()]
Example #18
def load_charges(arg_charges):
    '''Load the charges given at the command line'''
    fn_h5, ds_name = parse_h5(arg_charges, 'charges', path_optional=False)
    with LockedH5File(fn_h5, 'r') as f:
        return f[ds_name][:]
Example #19
def load_cost(arg_cost):
    '''Load an ESP cost function given at the command line'''
    fn_h5_in, grp_name_in = parse_h5(arg_cost, 'cost')
    with LockedH5File(fn_h5_in, 'r') as f:
        return ESPCost.from_hdf5(f[grp_name_in]['cost'], None), f[grp_name_in]['used_volume'][()]
Example #20
def load_charges(arg_charges):
    '''Load the charges given at the command line'''
    fn_h5, ds_name = parse_h5(arg_charges, 'charges', path_optional=False)
    with LockedH5File(fn_h5, 'r') as f:
        return f[ds_name][:]
Example #21
def main():
    args = parse_args()

    fn_h5, grp_name = parse_h5(args.output, 'output')
    # check if the group is already present (and not empty) in the output file
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    # Load the IOData
    mol = IOData.from_file(args.cube)
    ugrid = mol.grid
    if not isinstance(ugrid, UniformGrid):
        raise TypeError('The density cube file does not contain data on a rectangular grid.')
    ugrid.pbc[:] = parse_pbc(args.pbc)
    moldens = mol.cube_data

    # Reduce the grid if required
    if args.stride > 1 or args.chop > 0:
        moldens, ugrid = reduce_data(moldens, ugrid, args.stride, args.chop)

    # Load the spin density (optional)
    if args.spindens is not None:
        molspin = IOData.from_file(args.spindens)
        if not isinstance(molspin.grid, UniformGrid):
            raise TypeError('The spin cube file does not contain data on a rectangular grid.')
        spindens = molspin.cube_data
        if args.stride > 1 or args.chop > 0:
            spindens = reduce_data(spindens, molspin.grid, args.stride, args.chop)[0]
        if spindens.shape != moldens.shape:
            raise TypeError('The shape of the spin cube does not match the shape of the density cube.')
    else:
        spindens = None

    # Load the proatomdb and make pro-atoms more compact if that is requested
    proatomdb = ProAtomDB.from_file(args.atoms)
    if args.compact is not None:
        proatomdb.compact(args.compact)
    proatomdb.normalize()

    # Select the partitioning scheme
    CPartClass = cpart_schemes[args.scheme]

    # List of element numbers for which weight corrections are needed:
    if args.wcor == '0':
        wcor_numbers = []
    else:
        wcor_numbers = list(iter_elements(args.wcor))

    # Run the partitioning
    kwargs = dict((key, val) for key, val in vars(args).iteritems() if key in CPartClass.options)
    cpart = cpart_schemes[args.scheme](
        mol.coordinates, mol.numbers, mol.pseudo_numbers, ugrid, moldens,
        proatomdb, spindens=spindens, local=True, wcor_numbers=wcor_numbers,
        wcor_rcut_max=args.wcor_rcut_max, wcor_rcond=args.wcor_rcond, **kwargs)
    keys = cpart.do_all()

    # Do a symmetry analysis if requested.
    if args.symmetry is not None:
        mol_sym = IOData.from_file(args.symmetry)
        if not hasattr(mol_sym, 'symmetry'):
            raise ValueError('No symmetry information found in %s.' % args.symmetry)
        aim_results = dict((key, cpart[key]) for key in keys)
        sym_results = symmetry_analysis(mol.coordinates, ugrid.get_cell(), mol_sym.symmetry, aim_results)
        cpart.cache.dump('symmetry', sym_results)
        keys.append('symmetry')

    write_part_output(fn_h5, grp_name, cpart, keys, args)
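
All of the main() examples above share the same preamble: parse the command-line arguments, resolve the output HDF5 location with parse_h5, and return early when check_output reports that the group already contains data. A minimal skeleton of that pattern; the script-specific work is only indicated by a placeholder, and the exact spec syntax accepted by parse_h5 is not shown in these examples:

def main():
    args = parse_args()

    # Resolve the output HDF5 file and group; skip if results already exist
    # (unless --overwrite was given).
    fn_h5, grp_name = parse_h5(args.output, 'output')
    if check_output(fn_h5, grp_name, args.overwrite):
        return

    results = {}
    # ... script-specific work fills the results dictionary ...

    write_script_output(fn_h5, grp_name, results, args)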