Code example #1
def get_bc(self, matrix=None, is_vector=False):
  '''Calculates the barycenter of a scalar field.
  '''
  # Initialize the result vector
  self.bc = numpy.zeros(3)
  # Work on the flattened (vector) form of the grid
  from orbkit import grid
  if not is_vector:
    grid.grid2vector()
  xyz = grid.tolist()
  # Barycenter: field-weighted mean of the grid coordinates
  for i in range(3):
    self.bc[i] = (matrix.reshape((-1,)) * xyz[i]).sum()
  self.bc /= matrix.sum()
  # Restore the original regular grid
  if not is_vector:
    grid.vector2grid(*grid.N_)
  return self.bc
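The barycenter above is simply the field-weighted mean of the grid coordinates, bc_i = sum_k w_k * r_k,i / sum_k w_k. As a minimal, self-contained sketch of that formula with plain NumPy on a hypothetical regular grid (no orbkit grid module involved):

import numpy

# Hypothetical regular grid and a Gaussian test field centred at (0.5, 0.0, -0.25)
x = y = z = numpy.linspace(-5.0, 5.0, 41)
X, Y, Z = numpy.meshgrid(x, y, z, indexing='ij')
field = numpy.exp(-((X - 0.5)**2 + Y**2 + (Z + 0.25)**2))

# Barycenter: field-weighted mean of the coordinates, one Cartesian component at a time
weights = field.reshape(-1)
coords = (X.reshape(-1), Y.reshape(-1), Z.reshape(-1))
bc = numpy.array([(weights * c).sum() for c in coords]) / weights.sum()
print(bc)  # approximately [0.5, 0.0, -0.25]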
Code example #2
def main_output(data,
                qc=None,
                outputname='data',
                otype='auto',
                gname='',
                drv=None,
                omit=[],
                datalabels='',
                mode='w',
                **kwargs):
    '''Creates the requested output.
  
  **Parameters:**
  
  data : numpy.ndarray, shape=N, shape=((NDRV,) + N), shape=(n, (NDRV,) + N) or list of numpy.ndarrays
    Contains the output data. The shape (N) depends on the grid and the data, i.e.,
    3d for regular grid, 1d for vector grid. 
  qc : class or dict
    QCinfo class or dictionary containing the following attributes/keys.
    See :ref:`Central Variables` for details.
  outputname : str or list of str
    Contains the base name of the output file. If outputname contains an '@', the string is
    split: the first part is used as outputname and the second as gname (cf. the gname parameter).
  otype : str or list of str, optional
    Contains the output file type. Possible options:
    'auto', 'h5', 'npz', 'cb' ('cube'), 'am', 'hx', 'vmd', 'mayavi', 'native'

    If otype='native', a native input file will be written, the type of which may be specified by
    ftype='numpy'.
  gname : str, optional
    For native, HDF5, or npz output, specifies the group, where the data will be stored.
  drv : None, list of str or list of list of str, optional
    If not None, a 4d (regular) / 2d (vector) input data array is expected,
    with NDRV = len(drv). Also specifies the file labels, e.g., data_d{drv}.cube for a
    4d array, or data_0_d{drv}.cube for a 5d array.
  datalabels : str or list of str, optional
    Contains a label for each dataset; used, e.g., as comments in cube files and as
    dataset labels in the HDF5/npz output.
  omit : list of str, optional
    If not empty, the output file types specified here are omitted.
  mode : str={'r', 'w', 'a'}, optional
    Specifies the mode used to open the file (native, HDF5, or npz). 
  
  **Note:**
  
    All additional keyword arguments are forwarded to the output functions.
  '''
    if otype is None or otype == []:
        return []

    if isinstance(outputname, str):
        if '@' in outputname:
            outputname, gname = outputname.split('@')
    if isinstance(otype, str):
        if otype == 'auto':
            outputname, otype = path.splitext(outputname)
            otype = otype[1:]
        otype = [otype]
    elif isinstance(otype, list) and len(otype) == 1:
        if otype[0] == 'auto':
            outputname, otype = path.splitext(outputname)
            otype = [otype[1:]]
    else:
        for iot in range(len(otype)):
            if otype[iot] == 'auto':
                outputname, tmp = path.splitext(outputname)
                if tmp != '':
                    otype[iot] = tmp[1:]

    # Catch our native format before all else
    # We can't figure this out by the file ending alone
    # as we support hdf5 for output of both grid-based data
    # as well as our internal format

    output_written = []
    internals = [i for i in range(len(otype)) if otype[i] == 'native']

    if len(internals) > 0:
        if not isinstance(data, list):
            data = [data]

        if isinstance(outputname, str):
            outputname = [outputname for _ in data]

        # A single ftype/group string is broadcast to all datasets; otherwise a
        # list with one entry per dataset is expected.
        ftype = kwargs.get('ftype', 'numpy')
        if isinstance(ftype, str):
            ftype = [ftype for _ in data]

        group = kwargs.get('group', None)
        if isinstance(group, str):
            group = [group for _ in data]
        elif group is None:
            group = [i.__class__.__name__.lower() for i in data]

        display('Writing native input file...')
        for i, oname in enumerate(outputname):
            output_written.append(
                write_native(data[i],
                             oname,
                             ftype[i],
                             mode=mode,
                             gname=path.join(gname, group[i])))
        display('\n'.join(['\t' + i for i in output_written]))

    else:
        print_waring = False
        output_not_possible = (grid.is_vector and not grid.is_regular)

        # Shape shall be (Ndrv,Ndata,Nx,Ny,Nz) or (Ndrv,Ndata,Nxyz)
        data = numpy.array(data)
        dims = 1 if grid.is_vector else 3
        shape = data.shape

        if drv is not None and isinstance(drv, str):
            drv = [drv]

        if data.ndim < dims:
            output_not_possible = True
            display('data.ndim < ndim of grid')
        elif data.ndim == dims:  # 3d data set
            data = data[numpy.newaxis, numpy.newaxis]
        elif data.ndim == dims + 1:  # 4d data set
            if drv is not None:
                data = data[:, numpy.newaxis]
            else:
                data = data[numpy.newaxis]
        elif data.ndim == dims + 2:  # 5d data set check if drv matches Ndrv
            if drv is None or len(drv) != data.shape[0]:
                drv = list(range(data.shape[0]))
        elif data.ndim > dims + 2:
            output_not_possible = True
            display('data.ndim > (ndim of grid) +2')

        if 'vmd' in otype and not ('cb' in otype or 'cube' in otype):
            otype.append('cube')
        if 'hx' in otype and not 'am' in otype:
            otype.append('am')

        otype = [i for i in otype if i not in omit]
        otype_synonyms = [synonyms[i] for i in otype]
        otype_ext = dict(zip(otype_synonyms, otype))

        # Convert the data to a regular grid, if possible
        is_regular_vector = (grid.is_vector and grid.is_regular)

        if is_regular_vector:
            display(
                '\nConverting the regular 1d vector grid to a 3d regular grid.'
            )
            grid.vector2grid(*grid.N_)
            data = numpy.array(grid.mv2g(data))

        isstr = isinstance(outputname, str)
        if isinstance(datalabels, str):
            if data.shape[1] > 1:
                datalabels = numpy.array([
                    str(idata) + ',' + datalabels
                    for idata in range(data.shape[1])
                ])
            else:
                datalabels = numpy.array([datalabels])
        elif isinstance(datalabels, list):
            datalabels = numpy.array(datalabels)

        if drv is not None:
            fid = '%(f)s_d%(d)s.'
            datalabel_id = 'd/d%(d)s %(f)s'
            contents = {
                'axis:0':
                numpy.array(
                    ['d/d%s' % i if i is not None else str(i) for i in drv]),
                'axis:1':
                datalabels
            }
            it = enumerate(drv)
        elif data.shape[0] > 1:
            fid = '%(f)s_%(d)s.'
            datalabel_id = '%(d)s %(f)s'
            it = enumerate(range(data.shape[0]))
            contents = {
                'axis:0': numpy.arange(data.shape[0]).astype(str),
                'axis:1': datalabels
            }
        else:
            fid = '%(f)s.'
            datalabel_id = '%(f)s'
            it = [(0, None)]
            if data.shape[1] > 1:
                contents = {'axis:0': datalabels}
            else:
                contents = datalabels

        cube_files = []
        all_datalabels = []
        for idrv, jdrv in it:
            datasetlabels = []
            for idata in range(data.shape[1]):
                if isstr:
                    f = {
                        'f':
                        outputname + '_' +
                        str(idata) if data.shape[1] > 1 else outputname,
                        'd':
                        jdrv
                    }
                else:
                    f = {'f': outputname[idata], 'd': jdrv}
                c = {'f': datalabels[idata], 'd': jdrv}
                datalabel = datalabel_id % c
                datasetlabels.append(datalabel)

                if 'am' in otype_synonyms and not print_waring:
                    if output_not_possible: print_waring = True
                    else:
                        filename = fid % f + otype_ext['am']
                        display('\nSaving to ZIBAmiraMesh file...\n\t' +
                                filename)
                        amira_creator(data[idrv, idata], filename)
                        output_written.append(filename)
                if 'hx' in otype_synonyms and not print_waring:
                    if output_not_possible: print_waring = True
                    else:
                        filename = fid % f + otype_ext['hx']
                        display('\nCreating ZIBAmira network file...\n\t' +
                                filename)
                        hx_network_creator(data[idrv, idata], filename)
                        output_written.append(filename)
                if 'cube' in otype_synonyms and not print_waring:
                    if output_not_possible: print_waring = True
                    elif qc is None:
                        display(
                            '\nFor cube file output `qc` is a required keyword parameter in `main_output`.'
                        )
                    else:
                        filename = fid % f + otype_ext['cube']
                        display('\nSaving to cube file...\n\t' + filename)
                        cube_creator(data[idrv, idata],
                                     filename,
                                     qc.geo_info,
                                     qc.geo_spec,
                                     comments=datalabel,
                                     **kwargs)
                        output_written.append(filename)
                        cube_files.append(filename)

            all_datalabels.extend(datasetlabels)

        if 'vmd' in otype_synonyms and not print_waring:
            if output_not_possible: print_waring = True
            else:
                filename = (outputname if isstr else
                            outputname[-1]) + '.' + otype_ext['vmd']
                display('\nCreating VMD network file...\n\t' + filename)
                vmd_network_creator(filename, cube_files=cube_files, **kwargs)
                output_written.append(filename)

        if 'h5' in otype_synonyms:
            filename = (outputname
                        if isstr else outputname[-1]) + '.' + otype_ext['h5']
            display('\nSaving to Hierarchical Data Format file (HDF5)...\n\t' +
                    filename)

            hdf5_creator(data.reshape(shape),
                         filename,
                         qcinfo=qc,
                         gname=gname,
                         ftype='hdf5',
                         contents=contents,
                         mode=mode,
                         **kwargs)
            output_written.append(filename)

        if 'npz' in otype_synonyms:
            filename = (outputname if isstr else outputname[-1])
            display('\nSaving to a compressed .npz archive...\n\t' + filename +
                    '.npz')
            hdf5_creator(data.reshape(shape),
                         filename,
                         qcinfo=qc,
                         gname=gname,
                         ftype='numpy',
                         contents=contents,
                         mode=mode,
                         **kwargs)
            output_written.append(filename)

        if 'mayavi' in otype_synonyms:
            if output_not_possible: print_waring = True
            else:
                display('\nDepicting the results with MayaVi...\n\t')
                if drv == ['x', 'y', 'z'] or drv == [0, 1, 2]:
                    is_vectorfield = True
                    data = numpy.swapaxes(data, 0, 1)
                    datalabels = datalabels
                else:
                    is_vectorfield = False
                    data = data.reshape((-1, ) + grid.get_shape())
                    datalabels = all_datalabels

                view_with_mayavi(grid.x,
                                 grid.y,
                                 grid.z,
                                 data,
                                 is_vectorfield=is_vectorfield,
                                 geo_spec=qc.geo_spec,
                                 datalabels=datalabels,
                                 **kwargs)

        if print_waring:
            display(
                'For a non-regular vector grid (`if grid.is_vector and not grid.is_regular`)'
            )
            display('only HDF5 is available as output format...')
            display('Skipping all other formats...')

        if is_regular_vector:
            # Convert the data back to a regular vector grid
            grid.grid2vector()

    return output_written
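For context, a hedged usage sketch of this newer main_output: it assumes an orbkit installation where the imports below resolve as written (in particular that main_output is importable from orbkit.output), and the Molden file name is purely illustrative.

from orbkit import read, grid
from orbkit.core import rho_compute
from orbkit.output import main_output  # import path assumed

qc = read.main_read('h2o.molden', all_mo=False)  # illustrative input file
grid.adjust_to_geo(qc, extend=5.0, step=0.2)     # regular grid around the molecule
grid.grid_init()

rho = rho_compute(qc, numproc=4)

# otype may also be a list, e.g. ['cube', 'h5']; an '@group' suffix in
# outputname would be split off and used as gname.
main_output(rho, qc=qc, outputname='rho', otype='cube', datalabels='density')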
Code example #3
File: output.py Project: danielsylvinson/OverlApp
def main_output(data,
                geo_info,
                geo_spec,
                outputname='new',
                otype='h5',
                drv=None,
                omit=[],
                **kwargs):
    '''Creates the requested output.
  
  **Parameters:**
  
  data : numpy.ndarray, shape=N or shape=((NDRV,) + N)
    Contains the output data. The shape (N) depends on the grid and the data, i.e.,
    3d for regular grid, 1d for vector grid. 
  geo_info, geo_spec : 
    See :ref:`Central Variables` for details.
  outputname : str
    Contains the base name of the output file.
  otype : str or list of str
    Contains the output file type. Possible options:
    'h5', 'cb', 'am', 'hx', 'vmd', 'mayavi'
  drv : None or list of str, optional
    If not None, a 4d(regular)/2d(vector) input data array will be expected
    with NDRV = len(drv).
  omit : list of str, optional
    If not empty, the output file types specified here are omitted.
  
  **Note:**
  
    All additional keyword arguments are forwarded to the output functions.
  '''
    print_waring = False
    output_written = []
    if isinstance(otype, str):
        otype = [otype]

    if 'vmd' in otype and not 'cb' in otype:
        otype.append('cb')

    otype = [i for i in otype if i not in omit]

    if otype is None or otype == []:
        return output_written

    # Convert the data to a regular grid, if possible
    output_not_possible = (grid.is_vector and not grid.is_regular)
    is_regular_vector = (grid.is_vector and grid.is_regular)
    if is_regular_vector:
        display(
            '\nConverting the regular 1d vector grid to a 3d regular grid.')
        grid.vector2grid(*grid.N_)
        data = grid.mv2g(data=data)

    if 'mayavi' in otype:
        if output_not_possible: print_waring = True
        else:
            view_with_mayavi(grid.x,
                             grid.y,
                             grid.z,
                             data,
                             geo_spec=geo_spec,
                             **kwargs)

    if drv is not None:
        fid = '%(f)s_d%(d)s'
        it = enumerate(drv)
    else:
        fid = '%(f)s'
        it = [(0, None)]
        data = [data]
    f = {'f': outputname}

    for i, j in it:
        f['d'] = j
        d = data[i]
        if 'h5' in otype:
            display('\nSaving to Hierarchical Data Format file (HDF5)...' +
                    '\n\t%(o)s.h5' % {'o': fid % f})
            HDF5_creator(d, (fid % f), geo_info, geo_spec, **kwargs)
            output_written.append('%s.h5' % (fid % f))
        if ('am' in otype or 'hx' in otype) and not print_waring:
            if output_not_possible: print_waring = True
            else:
                display('\nSaving to ZIBAmiraMesh file...' +
                        '\n\t%(o)s.am' % {'o': fid % f})
                amira_creator(d, (fid % f))
                output_written.append('%s.am' % (fid % f))
        if 'hx' in otype and not print_waring:
            if output_not_possible: print_waring = True
            else:
                # Create Amira network incl. Alphamap
                display('\nCreating ZIBAmira network file...')
                hx_network_creator(d, (fid % f))
                output_written.append('%s.hx' % (fid % f))
        if ('cb' in otype or 'vmd' in otype) and not print_waring:
            if output_not_possible: print_waring = True
            else:
                display('\nSaving to .cb file...' +
                        '\n\t%(o)s.cb' % {'o': fid % f})
                cube_creator(d, (fid % f), geo_info, geo_spec, **kwargs)
                output_written.append('%s.cb' % (fid % f))
            #else: output_creator(d,(fid % f),geo_info,geo_spec)  # Axel's cube files
        if 'vmd' in otype and not print_waring:
            if output_not_possible: print_waring = True
            else:
                # Create VMD network
                display('\nCreating VMD network file...' +
                        '\n\t%(o)s.vmd' % {'o': fid % f})
                vmd_network_creator((fid % f),
                                    cube_files=['%s.cb' % (fid % f)],
                                    **kwargs)
                output_written.append('%s.vmd' % (fid % f))

    if print_waring:
        display(
            'For a non-regular vector grid (`if grid.is_vector and not grid.is_regular`)'
        )
        display('only HDF5 is available as output format...')
        display('Skipping all other formats...')

    if is_regular_vector:
        # Convert the data back to a regular vector grid
        grid.grid2vector()

    return output_written
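A correspondingly hedged sketch for this older variant, which takes geo_info and geo_spec directly instead of a QCinfo object (same caveats about import paths and the illustrative file name as above):

from orbkit import read, grid, output
from orbkit.core import rho_compute

qc = read.main_read('h2o.molden', all_mo=False)  # illustrative input file
grid.grid_init()
rho = rho_compute(qc)

# Geometry is passed explicitly here rather than via a QCinfo object.
output.main_output(rho, qc.geo_info, qc.geo_spec, outputname='rho', otype=['h5', 'cb'])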
Code example #4
File: core.py Project: felixplasser/orbkit
def rho_compute(qc,
                calc_ao=False,
                calc_mo=False,
                drv=None,
                laplacian=False,
                numproc=1,
                slice_length=1e4,
                vector=None,
                save_hdf5=False,
                **kwargs):
    r'''Calculate the density, the molecular orbitals, or the derivatives thereof.
  
  orbkit divides 3-dimensional regular grids into 2-dimensional slices and 
  1-dimensional vector grids into 1-dimensional slices of equal length. By default,
  3-dimensional grids are used (:literal:`vector=None`).
  The computational tasks are distributed to the worker processes.
  
  **Parameters:**
  
  qc : class or dict
    QCinfo class or dictionary containing the following attributes/keys.
    See :ref:`Central Variables` for details.
  qc.geo_spec : numpy.ndarray, shape=(3,NATOMS) 
    See :ref:`Central Variables` for details.
  qc.ao_spec : List of dictionaries
    See :ref:`Central Variables` for details.
  qc.mo_spec : List of dictionaries
    See :ref:`Central Variables` for details.
  calc_mo : bool, optional
    If True, only the computation of the requested molecular orbitals is
    carried out.
  slice_length : int, optional
    Specifies the number of points per subprocess.
  drv : string or list of strings {None,'x','y', 'z', 'xx', 'xy', ...}, optional
    If not None, computes the analytical derivative of the requested 
    quantities with respect to DRV.
  laplacian : bool, optional
    If True, computes the laplacian of the density.
  numproc : int
    Specifies number of subprocesses for multiprocessing.
  grid : module or class, global
    Contains the grid, i.e., grid.x, grid.y, and grid.z. If grid.is_initialized
    is not True, functions runs grid.grid_init().

  **Returns:**
  
  :if calc_mo and drv is None: 
    - mo_list
  :if calc_mo and drv is not None:  
    - delta_mo_list
  :if not calc_mo and drv is None: 
    - rho
  :if not calc_mo and drv is not None: 
    - rho, delta_rho
  :if not calc_mo and laplacian:
    - rho, delta_rho, laplacian_rho      
  
  mo_list : numpy.ndarray, shape=((NMO,) + N)
    Contains the NMO=len(qc.mo_spec) molecular orbitals on a grid.
  delta_mo_list : numpy.ndarray, shape=((NDRV,NMO) + N)
    Contains the derivatives with respect to drv (NDRV=len(drv)) of the 
    NMO=len(qc.mo_spec) molecular orbitals on a grid.
  mo_norm : numpy.ndarray, shape=(NMO,)
    Contains the numerical norms of the molecular orbitals.
  rho : numpy.ndarray, shape=(N)
    Contains the density on a grid.
  delta_rho : numpy.ndarray, shape=((NDRV,) + N)
    Contains derivatives with respect to drv (NDRV=len(drv)) of 
    the density on a grid.
  laplacian_rho : numpy.ndarray, shape=(N)
    Contains the laplacian of the density on a grid, i.e. 
    :math:`\nabla^2 \rho = \nabla^2_x \rho + \nabla^2_y \rho + \nabla^2_z \rho`.
  '''
    if calc_ao and calc_mo:
        raise ValueError('Choose either calc_ao=True or calc_mo=True')
    elif calc_ao:
        calc_mo = True

    slice_length = slice_length if not vector else vector
    if slice_length == 0:
        return rho_compute_no_slice(qc,
                                    calc_ao=calc_ao,
                                    calc_mo=calc_mo,
                                    drv=drv,
                                    laplacian=laplacian,
                                    **kwargs)
    if laplacian:
        if not (drv is None or drv == ['xx', 'yy', 'zz']
                or drv == ['x2', 'y2', 'z2']):
            display(
                'Note: You have set the option `laplacian` and specified values\n'
                + 'for `drv`. Both options are not compatible.\n' +
                'The option `drv` has been changed to `drv=["xx","yy","zz"]`.')
        drv = ['xx', 'yy', 'zz']

    if drv is not None:
        is_drv = True
        try:
            drv = list(drv)
        except TypeError:
            drv = [drv]
    else:
        is_drv = False

    # Specify the global variable containing all desired information needed
    # by the function slice_rho
    if isinstance(qc, dict):
        Spec = qc
    else:
        Spec = qc.todict()
    Spec['calc_ao'] = calc_ao
    Spec['calc_mo'] = calc_mo
    Spec['Derivative'] = drv
    if calc_ao:
        if Spec['ao_spherical'] is None:
            lxlylz, assign = get_lxlylz(Spec['ao_spec'], get_assign=True)
            labels = [
                'lxlylz=%s,atom=%d' %
                (lxlylz[i], Spec['ao_spec'][assign[i]]['atom'])
                for i in range(len(lxlylz))
            ]
            mo_num = len(lxlylz)
        else:
            mo_num = len(Spec['ao_spherical'])
            labels = [
                'l,m=%s,atom=%d' % (j, Spec['ao_spec'][i]['atom'])
                for i, j in Spec['ao_spherical']
            ]
    else:
        mo_num = len(Spec['mo_spec'])
        labels = [ii_mo['sym'] for ii_mo in Spec['mo_spec']]

    if not grid.is_initialized:
        display('\nSetting up the grid...')
        grid.grid_init(is_vector=True)
        display(grid.get_grid())  # Display the grid

    was_vector = grid.is_vector
    N = (len(grid.x), ) if was_vector else (len(grid.x), len(grid.y),
                                            len(grid.z))
    if not was_vector:
        grid.grid2vector()
        display('Converting the regular grid to a vector grid containing ' +
                '%.2e grid points...' % len(grid.x))

    # Define the slice length
    npts = len(grid.x)
    if slice_length < 0: slice_length = numpy.ceil(npts / float(numproc)) + 1
    sNum = int(numpy.floor(npts / slice_length) + 1)

    # The number of worker processes is capped to the number of slices.
    if numproc > sNum: numproc = sNum

    # Print information regarding the density calculation
    display('\nStarting the calculation of the %s...' %
            ('molecular orbitals' if calc_mo else 'density'))
    display(
        'The grid has been separated into %d slices each having %.2e grid points.'
        % (sNum, slice_length))
    if numproc <= 1:
        display(
            'The calculation will be carried out using only one process.\n' +
            '\n\tThe number of subprocesses can be changed with -p\n')
    else:
        display('The calculation will be carried out with %d subprocesses.' %
                numproc)
    display('\nThere are %d contracted %s AOs' %
            (len(Spec['mo_spec'][0]['coeffs']),
             'Cartesian' if not Spec['ao_spherical'] else 'spherical') +
            ('' if calc_ao else ' and %d MOs to be calculated.' % mo_num))

    # Initialize some additional user information
    status_old = 0
    s_old = 0
    t = [time.time()]

    # Make slices
    # Initialize an array to store the results
    mo_norm = numpy.zeros((mo_num, ))

    def zeros(shape, name, save_hdf5):
        if not save_hdf5:
            return numpy.zeros(shape)
        else:
            return f.create_dataset(name,
                                    shape,
                                    dtype=numpy.float64,
                                    chunks=shape[:-1] + (slice_length, ))

    def reshape(data, shape):
        if not save_hdf5:
            return data.reshape(shape)
        else:
            data.attrs['shape'] = shape
            return data[...].reshape(shape)

    if save_hdf5:
        import h5py
        f = h5py.File(str(save_hdf5), 'w')
        f['x'] = grid.x
        f['y'] = grid.y
        f['z'] = grid.z

    if calc_mo:
        mo_list = zeros((mo_num, npts) if drv is None else
                        (len(drv), mo_num, npts),
                        'ao_list' if calc_ao else 'mo_list', save_hdf5)
    else:
        rho = zeros(npts, 'rho', save_hdf5)
        if is_drv:
            delta_rho = zeros((len(drv), npts), 'delta_rho', save_hdf5)

    # Write the slices in x to an array xx
    xx = []
    i = 0
    for s in range(sNum):
        if i == npts:
            sNum -= 1
            break
        elif (i + slice_length) >= npts:
            xx.append((numpy.array([i, npts], dtype=int)))
        else:
            xx.append((numpy.array([i, i + slice_length], dtype=int)))
        i += slice_length

    # Start the worker processes
    if numproc > 1:
        pool = Pool(processes=numproc,
                    initializer=initializer,
                    initargs=(Spec, ))
        it = pool.imap(slice_rho, xx)
    else:
        initializer(Spec)

    # Compute the density slice by slice
    for s in range(sNum):
        # Which slice do we compute
        i = xx[s][0]
        j = xx[s][1]
        # Perform the computation for the current slice
        result = it.next() if numproc > 1 else slice_rho(xx[s])
        # What output do we expect
        if calc_mo:
            if not is_drv:
                mo_list[:, i:j] = result[:, :]
            else:
                for ii_d in range(len(drv)):
                    mo_list[ii_d, :, i:j] = result[ii_d, :, :, ]
        else:
            rho[i:j] = result[0]
            mo_norm += result[1]
            if is_drv:
                for ii_d in range(len(drv)):
                    delta_rho[ii_d, i:j] = result[2][ii_d, :]

        # Print out the progress of the computation
        status = numpy.floor(s * 10 / float(sNum)) * 10
        if not status % 10 and status != status_old:
            t.append(time.time())
            display('\tFinished %(f)d %% (%(s)d slices in %(t).3f s)' % {
                'f': status,
                's': s + 1 - s_old,
                't': t[-1] - t[-2]
            })
            status_old = status
            s_old = s + 1

    # Close the worker processes
    if numproc > 1:
        pool.close()
        pool.join()

    if not was_vector:
        grid.vector2grid(*N)
        display(
            'Converting the output from a vector grid to a regular grid...')

    if not was_vector and drv is None:
        # Print the norm of the MOs
        display('\nNorm of the MOs:')
        for ii_mo in range(len(mo_norm)):
            if calc_mo:
                norm = numpy.sum(numpy.square(mo_list[ii_mo])) * grid.d3r
            else:
                norm = mo_norm[ii_mo] * grid.d3r
            display('\t%(m).6f\t%(t)s %(n)s' % {
                'm': norm,
                'n': labels[ii_mo],
                't': 'AO' if calc_ao else 'MO'
            })

    if calc_mo:
        #if not was_vector:
        mo_list = reshape(mo_list, ((mo_num, ) if drv is None else (
            len(drv),
            mo_num,
        )) + N)
        if save_hdf5: f.close()
        return mo_list

    if not was_vector:
        # Print the number of electrons
        display('We have ' + str(numpy.sum(rho) * grid.d3r) + ' electrons.')

    #if not was_vector:
    rho = reshape(rho, N)
    if not is_drv:
        if save_hdf5: f.close()
        return rho
    else:
        #if not was_vector:
        delta_rho = reshape(delta_rho, (len(drv), ) + N)
        if save_hdf5: f.close()
        if laplacian: return rho, delta_rho, delta_rho.sum(axis=0)
        return rho, delta_rho
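As a quick reference for the Returns section above, a hedged sketch of how the different return signatures unpack (qc read, e.g., as in the test snippet of code example #5 below):

# Density only
rho = rho_compute(qc)

# Density plus first derivatives, shape (3,) + N
rho, delta_rho = rho_compute(qc, drv=['x', 'y', 'z'])

# Density, its second derivatives along xx/yy/zz, and their sum (the Laplacian)
rho, delta_rho, laplacian_rho = rho_compute(qc, laplacian=True)

# Molecular orbitals on the grid instead of the density, shape (NMO,) + N
mo_list = rho_compute(qc, calc_mo=True)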
Code example #5
File: rho_compute.py Project: wangvei/orbkit
import os
import inspect

# Imports required by this snippet; in the original test file they appear further up.
# The location of the 'equal' comparison helper is assumed to be orbkit's test tools.
from orbkit import grid, options, read
from orbkit.core import rho_compute
from orbkit.test.tools import equal  # assumed import path

options.quiet = True

tests_home = os.path.dirname(inspect.getfile(inspect.currentframe()))
folder = os.path.join(tests_home, '../outputs_for_testing/molpro')
filepath = os.path.join(folder, 'h2o_rhf_sph.molden')
qc = read.main_read(filepath, all_mo=True)

grid.adjust_to_geo(qc, extend=2.0, step=1)
grid.grid_init(is_vector=False, force=True)

drv = [None, 'x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', 'yz', 'zz']
data = []
for i in range(2):
    if i: grid.grid2vector()
    data.append([
        rho_compute(qc, slice_length=0),
        rho_compute(qc, numproc=options.numproc),
        rho_compute(qc, laplacian=True, slice_length=0)[-1],
        rho_compute(qc, laplacian=True, numproc=options.numproc)[-1],
        rho_compute(qc, calc_mo=True, drv=drv, slice_length=0),
        rho_compute(qc, calc_mo=True, drv=drv, numproc=options.numproc)
    ])

data[1] = [grid.mv2g(d=i) for i in data[1]]

for i in range(len(data[0])):
    equal(data[0][i], data[1][i])

filepath = os.path.join(tests_home, 'refdata_rho_compute.npz')