def main_output(data, qc=None, outputname='data', otype='auto', gname='',
                drv=None, omit=[], datalabels='', mode='w', **kwargs):
  '''Creates the requested output.

  **Parameters:**

  data : numpy.ndarray, shape=N, shape=((NDRV,) + N), shape=((NDRV, n) + N), or list of numpy.ndarrays
    Contains the output data. The shape (N) depends on the grid and the data,
    i.e., 3d for a regular grid and 1d for a vector grid.
  qc : class or dict
    QCinfo class or dictionary containing the following attributes/keys.
    See :ref:`Central Variables` for details.
  outputname : str or list of str
    Contains the base name of the output file. If outputname contains @,
    the string will be split; the first part is interpreted as outputname and
    the second as gname (cf. Parameters:gname).
  otype : str or list of str, optional
    Contains the output file type. Possible options:
    'auto', 'h5', 'cb', 'am', 'hx', 'vmd', 'mayavi'
    If otype='native', a native input file will be written, the type of which
    may be specified by ftype='numpy'.
  gname : str, optional
    For native, HDF5, or npz output, specifies the group where the data
    will be stored.
  drv : None, list of str or list of list of str, optional
    If not None, a 4d (regular) / 2d (vector) input data array will be
    expected with NDRV = len(drv). Specifies the file labels, e.g.,
    data_d{drv}.cube for a 4d array and data_0_d{drv}.cube for a 5d array.
  datalabels : str or list of str, optional
    Contains the labels of the data sets. If a single string is given for
    several data sets, the data set index is prepended.
  omit : list of str, optional
    If not empty, the output file types specified here are omitted.
  mode : str={'r', 'w', 'a'}, optional
    Specifies the mode used to open the file (native, HDF5, or npz).

  **Note:**

    All additional keyword arguments are forwarded to the output functions.
  '''
  if otype is None or otype == []:
    return []

  if isinstance(outputname, str):
    if '@' in outputname:
      outputname, gname = outputname.split('@')

  if isinstance(otype, str):
    if otype == 'auto':
      outputname, otype = path.splitext(outputname)
      otype = otype[1:]
    otype = [otype]
  elif isinstance(otype, list) and len(otype) == 1:
    if otype[0] == 'auto':
      outputname, otype = path.splitext(outputname)
      otype = [otype[1:]]
  else:
    for iot in range(len(otype)):
      if otype[iot] == 'auto':
        outputname, tmp = path.splitext(outputname)
        if tmp != '':
          otype[iot] = tmp[1:]

  # Catch our native format before all else.
  # We can't figure this out by the file ending alone,
  # as we support hdf5 for output of both grid-based data
  # as well as our internal format.
  output_written = []
  internals = [i for i in range(len(otype)) if otype[i] == 'native']
  if len(internals) > 0:
    if not isinstance(data, list):
      data = [data]
    if isinstance(outputname, str):
      outputname = [outputname for _ in data]
    if 'ftype' in kwargs.keys():
      if isinstance(kwargs['ftype'], str):
        ftype = [kwargs['ftype'] for _ in data]
    else:
      ftype = ['numpy' for _ in data]
    if 'group' in kwargs.keys():
      if isinstance(kwargs['group'], str):
        group = [kwargs['group'] for _ in data]
    else:
      group = [i.__class__.__name__.lower() for i in data]

    display('Writing native input file...')
    for i, oname in enumerate(outputname):
      output_written.append(write_native(data[i], oname, ftype[i], mode=mode,
                                         gname=path.join(gname, group[i])))
    display('\n'.join(['\t' + i for i in output_written]))

  else:
    print_warning = False
    output_not_possible = (grid.is_vector and not grid.is_regular)

    # Shape shall be (Ndrv, Ndata, Nx, Ny, Nz) or (Ndrv, Ndata, Nxyz)
    data = numpy.array(data)
    dims = 1 if grid.is_vector else 3
    shape = data.shape

    if drv is not None and isinstance(drv, str):
      drv = [drv]

    if data.ndim < dims:
      output_not_possible = True
      display('data.ndim < ndim of grid')
    elif data.ndim == dims:       # 3d data set
      data = data[numpy.newaxis, numpy.newaxis]
    elif data.ndim == dims + 1:   # 4d data set
      if drv is not None:
        data = data[:, numpy.newaxis]
      else:
        data = data[numpy.newaxis]
    elif data.ndim == dims + 2:   # 5d data set; check if drv matches Ndrv
      if drv is None or len(drv) != data.shape[0]:
        drv = list(range(data.shape[0]))
    elif data.ndim > dims + 2:
      output_not_possible = True
      display('data.ndim > (ndim of grid) + 2')

    if 'vmd' in otype and not ('cb' in otype or 'cube' in otype):
      otype.append('cube')
    if 'hx' in otype and 'am' not in otype:
      otype.append('am')

    otype = [i for i in otype if i not in omit]
    otype_synonyms = [synonyms[i] for i in otype]
    otype_ext = dict(zip(otype_synonyms, otype))

    # Convert the data to a regular grid, if possible
    is_regular_vector = (grid.is_vector and grid.is_regular)
    if is_regular_vector:
      display('\nConverting the regular 1d vector grid to a 3d regular grid.')
      grid.vector2grid(*grid.N_)
      data = numpy.array(grid.mv2g(data))

    isstr = isinstance(outputname, str)
    if isinstance(datalabels, str):
      if data.shape[1] > 1:
        datalabels = numpy.array([str(idata) + ',' + datalabels
                                  for idata in range(data.shape[1])])
      else:
        datalabels = numpy.array([datalabels])
    elif isinstance(datalabels, list):
      datalabels = numpy.array(datalabels)

    if drv is not None:
      fid = '%(f)s_d%(d)s.'
      datalabel_id = 'd/d%(d)s %(f)s'
      contents = {
          'axis:0': numpy.array(['d/d%s' % i if i is not None else str(i)
                                 for i in drv]),
          'axis:1': datalabels}
      it = enumerate(drv)
    elif data.shape[0] > 1:
      fid = '%(f)s_%(d)s.'
      datalabel_id = '%(d)s %(f)s'
      it = enumerate(range(data.shape[0]))
      contents = {
          'axis:0': numpy.arange(data.shape[0]).astype(str),
          'axis:1': datalabels}
    else:
      fid = '%(f)s.'
      datalabel_id = '%(f)s'
      it = [(0, None)]
      if data.shape[1] > 1:
        contents = {'axis:0': datalabels}
      else:
        contents = datalabels

    cube_files = []
    all_datalabels = []
    for idrv, jdrv in it:
      datasetlabels = []
      for idata in range(data.shape[1]):
        if isstr:
          f = {'f': outputname + '_' + str(idata) if data.shape[1] > 1
               else outputname,
               'd': jdrv}
        else:
          f = {'f': outputname[idata], 'd': jdrv}
        c = {'f': datalabels[idata], 'd': jdrv}
        datalabel = datalabel_id % c
        datasetlabels.append(datalabel)

        if 'am' in otype_synonyms and not print_warning:
          if output_not_possible:
            print_warning = True
          else:
            filename = fid % f + otype_ext['am']
            display('\nSaving to ZIBAmiraMesh file...\n\t' + filename)
            amira_creator(data[idrv, idata], filename)
            output_written.append(filename)
        if 'hx' in otype_synonyms and not print_warning:
          if output_not_possible:
            print_warning = True
          else:
            filename = fid % f + otype_ext['hx']
            display('\nCreating ZIBAmira network file...\n\t' + filename)
            hx_network_creator(data[idrv, idata], filename)
            output_written.append(filename)
        if 'cube' in otype_synonyms and not print_warning:
          if output_not_possible:
            print_warning = True
          elif qc is None:
            display('\nFor cube file output, `qc` is a required keyword parameter in `main_output`.')
          else:
            filename = fid % f + otype_ext['cube']
            display('\nSaving to cube file...\n\t' + filename)
            cube_creator(data[idrv, idata], filename, qc.geo_info, qc.geo_spec,
                         comments=datalabel, **kwargs)
            output_written.append(filename)
            cube_files.append(filename)

      all_datalabels.extend(datasetlabels)

    if 'vmd' in otype_synonyms and not print_warning:
      if output_not_possible:
        print_warning = True
      else:
        filename = (outputname if isstr else outputname[-1]) + '.' + otype_ext['vmd']
        display('\nCreating VMD network file...\n\t' + filename)
        vmd_network_creator(filename, cube_files=cube_files, **kwargs)
        output_written.append(filename)

    if 'h5' in otype_synonyms:
      filename = (outputname if isstr else outputname[-1]) + '.' + otype_ext['h5']
      display('\nSaving to Hierarchical Data Format file (HDF5)...\n\t' + filename)
      hdf5_creator(data.reshape(shape), filename, qcinfo=qc, gname=gname,
                   ftype='hdf5', contents=contents, mode=mode, **kwargs)
      output_written.append(filename)

    if 'npz' in otype_synonyms:
      filename = (outputname if isstr else outputname[-1])
      display('\nSaving to a compressed .npz archive...\n\t' + filename + '.npz')
      hdf5_creator(data.reshape(shape), filename, qcinfo=qc, gname=gname,
                   ftype='numpy', contents=contents, mode=mode, **kwargs)
      output_written.append(filename)

    if 'mayavi' in otype_synonyms:
      if output_not_possible:
        print_warning = True
      else:
        display('\nDepicting the results with MayaVi...\n\t')
        if drv == ['x', 'y', 'z'] or drv == [0, 1, 2]:
          is_vectorfield = True
          data = numpy.swapaxes(data, 0, 1)
          datalabels = datalabels
        else:
          is_vectorfield = False
          data = data.reshape((-1,) + grid.get_shape())
          datalabels = all_datalabels
        view_with_mayavi(grid.x, grid.y, grid.z, data,
                         is_vectorfield=is_vectorfield,
                         geo_spec=qc.geo_spec, datalabels=datalabels, **kwargs)

    if print_warning:
      display('For a non-regular vector grid (`if grid.is_vector and not grid.is_regular`)')
      display('only HDF5 is available as output format...')
      display('Skipping all other formats...')

    if is_regular_vector:
      # Convert the data back to a regular vector grid
      grid.grid2vector()

  return output_written
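# --- Usage sketch (illustrative only, not part of the library) ---------------
# A minimal example of calling `main_output` directly. The input file name
# 'mol.molden' and the output base name 'rho_example' are hypothetical
# placeholders, and the import paths for `main_read` as well as the grid
# helpers `adjust_to_geo`/`grid_init` are assumed to match the usual orbkit
# layout (cf. the test scripts at the end of this section).
from orbkit.read.high_level import main_read
from orbkit import grid
import numpy

qc = main_read('mol.molden')                  # hypothetical input file
grid.adjust_to_geo(qc, extend=2.0, step=0.5)  # assumed grid helpers
grid.grid_init()
rho = numpy.zeros(grid.get_shape())           # placeholder data on the grid

# One call writes both a cube file and a compressed npz archive; `qc` is
# required for the cube output, and 'rho_example@dset' would instead store
# the npz data under the group 'dset' (cf. the '@' handling of `outputname`).
written = main_output(rho, qc=qc, outputname='rho_example',
                      otype=['cb', 'npz'], datalabels='rho')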
def calc_jmo(qc, ij, drv=['x', 'y', 'z'], numproc=1, otype=None, ofid='',
             **kwargs):
  r'''Calculates the selected Cartesian components (e.g., drv=['x']) of the
  transition electronic flux density between the molecular orbitals i and j.

  .. math::

        moTEFD_{i,j}(r) = \langle mo_i | \delta(r-r') \nabla_x | mo_j \rangle

  **Parameters:**

    ij : list or numpy.ndarray of int pairs
      Contains the index pairs (i, j) of the first (bra) and second (ket)
      molecular orbital.
    drv : {0, 1, 2, 'x', 'y', 'z'}
      The desired components of the vector field of the transition
      electronic flux density.

  **Returns:**

    jmo : numpy.ndarray, shape=((NDRV, Npairs) + N)
      Contains the transition electronic flux density on a grid.
  '''
  ij = numpy.asarray(ij)
  if ij.ndim == 1 and len(ij) == 2:
    ij.shape = (1, 2)
  assert ij.ndim == 2
  assert ij.shape[1] == 2

  u, indices = numpy.unique(ij, return_inverse=True)
  indices.shape = (-1, 2)

  mo_spec = qc.mo_spec[u]
  qc_select = qc.copy()
  qc_select.mo_spec = mo_spec
  labels = mo_spec.get_labels(format='short')

  mo_matrix = core.calc_mo_matrix(qc_select, drv=drv, numproc=numproc,
                                  **kwargs)

  jmo = numpy.zeros((len(drv), len(indices)) + grid.get_shape())
  datalabels = []
  for n, (i, j) in enumerate(indices):
    jmo[:, n] = -0.5 * (mo_matrix[:, i, j] - mo_matrix[:, j, i])
    datalabels.append('j( %s , %s )' % (labels[i], labels[j]))

  if not options.no_output:
    output_written = main_output(jmo, qc, outputname=ofid,
                                 datalabels=datalabels, otype=otype, drv=drv)

  return jmo
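# --- Usage sketch (illustrative only, not part of the library) ---------------
# Computes the transition electronic flux density for two hypothetical MO
# index pairs and writes all three Cartesian components to an npz archive via
# `main_output`. It assumes `qc` and the grid have been prepared as in the
# sketch after `main_output`; the index pairs and the output id are
# placeholders.
jmo_example = calc_jmo(qc, [[2, 3], [2, 4]], drv=['x', 'y', 'z'],
                       numproc=2, otype='npz', ofid='jmo_example')
# jmo_example has shape (3, 2) + grid shape: one entry per component and pair.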
def calc_mo(qc, fid_mo_list, drv=None, otype=None, ofid=None, numproc=None,
            slice_length=None):
  '''Calculates and saves the selected molecular orbitals or the derivatives
  thereof.

  **Parameters:**

    qc.geo_spec, qc.geo_info, qc.ao_spec, qc.mo_spec :
      See :ref:`Central Variables` for details.
    fid_mo_list : str
      Specifies the filename of the molecular orbitals list or list of
      molecular orbital labels (cf. :mod:`orbkit.read.mo_select` for details).
      If fid_mo_list is 'all_mo', creates a list containing all molecular
      orbitals.
    drv : int or string, {None, 'x', 'y', 'z', 0, 1, 2}, optional
      If not None, a derivative calculation of the atomic orbitals
      is requested.
    otype : str or list of str, optional
      Specifies output file type. See :data:`otypes` for details.
    ofid : str, optional
      Specifies output file name. If None, the filename will be based on
      :mod:`orbkit.options.outputname`.
    numproc : int, optional
      Specifies number of subprocesses for multiprocessing.
      If None, uses the value from :mod:`options.numproc`.
    slice_length : int, optional
      Specifies the number of points per subprocess.
      If None, uses the value from :mod:`options.slice_length`.

  **Returns:**

    mo_list : numpy.ndarray, shape=((NMO,) + N)
      Contains the NMO=len(qc.mo_spec) molecular orbitals on a grid.
    mo_info : dict
      Contains information of the selected molecular orbitals and has the
      following members:

      :mo: - List of molecular orbital labels.
      :mo_ii: - List of molecular orbital indices.
      :mo_spec: - Selected elements of mo_spec. See :ref:`Central Variables`
                  for details.
      :mo_in_file: - List of molecular orbital labels within the fid_mo_list
                     file.
      :sym_select: - If True, symmetry labels have been used.
  '''
  mo_info = mo_select(qc.mo_spec, fid_mo_list, strict=True)
  qc_select = qc.todict()
  qc_select['mo_spec'] = mo_info['mo_spec']

  slice_length = options.slice_length if slice_length is None else slice_length
  numproc = options.numproc if numproc is None else numproc

  # Calculate the AOs and MOs
  mo_list = core.rho_compute(qc_select, calc_mo=True, drv=drv,
                             slice_length=slice_length, numproc=numproc)

  if otype is None:
    return mo_list, mo_info

  if ofid is None:
    ofid = '%s_MO' % (options.outputname)

  if not options.no_output:
    if 'h5' in otype:
      output.main_output(mo_list, qc.geo_info, qc.geo_spec, data_id='MO',
                         outputname=ofid, mo_spec=qc_select['mo_spec'],
                         drv=drv, is_mo_output=True)
    # Create Output
    cube_files = []
    for i, j in enumerate(qc_select['mo_spec']):
      outputname = '%s_%s' % (ofid, mo_info['mo'][i])
      comments = ('%s,Occ=%.1f,E=%+.4f' % (mo_info['mo'][i],
                                           j['occ_num'],
                                           j['energy']))
      index = numpy.index_exp[:, i] if drv is not None else i
      output_written = output.main_output(mo_list[index],
                                          qc.geo_info, qc.geo_spec,
                                          outputname=outputname,
                                          comments=comments,
                                          otype=otype,
                                          omit=['h5', 'vmd', 'mayavi'],
                                          drv=drv)
      for fname in output_written:
        if fname.endswith('.cb'):
          cube_files.append(fname)

    if 'vmd' in otype and cube_files != []:
      display('\nCreating VMD network file...' + '\n\t%(o)s.vmd' % {'o': ofid})
      output.vmd_network_creator(ofid, cube_files=cube_files)

  if 'mayavi' in otype:
    datalabels = ['MO %(sym)s, Occ=%(occ_num).2f, E=%(energy)+.4f E_h' % i
                  for i in qc_select['mo_spec']]
    if drv is not None:
      tmp = []
      for i in drv:
        for j in datalabels:
          tmp.append('d/d%s of %s' % (i, j))
      datalabels = tmp
    data = mo_list.reshape((-1,) + grid.get_shape())
    output.main_output(data, qc.geo_info, qc.geo_spec,
                       otype='mayavi', datalabels=datalabels)

  return mo_list, mo_info
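# --- Usage sketch (illustrative only, not part of the library) ---------------
# Evaluates two selected molecular orbitals on the grid and writes one cube
# file per orbital. The selection strings 'homo' and 'lumo' follow orbkit's
# MO-selection syntax (cf. orbkit.read.mo_select) and the output id
# 'mo_example' is a placeholder; `qc` is assumed to be prepared as in the
# sketch after `main_output`.
mo_list, mo_info = calc_mo(qc, ['homo', 'lumo'], otype='cb', ofid='mo_example')
# mo_list has shape (NMO,) + grid shape; mo_info['mo'] holds the matched labels.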
def mo_set(qc, fid_mo_list, drv=None, laplacian=None, otype=None, ofid=None,
           return_all=True, numproc=None, slice_length=None):
  '''Calculates and saves the density or the derivative thereof using
  selected molecular orbitals.

  **Parameters:**

    qc.geo_spec, qc.geo_info, qc.ao_spec, qc.mo_spec :
      See :ref:`Central Variables` for details.
    fid_mo_list : str
      Specifies the filename of the molecular orbitals list or list of
      molecular orbital labels (cf. :mod:`orbkit.read.mo_select` for details).
    otype : str or list of str, optional
      Specifies output file type. See :data:`otypes` for details.
    ofid : str, optional
      Specifies output file name. If None, the filename will be based on
      :mod:`orbkit.options.outputname`.
    drv : int or string, {None, 'x', 'y', 'z', 0, 1, 2}, optional
      If not None, a derivative calculation of the atomic orbitals
      is requested.
    return_all : bool
      If False, no data will be returned.
    numproc : int, optional
      Specifies number of subprocesses for multiprocessing.
      If None, uses the value from :mod:`options.numproc`.
    slice_length : int, optional
      Specifies the number of points per subprocess.
      If None, uses the value from :mod:`options.slice_length`.

  **Returns:**

    datasets : numpy.ndarray, shape=((NSET,) + N)
      Contains the NSET molecular orbital sets on a grid.
    delta_datasets : numpy.ndarray, shape=((NSET,NDRV) + N)
      Contains the NDRV derivatives of the NSET molecular orbital sets on a
      grid. This is only present if derivatives are requested.
    mo_info : dict
      Contains information of the selected molecular orbitals and has the
      following members:

      :mo: - List of molecular orbital labels.
      :mo_ii: - List of molecular orbital indices.
      :mo_spec: - Selected elements of mo_spec. See :ref:`Central Variables`
                  for details.
      :mo_in_file: - List of molecular orbital labels within the fid_mo_list
                     file.
      :sym_select: - If True, symmetry labels have been used.
  '''
  mo_info = mo_select(qc.mo_spec, fid_mo_list, strict=False)
  qc_select = qc.todict()

  drv = options.drv if drv is None else drv
  laplacian = options.laplacian if laplacian is None else laplacian
  slice_length = options.slice_length if slice_length is None else slice_length
  numproc = options.numproc if numproc is None else numproc

  if ofid is None:
    ofid = options.outputname
  if 'h5' in otype and os.path.exists('%s.h5' % ofid):
    raise IOError('%s.h5 already exists!' % ofid)

  datasets = []
  delta_datasets = []
  cube_files = []
  for i_file, j_file in enumerate(mo_info['mo_in_file']):
    display('Starting with the %d. element of the molecular orbital list (%s)...'
            '\n\t' % (i_file+1, fid_mo_list) + str(j_file) +
            '\n\t(Only regarding existing and occupied mos.)\n')

    qc_select['mo_spec'] = []
    for i_mo, j_mo in enumerate(mo_info['mo']):
      if j_mo in j_file:
        if mo_info['sym_select']:
          ii_mo = numpy.argwhere(mo_info['mo_ii'] == j_mo)
        else:
          ii_mo = i_mo
        qc_select['mo_spec'].append(mo_info['mo_spec'][int(ii_mo)])

    data = core.rho_compute(qc_select, drv=drv, laplacian=laplacian,
                            slice_length=slice_length, numproc=numproc)
    datasets.append(data)
    if drv is None:
      rho = data
    elif laplacian:
      rho, delta_rho, laplacian_rho = data
      delta_datasets.extend(delta_rho)
      delta_datasets.append(laplacian_rho)
    else:
      rho, delta_rho = data
      delta_datasets.append(delta_rho)

    if options.z_reduced_density:
      if grid.is_vector:
        display('\nSo far, reducing the density is not supported for '
                'vector grids.\n')
      elif drv is not None:
        display('\nSo far, reducing the density is not supported for the '
                'derivative of the density.\n')
      else:
        rho = integrate.simps(rho, grid.x, dx=grid.delta_[0], axis=0,
                              even='avg')
        rho = integrate.simps(rho, grid.y, dx=grid.delta_[1], axis=0,
                              even='avg')

    if not options.no_output:
      if 'h5' in otype:
        display('Saving to Hierarchical Data Format file (HDF5)...')
        group = '/mo_set:%03d' % (i_file+1)
        display('\n\t%s.h5 in the group "%s"' % (ofid, group))
        output.HDF5_creator(rho, ofid, qc.geo_info, qc.geo_spec, data_id='rho',
                            mode='w', group=group,
                            mo_spec=qc_select['mo_spec'])
        if drv is not None:
          for i, j in enumerate(drv):
            data_id = 'rho_d%s' % j
            output.HDF5_creator(delta_rho[i], ofid, qc.geo_info, qc.geo_spec,
                                data_id=data_id, data_only=True, mode='a',
                                group=group, mo_spec=qc_select['mo_spec'])
          if laplacian:
            data_id = 'rho_laplacian'
            output.HDF5_creator(laplacian_rho, ofid, qc.geo_info, qc.geo_spec,
                                data_id=data_id, data_only=True, mode='a',
                                group=group, mo_spec=qc_select['mo_spec'])

      fid = '%s_%03d' % (ofid, i_file+1)
      cube_files.append('%s.cb' % fid)
      comments = ('mo_set:' + ','.join(j_file))
      output.main_output(rho, qc.geo_info, qc.geo_spec, outputname=fid,
                         otype=otype, omit=['h5', 'vmd', 'mayavi'],
                         comments=comments)
      if drv is not None:
        for i, j in enumerate(drv):
          fid = '%s_%03d_d%s' % (ofid, i_file+1, j)
          cube_files.append('%s.cb' % fid)
          comments = ('d%s_of_mo_set:' % j + ','.join(j_file))
          output.main_output(delta_rho[i], qc.geo_info, qc.geo_spec,
                             outputname=fid, otype=otype,
                             omit=['h5', 'vmd', 'mayavi'], comments=comments)
        if laplacian:
          fid = '%s_%03d_laplacian' % (ofid, i_file+1)
          cube_files.append('%s.cb' % fid)
          comments = ('laplacian_of_mo_set:' + ','.join(j_file))
          output.main_output(laplacian_rho, qc.geo_info, qc.geo_spec,
                             outputname=fid, otype=otype,
                             omit=['h5', 'vmd', 'mayavi'], comments=comments)

  if 'vmd' in otype and cube_files != []:
    display('\nCreating VMD network file...' + '\n\t%(o)s.vmd' % {'o': ofid})
    output.vmd_network_creator(ofid, cube_files=cube_files)

  datasets = numpy.array(datasets)
  if drv is None:
    if 'mayavi' in otype:
      output.main_output(datasets, qc.geo_info, qc.geo_spec,
                         otype='mayavi', datalabels=mo_info['mo_in_file'])
    return datasets, mo_info
  else:
    delta_datasets = numpy.array(delta_datasets)
    if 'mayavi' in otype:
      datalabels = []
      for i in mo_info['mo_in_file']:
        datalabels.extend(['d/d%s of %s' % (j, i) for j in drv])
        if laplacian:
          datalabels.append('laplacian of %s' % i)
      output.main_output(delta_datasets.reshape((-1,) + grid.get_shape()),
                         qc.geo_info, qc.geo_spec, otype='mayavi',
                         datalabels=datalabels)
    return datasets, delta_datasets, mo_info
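# --- Usage sketch (illustrative only, not part of the library) ---------------
# Computes the density of two hypothetical MO sets and, since derivatives are
# requested, also their x/y/z derivatives, written as cube files. Each inner
# list is one MO set; the symmetry-style labels and the output id are
# placeholders, and `qc` is assumed to be prepared as in the earlier sketches.
sets, d_sets, info = mo_set(qc, [['1.1', '2.1'], ['1.5', '2.5']],
                            drv=['x', 'y', 'z'], otype='cb',
                            ofid='set_example')
# `sets` holds one density per MO set; `d_sets` the corresponding derivatives
# (plus the Laplacian of each set when laplacian=True).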
# Import paths for the high-level read/output routines and the grid helpers
# are assumed to match the orbkit package layout used in this test suite.
from orbkit.read.high_level import main_read
from orbkit.output.high_level import main_output
from orbkit.grid import set_grid, get_shape
from orbkit.test.tools import equal
from orbkit.qcinfo import QCinfo
from orbkit import options
import os, inspect, tempfile, numpy

options.quiet = True

tests_home = os.path.dirname(inspect.getfile(inspect.currentframe()))
folder = os.path.join(tests_home, '../outputs_for_testing')
filepath = os.path.join(folder, 'NaCl_molden_files.tar.gz')
qc_old = main_read(filepath)

tests_home = tempfile.gettempdir()
filepath = os.path.join(tests_home, 'tmp.npz')

main_output(qc_old, outputname=filepath, otype='native', ftype='numpy')
qc_new = main_read(filepath)
equal(qc_old, qc_new)
os.remove(filepath)

# Test restart from standard npz data output
set_grid(0, 0, 0, is_vector=False)
main_output(numpy.zeros(get_shape()), qc=qc_old, outputname=filepath)
qc_new = main_read(filepath)
equal(qc_old, qc_new)
os.remove(filepath)
# Import paths for the high-level read/output routines are assumed to match
# the orbkit package layout used in this test suite.
from orbkit.read.high_level import main_read
from orbkit.output.high_level import main_output
from orbkit.grid import set_grid, get_shape
from orbkit.test.tools import equal
from orbkit.qcinfo import QCinfo
from orbkit import options
import os, inspect, tempfile, shutil, numpy

options.quiet = True

tests_home = os.path.dirname(inspect.getfile(inspect.currentframe()))
folder = os.path.join(tests_home, '../outputs_for_testing')
filepath = os.path.join(folder, 'NaCl_molden_files.tar.gz')
qc_old = main_read(filepath)

test_dir = tempfile.mkdtemp()
testfile = os.path.join(test_dir, 'tmp.hdf5')

main_output(qc_old, outputname=testfile, otype='native', ftype='hdf5')
qc_new = main_read(testfile)
equal(qc_old, qc_new)

# Test restart from standard HDF5 data output
set_grid(0, 0, 0, is_vector=False)
main_output(numpy.zeros(get_shape()), qc=qc_old, outputname=testfile)
qc_new = main_read(testfile)
equal(qc_old, qc_new)

shutil.rmtree(test_dir)