def info():
    """Show versions of GPAW and its dependencies.

    Prints one line per component: Python itself, the importable Python
    packages (gpaw, ase, numpy, scipy), the _gpaw C extension, the
    gpaw-python parallel interpreter (if on PATH), compile-time features
    (FFTW, ScaLAPACK, libvdwxc) and the configured PAW-dataset paths.
    Boolean feature flags are rendered as 'yes'/'no'.
    """
    import shutil  # local import: avoids touching the module import block

    results = [('python-' + sys.version.split()[0], sys.executable)]
    for name in ['gpaw', 'ase', 'numpy', 'scipy']:
        try:
            module = import_module(name)
        except ImportError:
            # Missing dependency: report it as 'no' below.
            results.append((name, False))
        else:
            results.append((name + '-' + module.__version__,
                            module.__file__.rsplit('/', 1)[0] + '/'))
    module = import_module('_gpaw')
    # The C extension may be compiled into the interpreter (gpaw-python),
    # in which case it has no __file__.
    results.append(
        ('_gpaw', op.normpath(getattr(module, '__file__', 'built-in'))))
    # shutil.which is portable and avoids spawning an external 'which'
    # process, which raised FileNotFoundError on systems lacking it.
    results.append(('parallel', shutil.which('gpaw-python') or False))
    results.append(('FFTW', fftw.FFTPlan is fftw.FFTWPlan))
    results.append(('scalapack', compiled_with_sl()))
    results.append(('libvdwxc', compiled_with_libvdwxc()))
    paths = ['{0}: {1}'.format(i + 1, path)
             for i, path in enumerate(gpaw.setup_paths)]
    results.append(('PAW-datasets', '\n '.join(paths)))
    for a, b in results:
        if isinstance(b, bool):
            b = ['no', 'yes'][b]
        print('{0:16}{1}'.format(a, b))
def info():
    """Show versions of GPAW and its dependencies.

    Collects (label, value) pairs for Python, the importable packages
    (gpaw, ase, numpy, scipy) with an optional abbreviated git hash, the
    libxc version, the _gpaw C extension, the gpaw-python parallel
    interpreter, MPI/ScaLAPACK/Elpa/FFTW/libvdwxc availability and the
    PAW-dataset search paths.  Only rank 0 prints; boolean flags are
    rendered as 'yes'/'no'.
    """
    results = [('python-' + sys.version.split()[0], sys.executable)]
    for name in ['gpaw', 'ase', 'numpy', 'scipy']:
        try:
            module = import_module(name)
        except ImportError:
            results.append((name, False))
        else:
            # Search for git hash
            githash = search_current_git_hash(module)
            if githash is None:
                githash = ''
            else:
                # Abbreviate to the first 10 characters.
                githash = '-{:.10}'.format(githash)
            results.append((name + '-' + module.__version__ + githash,
                            module.__file__.rsplit('/', 1)[0] + '/'))
    results.append(('libxc-' + _gpaw.libxc_version, ''))
    module = import_module('_gpaw')
    # BUG FIX: reset githash here.  Previously the stale value from the
    # last loop iteration (scipy) leaked into the '_gpaw' label whenever
    # the C extension had no githash() function.
    githash = ''
    if hasattr(module, 'githash'):
        githash = '-{:.10}'.format(module.githash())
    results.append(
        ('_gpaw' + githash,
         op.normpath(getattr(module, '__file__', 'built-in'))))
    # NOTE(review): 'which' is Unix-only; shutil.which would be portable.
    p = subprocess.Popen(['which', 'gpaw-python'], stdout=subprocess.PIPE)
    results.append(('parallel',
                    p.communicate()[0].strip().decode() or False))
    results.append(('MPI enabled', have_mpi))
    if have_mpi:
        have_sl = compiled_with_sl()
        have_elpa = LibElpa.have_elpa()
    else:
        # Without MPI neither library can be used, whatever was compiled in.
        have_sl = have_elpa = 'no (MPI unavailable)'
    results.append(('scalapack', have_sl))
    results.append(('Elpa', have_elpa))
    results.append(('FFTW', fftw.FFTPlan is fftw.FFTWPlan))
    results.append(('libvdwxc', compiled_with_libvdwxc()))
    paths = ['{0}: {1}'.format(i + 1, path)
             for i, path in enumerate(gpaw.setup_paths)]
    # Indent continuation lines to align with the 25-column value field.
    results.append(('PAW-datasets', '\n{:25}'.format('').join(paths)))
    if rank == 0:
        for a, b in results:
            if isinstance(b, bool):
                b = ['no', 'yes'][b]
            print('{0:25}{1}'.format(a, b))
# NOTE(review): fragment begins inside the failure-reporting branch of a
# test helper (run()) defined above this chunk.  Python 2 print syntax.
        # Dump the arguments that produced the mismatch before aborting.
        print >> stderr, 'Args:'
        print >> stderr, formula, vacuum, cell, pbc, morekwargs
        print >> stderr, parallel
        raise AssertionError(msg)

# only kpt-parallelization, this is the reference
run()

# kpt-parallelization=2, state-parallelization=2,
# domain-decomposition=(1,2,1)
parallel['band'] = 2
parallel['domain'] = (1, 2, 1)
run()

if compiled_with_sl():
    # kpt-parallelization=2, state-parallelization=2,
    # domain-decomposition=(1,2,1)
    # with blacs
    parallel['sl_default'] = (2, 2, 2)
    run()

# perform spin polarization test
parallel = dict()
basekwargs = dict(
    mode='fd',
    maxiter=3,
    nbands=6,
    kpts=(4, 4, 4),  # 8 kpts in the IBZ
    parallel=parallel)
# NOTE(review): fragment begins inside main()'s error-reporting branch;
# the matching 'if'/'try' is above this chunk.
        print('gemv err', gemv_err)
        print('r2k err', r2k_err)
        print('rk_err', rk_err)
        print('hemm_err', hemm_err)
    else:
        # This rank saw no error: contribute zeros to the global sums.
        gemm_err = 0.0
        gemv_err = 0.0
        r2k_err = 0.0
        rk_err = 0.0
        hemm_err = 0.0

    # Sum errors over all ranks so every rank fails (or passes) together.
    gemm_err = world.sum(gemm_err)  # We don't like exceptions on only one cpu
    gemv_err = world.sum(gemv_err)
    r2k_err = world.sum(r2k_err)
    rk_err = world.sum(rk_err)
    hemm_err = world.sum(hemm_err)

    equal(gemm_err, 0, tol)
    equal(gemv_err, 0, tol)
    equal(r2k_err, 0, tol)
    equal(rk_err, 0, tol)
    equal(hemm_err, 0, tol)


if __name__ in ['__main__', '__builtin__']:
    if not compiled_with_sl():
        print('Not built with ScaLAPACK. Test does not apply.')
    else:
        # Exercise both real and complex code paths.
        main(dtype=float)
        main(dtype=complex)
# NOTE(review): fragment begins inside the failure-reporting branch of a
# test helper (run()) defined above this chunk.  Python 2 print syntax.
        # Dump the arguments that produced the mismatch before aborting.
        print >> stderr, 'Args:'
        print >> stderr, formula, vacuum, cell, pbc, morekwargs
        print >> stderr, parallel
        raise AssertionError(msg)

# reference:
# domain-decomposition = (1, 2, 2)
run()

# state-parallelization = 2,
# domain-decomposition = (1, 2, 1)
parallel['band'] = 2
parallel['domain'] = (1, 2, 1)
run()

if compiled_with_sl():
    # state-parallelization = 2,
    # domain-decomposition = (1, 2, 1)
    # with blacs
    parallel['sl_default'] = (2, 2, 2)
    run()

    # domain-decomposition = (1, 2, 2)
    # with blacs
    del parallel['band']
    del parallel['domain']
    run()

# perform spin polarization test
parallel = dict()
# NOTE(review): fragment begins inside a module-level 'exclude' list
# literal whose opening bracket is above this chunk.
            'parallel/scalapack_diag_simple.py',
            'parallel/realspace_blacs.py',
            'AA_exx_enthalpy.py',
            'bse_aluminum.py',
            'bse_diamond.py',
            'bse_silicon.py',
            'bse_vs_lrtddft.py',
            'fileio/parallel.py']

# These tests only run on exactly 4 MPI tasks.
if mpi.size != 4:
    exclude += ['parallel/lcao_parallel.py']
    exclude += ['parallel/fd_parallel.py']
    exclude += ['parallel/scalapack_mpirecv_crash.py']
    exclude += ['parallel/scalapack_pdlasrt_hang.py']

if mpi.size == 1 or not compiled_with_sl():
    exclude += ['parallel/submatrix_redist.py']

# Skipped when running in parallel without ScaLAPACK support.
if mpi.size != 1 and not compiled_with_sl():
    exclude += ['ralda_energy_H2.py',
                'ralda_energy_N2.py',
                'ralda_energy_Ni.py',
                'ralda_energy_Si.py',
                'bse_sym.py',
                'bse_silicon.py',
                'gwsi.py',
                'rpa_energy_N2.py',
                'pw/fulldiag.py',
                'pw/fulldiagk.py',
                'au02_absorption.py']
## general_diag_ex_err = 0.0 general_diag_dc_err = 0.0 ## general_diag_mr3_err = 0.0 inverse_chol_err = 0.0 # We don't like exceptions on only one cpu ## diag_ex_err = world.sum(diag_ex_err) diag_dc_err = world.sum(diag_dc_err) ## diag_mr3_err = world.sum(diag_mr3_err) ## general_diag_ex_err = world.sum(general_diag_ex_err) general_diag_dc_err = world.sum(general_diag_dc_err) ## general_diag_mr3_err = world.sum(general_diag_mr3_err) inverse_chol_err = world.sum(inverse_chol_err) ## assert diag_ex_err < tol assert diag_dc_err < tol ## assert diag_mr3_err < tol ## assert general_diag_ex_err < tol assert general_diag_dc_err < tol ## assert general_diag_mr3_err < tol assert inverse_chol_err < tol if __name__ in ['__main__', '__builtin__']: if not compiled_with_sl(): print('Not built with ScaLAPACK. Test does not apply.') else: main(dtype=float) main(dtype=complex)
# NOTE(review): fragment begins inside a module-level 'exclude' list
# literal whose opening bracket is above this chunk.
    'parallel/pblas.py',
    'parallel/scalapack.py',
    'parallel/scalapack_diag_simple.py',
    'parallel/realspace_blacs.py',
    'exx/AA_enthalpy.py',
    'exx/exx_scf.py',
    'response/bse_aluminum.py',
    'response/iron_sf_ALDA.py',
    'response/bse_MoS2_cut.py',
    'fileio/parallel.py',
    'parallel/diamond_gllb.py',
    'parallel/lcao_parallel_kpt.py',
    'parallel/fd_parallel_kpt.py',
    'response/na_plasmons.py',
    'response/na_plasmons_tetrahedron.py'
]

# These tests only run on exactly 4 MPI tasks.
if mpi.size != 4:
    exclude += [
        'parallel/scalapack_mpirecv_crash.py',
        'parallel/scalapack_pdlasrt_hang.py',
        'response/bse_silicon.py'
    ]

if mpi.size == 1 or not compiled_with_sl():
    exclude += [
        'parallel/submatrix_redist.py',
        'lcaotddft/parallel_options.py'
    ]

# Skipped when running in parallel without ScaLAPACK support.
if mpi.size != 1 and not compiled_with_sl():
    exclude += [
        'ralda/ralda_energy_H2.py',
        'ralda/ralda_energy_N2.py',
        'ralda/ralda_energy_Ni.py',
        'ralda/ralda_energy_Si.py',
        'response/bse_silicon.py',
        'response/bse_MoS2_cut.py',
        'response/gwsi.py',
        'response/gw_MoS2_cut.py',
        'rpa/rpa_energy_N2.py',
        'pw/expert_diag.py',
        'pw/fulldiag.py',
        'pw/fulldiagk.py',
        'response/gw_hBN_extrapolate.py',
        'response/gw0_hBN.py',
        'response/au02_absorption.py'
    ]
block_desc.diagonalize_ex(H_mm, C_mm, eps_M) # Collect eigenvectors on MASTER C_MM = local_desc.empty() redistributor2 = Redistributor(world, block_desc, local_desc) redistributor2.redistribute(C_mm, C_MM) # Return eigenvalues and -vectors on Master if world.rank == MASTER: return eps_M, C_MM else: return None, None from gpaw.utilities import compiled_with_sl if __name__ == '__main__' and compiled_with_sl(): # Test script which should be run on 1, 2, 4, or 8 CPUs if world.size == 1: blacsgrid = (1, 1) elif world.size == 2: blacsgrid = (2, 1) elif world.size == 4: blacsgrid = (2, 2) elif world.size == 8: blacsgrid = (4, 2) else: raise RuntimeError('Please use 1, 2, 4, or 8 nodes for this test') if world.rank == MASTER: a = np.diag(range(1,51)).astype(float)
# NOTE(review): fragment begins inside main(); mixes Python 2 print
# statements with print() calls - this file predates Python 3.
    eps_n = np.zeros(bd.mynbands)
    blacs_diagonalize(ksl, H_Nn, U_nN, eps_n)
    print "U_nN"
    parallelprint(world, U_nN)
    print "eps_n"
    parallelprint(world, eps_n)

    # Inverse Cholesky
    S_Nn = np.zeros((nbands, mynbands), dtype=dtype)
    C_nN = np.empty((mynbands, nbands), dtype=dtype)
    if ksl.Nndescriptor:
        # hack: only ranks that own part of the Nn distribution fill S_Nn.
        scalapack_set(ksl.Nndescriptor, S_Nn, 0.1, 75.0, 'L')
    else:
        assert gd.comm.rank != 0
    print "S_Nn"
    parallelprint(world, S_Nn)
    blacs_inverse_cholesky(ksl, S_Nn, C_nN)
    print "C_nN"
    parallelprint(world, C_nN)


if __name__ in ['__main__', '__builtin__']:
    if not compiled_with_sl(True):
        print('Not built with ScaLAPACK. Test does not apply.')
    else:
        # Exercise both real and complex code paths.
        main(dtype=float)
        main(dtype=complex)
# NOTE(review): module-level test-driver statements; 'GR' (the graphene
# Atoms object, presumably) is defined above this chunk, and the loop
# body continues past it.
atoms = GR

# Ground-state settings: all four combinations of symmetry on/off and
# gamma-point inclusion on/off.
GSsettings = [
    {'symmetry': 'off', 'kpts': {'density': 2.5, 'gamma': False}},
    {'symmetry': {}, 'kpts': {'density': 2.5, 'gamma': False}},
    {'symmetry': 'off', 'kpts': {'density': 2.5, 'gamma': True}},
    {'symmetry': {}, 'kpts': {'density': 2.5, 'gamma': True}}]

# Dielectric-function settings: all four combinations of point-group and
# time-reversal symmetry handling.
DFsettings = [
    {'disable_point_group': True, 'disable_time_reversal': True},
    {'disable_point_group': False, 'disable_time_reversal': True},
    {'disable_point_group': True, 'disable_time_reversal': False},
    {'disable_point_group': False, 'disable_time_reversal': False}]

# Extra block-parallel case; only possible in parallel with ScaLAPACK.
if world.size > 1 and compiled_with_sl():
    DFsettings.append({'disable_point_group': False,
                       'disable_time_reversal': False,
                       'nblocks': 2})

for GSkwargs in GSsettings:
    calc = GPAW(h=0.18,
                mode=PW(600),
                occupations=FermiDirac(0.2),
                **GSkwargs)
    atoms.set_calculator(calc)
    atoms.get_potential_energy()
    # Write everything (including wave functions) for the response step.
    calc.write('gr.gpw', 'all')

    dfs = []
# NOTE(review): fragment begins mid-expression inside a class-factory
# helper that builds parametrized subclasses of
# UTConstantWavefunctionBlacsSetup.  'async' is used as an identifier,
# which is legal only before Python 3.7 (it is now a keyword).
            + sep + {False: 'Synchronous', True: 'Asynchronous'}[async]

    # Synthesize a test-case class carrying the chosen parameters as
    # class attributes.
    class MetaPrototype(UTConstantWavefunctionBlacsSetup, object):
        __doc__ = UTConstantWavefunctionBlacsSetup.__doc__
        dtype = dtype
        parstride_bands = parstride_bands
        blocking = blocking
        async = async
    MetaPrototype.__name__ = classname
    return MetaPrototype

# -------------------------------------------------------------------

if __name__ in ['__main__', '__builtin__'] and compiled_with_sl(True):
    # We may have been imported by test.py, if so we should redirect to logfile
    if __name__ == '__builtin__':
        testrunner = CustomTextTestRunner('ut_hsblacs.log', verbosity=2)
    else:
        from gpaw.utilities import devnull
        # Only rank 0 writes to stdout; other ranks write to devnull.
        stream = (world.rank == 0) and sys.stdout or devnull
        testrunner = TextTestRunner(stream=stream, verbosity=2)

    parinfo = []
    # Initial Verification only tests case : dtype = float
    for test in [UTBandParallelBlacsSetup_Blocked]:  #, UTBandParallelBlacsSetup_Strided]:
        info = ['', test.__name__, test.__doc__.strip('\n'), '']
        testsuite = initialTestLoader.loadTestsFromTestCase(test)
        map(testrunner.stream.writeln, info)
world.broadcast(eps_N, 0) # all MPI tasks now have eps_N world.barrier() # wait for everyone to finish if rank == 0: print 'ScaLAPACK diagonalize_dc', t2 - t1 # Create replicated NumPy array diagonal = np.eye(nbands, dtype=float) offdiagonal = np.tril(np.ones((nbands, nbands)), -1) H0 = beta * diagonal + alpha * offdiagonal E0 = np.empty((nbands), dtype=float) t1 = time() diagonalize(H0, E0) t2 = time() if rank == 0: print 'LAPACK diagonalize', t2 - t1 delta = abs(E0 - eps_N).max() if rank == 0: print delta assert delta < tol if __name__ in ['__main__', '__builtin__']: if not compiled_with_sl(True): print('Not built with ScaLAPACK. Test does not apply.') else: main()
# NOTE(review): fragment begins mid-expression inside a class-factory
# helper building the test-case class name from its parameters.  'async'
# is used as an identifier - legal only before Python 3.7.
            + sep + {'fast': 'Fast', 'light': 'Light', 'intdiv': 'Intdiv',
                     'nonintdiv1': 'Nonintdiv1',
                     'nonintdiv2': 'Nonintdiv2'}[blocking] \
            + sep + {False: 'Synchronous', True: 'Asynchronous'}[async]

    # Synthesize a test-case class carrying the chosen parameters as
    # class attributes.
    class MetaPrototype(UTConstantWavefunctionBlacsSetup, object):
        __doc__ = UTConstantWavefunctionBlacsSetup.__doc__
        dtype = dtype
        parstride_bands = parstride_bands
        blocking = blocking
        async = async
    MetaPrototype.__name__ = classname
    return MetaPrototype

# -------------------------------------------------------------------

if __name__ in ['__main__', '__builtin__'] and compiled_with_sl():
    # We may have been imported by test.py, if so we should redirect to logfile
    if __name__ == '__builtin__':
        testrunner = CustomTextTestRunner('ut_hsblacs.log', verbosity=2)
    else:
        from gpaw.utilities import devnull
        # Only rank 0 writes to stdout; other ranks write to devnull.
        stream = (world.rank == 0) and sys.stdout or devnull
        testrunner = TextTestRunner(stream=stream, verbosity=2)

    parinfo = []
    # Initial Verification only tests case : dtype = float
    for test in [UTBandParallelBlacsSetup_Blocked]:  #, UTBandParallelBlacsSetup_Strided]:
        info = ['', test.__name__, test.__doc__.strip('\n'), '']
        testsuite = initialTestLoader.loadTestsFromTestCase(test)
        map(testrunner.stream.writeln, info)
        testresult = testrunner.run(testsuite)
block_desc.diagonalize_ex(H_mm, C_mm, eps_M) # Collect eigenvectors on MASTER C_MM = local_desc.empty() redistributor2 = Redistributor(world, block_desc, local_desc) redistributor2.redistribute(C_mm, C_MM) # Return eigenvalues and -vectors on Master if world.rank == MASTER: return eps_M, C_MM else: return None, None from gpaw.utilities import compiled_with_sl if __name__ == '__main__' and compiled_with_sl(): # Test script which should be run on 1, 2, 4, or 8 CPUs if world.size == 1: blacsgrid = (1, 1) elif world.size == 2: blacsgrid = (2, 1) elif world.size == 4: blacsgrid = (2, 2) elif world.size == 8: blacsgrid = (4, 2) else: raise RuntimeError('Please use 1, 2, 4, or 8 nodes for this test') if world.rank == MASTER: a = np.diag(range(1, 51)).astype(float)