Code Example #1
def TestSolveOverlapSpeed():
    def timeIt(func):
        t1 = time.time()
        func()
        t2 = time.time()
        Print("  Function '%s' took %4.1f s." % (func.func_name, (t2 - t1)))

    numSolves = 100

    Print("")
    Print("Now testing multiple S^-1 * psi...")
    pyprop.Redirect.Enable(silent=True)

    seed(0)

    conf = pyprop.Load("config_eigenvalues.ini")
    psi = pyprop.CreateWavefunction(conf)
    tmpPsi = psi.Copy()

    Print("  Size of wavefunction is: %s" % repr(psi.GetData().shape))

    #Calculate S^-1 * psi
    Print("  Performing %i solves..." % numSolves)

    def solve():
        for i in range(numSolves):
            psi.GetRepresentation().MultiplyOverlap(tmpPsi)

    timeIt(solve)

    #finish and cleanup
    pypar.barrier()
    pyprop.Redirect.Disable()
    Print("\n...done!")
Code Example #2
def TestSolveOverlapSpeed():
	def timeIt(func):
		t1 = time.time()
		func()
		t2 = time.time()
		Print("  Function '%s' took %4.1f s." % (func.func_name, (t2-t1)))

	numSolves = 100

	Print("")
	Print("Now testing multiple S^-1 * psi...")
	pyprop.Redirect.Enable(silent=True)

	seed(0)

	conf = pyprop.Load("config_eigenvalues.ini")
	psi = pyprop.CreateWavefunction(conf)
	tmpPsi = psi.Copy()

	Print("  Size of wavefunction is: %s" % repr(psi.GetData().shape)) 

	#Calculate S^-1 * psi
	Print("  Performing %i solves..." % numSolves)
	def solve():
		for i in range(numSolves):
			psi.GetRepresentation().MultiplyOverlap(tmpPsi)
	timeIt(solve)
	
	#finish and cleanup
	pypar.barrier()
	pyprop.Redirect.Disable()
	Print("\n...done!")
Code Example #3
File: Environment.py Project: shambo001/peat
	def wait(self, error=False):
	
		'''This method will not return until all processes in the environment have called it.
		
		This is a wrapper around MPI_Barrier which handles the case where MPI is not available'''

		from inspect import stack
	
		if self.verbose is True:
			string = '(%s) Waiting at line %d of %s' % (datetime.datetime.now().strftime('%H:%M:%S'),
					 stack()[1][0].f_lineno, stack()[1][0].f_code.co_filename)
			self.log(string)
			
		if Environment.isParallel:
			import pypar
			pypar.barrier()	
			#Because MPI_ABORT doesn't work in pypar if called from one process,
			#we need a way for processes to communicate to each other if an error occurred
			#in the code they executed before this barrier. We do a scatter/gather of
			#the error parameter - this isn't very efficient, but it's all we can do for now
			errors = self.combineArray([error])
			if True in errors:
				self.exit(1)
				
		if self.verbose is True:
			string = '(%s) Finished waiting' % (datetime.datetime.now().strftime('%H:%M:%S'))
			self.log(string)		
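
The interesting detail in wait() is that the barrier doubles as an error checkpoint: since aborting from a single rank is unreliable in pypar, every rank publishes its error flag and all ranks exit together if any of them failed. Below is a minimal sketch of the same idea using only pypar.send/receive/barrier; combineArray is this project's own helper, so the hypothetical checked_barrier() stands in for the whole pattern:

import sys
import pypar

def checked_barrier(error=False):
    """Barrier that also agrees on whether any rank has failed."""
    rank, size = pypar.rank(), pypar.size()
    pypar.barrier()
    if rank == 0:
        any_error = error
        for worker in range(1, size):
            any_error = pypar.receive(worker) or any_error
        for worker in range(1, size):
            pypar.send(any_error, worker)
    else:
        pypar.send(error, 0)
        any_error = pypar.receive(0)
    if any_error:
        pypar.finalize()
        sys.exit(1)
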
Code Example #4
    def multi_proc_mutate_and_integrate(self, prmt, mutation):
        """subroutine to send jobs to multiple processors using pypar
        
        It sends one network (not the complete population) to each processor,
        and synchronization is handled explicitly.
        
        Args:
            prmt (dict): the inits parameters for integration
            mutation (list): tuple (id,mut) indicating the mutation flag of integration for each network
        
        Returns:
            int: the total number of mutations
        """
        numproc = self.numproc  #number of procs available for integration; proc 0 is used as the master
        l = len(mutation)
        n_mut = 0
        for index_job in range(l):
            nproc = 1 + index_job % (
                numproc - 1
            )  #proc to send the job to; workers start at 1 since proc 0 is the master
            args = {
                'net': self.genus[index_job],
                'prmt': prmt,
                'nnetwork': index_job,
                'tgeneration': self.tgeneration,
                'mutation': mutation[index_job]
            }
            pypar.send((
                'net.mutate_and_integrate(prmt,nnetwork,tgeneration,mutation)',
                args), nproc)  #send integration job to the selected proc
            if ((index_job + 1) % (numproc - 1) == 0):
                pypar.barrier(
                )  # every numproc-1 jobs sent, wait for all workers to finish their jobs and synchronize
                results = [
                    pypar.receive(worker) for worker in range(1, numproc)
                ]  #receives results from all processors
                for i in results:
                    n_mut += i[0]  #updates number of mutations
                    self.genus[i[1]] = i[2]  #updates mutated network
                    self.update_fitness(i[1], i[3])  #updates fitness values

        #at that point, we may have jobs still running on some subset of the procs, but we only take the results from the last working processors
        if (l % (numproc - 1) > 0):
            for i in range(l % (numproc - 1), numproc - 1):
                pypar.send(
                    ('0', {}), i + 1
                )  #send dummy jobs to the idle processors for synchronization purposes
            pypar.barrier()  #synchronize the proc
            results = [pypar.receive(worker) for worker in range(1, numproc)]
            #only take the results we are interested in
            for i in range(l % (numproc - 1)):
                n_mut += results[i][0]
                self.genus[results[i][1]] = results[i][2]
                self.update_fitness(results[i][1],
                                    results[i][3])  #updates fitness values
        # Shut down workers
        #for worker in range(1,numproc):
        #    pypar.send(SystemExit(),worker)
        return n_mut
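
The dispatch logic above is easier to see without the integration details: the master (proc 0) deals jobs to workers 1..numproc-1 in round-robin order and synchronizes after every full round before collecting results. A stripped-down sketch of the master side, with hypothetical job objects, ignoring the leftover-jobs case handled above:

import pypar

def dispatch(jobs):
    numproc = pypar.size()
    nworkers = numproc - 1                    # proc 0 is the master
    for k, job in enumerate(jobs):
        pypar.send(job, 1 + k % nworkers)     # round-robin over workers
        if (k + 1) % nworkers == 0:
            pypar.barrier()                   # workers call barrier() at the same point
            results = [pypar.receive(w) for w in range(1, numproc)]
            # ... consume the round's results here ...
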
Code Example #5
File: topomult.py Project: wcarthur/topomultipliers
def run():
    """
    Run the process, handling any parallelisation.
    """
    
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config",
                        help="Configuration file",
                        type=str)
    parser.add_argument("-i", "--inputfile",
                        help="Input DEM file (ascii format)",
                        type=str)
    parser.add_argument("-o", "--output", 
                        help="Output path",
                        type=str)
    parser.add_argument("-v", "--verbose", 
                        help=("Verbose output (not available when invoking"
                                "parallel run)") )
                                
    args = parser.parse_args() 
                          
    logfile = 'topomult.log'
    loglevel = 'INFO'
    
    if args.verbose:
        verbose = args.verbose
    else:
        verbose = False

    if args.config:
        cfg = ConfigParser.ConfigParser()
        cfg.read(args.config)

        input_file = cfg.get('Input', 'Filename')
        output_path = cfg.get('Output', 'Path')
        logfile = cfg.get('Logging', 'LogFile')
        loglevel = cfg.get('Logging', 'LogLevel')
        verbose = cfg.getboolean('Logging', 'Verbose')
        
    if args.inputfile:
        input_file = args.inputfile

    if args.output:
        output_path = args.output
    
    attemptParallel()
    if pp.size() > 1 and pp.rank() > 0:
        logfile += '-' + str(pp.rank())
        verbose = False  # to stop output to console

    flStartLog(logfile, loglevel, verbose)
    
    pp.barrier()
    work(input_file, output_path,
             ['n','s','e','w','ne','nw','se','sw'])
    pp.barrier()
    
    pp.finalize()
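
Note the per-rank log file naming: every rank except 0 logs to its own file and console output is suppressed, which keeps mpirun output readable. The same trick in isolation (a sketch, with pp bound to the pypar module as in the example):

import pypar as pp

logfile = 'run.log'
verbose = True
if pp.size() > 1 and pp.rank() > 0:
    logfile += '-' + str(pp.rank())   # e.g. run.log-3 on rank 3
    verbose = False                   # workers stay quiet on the console
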
Code Example #6
File: parallel.py Project: nisarahmadkarimzada/eqrm
 def barrier(self):
     """
     Synchronisation point. Makes processors wait until all
            processors have reached this point.
     """
     if self.is_parallel is True:
         import pypar
         pypar.barrier()
Code Example #7
File: parallel.py Project: dynaryu/eqrm
 def barrier(self):
     """
     Synchronisation point. Makes processors wait until all
            processors have reached this point.
     """
     if self.is_parallel is True:
         import pypar
         pypar.barrier()
Code Example #8
File: mpi_nstat.py Project: qiuxing/corrperm
def abnormalexit(reason):
    """this tells each worker node to exit, then kills the server process.
       this should only be called by the server node"""
    print 'abnormal exit'
    print reason
    sendtoall(('Die', 0))
    pypar.barrier()
    pypar.finalize()
    sys.exit(2)
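
abnormalexit() only makes sense together with a worker loop that honours the 'Die' message; sendtoall() and the message format are specific to this project, so the loop below is an assumed sketch of the worker side:

import sys
import pypar

def worker_loop(handle_job):
    while True:
        msg, payload = pypar.receive(0)   # blocking receive from the server
        if msg == 'Die':
            pypar.barrier()               # matches the barrier in abnormalexit()
            pypar.finalize()
            sys.exit(2)
        handle_job(msg, payload)
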
Code Example #9
def _mpi_end_embarrass():
    global _mpi_initialized
    if _mpi_initialized:
        import pypar
        print(pypar.rank() + 1, " of ", pypar.size(), ": BARRIER")
        pypar.barrier()
        print(pypar.rank() + 1, " of ", pypar.size(), ": FINALIZE")
        pypar.finalize()
        _mpi_initialized = False
    else:
        print("Non-MPI run : Exit without MPI_Finalize")
Code Example #10
File: __init__.py Project: AtomAleks/PyProp
def SerialPrint(str, proc=-1):
	if ProcCount == 1:
		print str
	else:
		if proc==-1: procList = range(ProcCount)
		else: procList = [proc]
		for i in procList:
			if i == ProcId:
				print "Proc %4i: %s" % (ProcId, str,)
			sys.stdout.flush()	
			pypar.barrier()
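
The barrier inside the loop is what makes the output ordered: every rank walks the same procList, only the matching rank prints, and nobody takes the next turn until all ranks have passed the barrier. A usage sketch, assuming ProcId and ProcCount are the pypar rank and size as in this module:

SerialPrint("hello from this rank")        # printed once per rank, in rank order
SerialPrint("master checking in", proc=0)  # printed by proc 0 only
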
Code Example #11
File: tasks.py Project: AtomAleks/einelektron
def CreatePath(absFileName):
	"""Create directories in abspath
	
	"""
	logger = GetFunctionLogger()
	if pyprop.ProcId == 0:
		filePath = os.path.dirname(absFileName)
		if not os.path.exists(filePath) and len(filePath) > 0:
			logger.debug("Creating folder: %s" % filePath)
			os.makedirs(filePath)
	pypar.barrier()
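
This is the usual way to create shared output directories under MPI: exactly one rank touches the file system, and the barrier stops the other ranks from racing ahead into a directory that does not exist yet. The same guard reduced to a minimal sketch:

import os
import pypar

def make_shared_dir(path):
    if pypar.rank() == 0:
        if path and not os.path.exists(path):
            os.makedirs(path)
    pypar.barrier()   # every rank may now assume the directory exists
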
Code Example #12
File: tasks.py Project: AtomAleks/pyprop-helium
	def callback(self, prop):
		if self.StoreDuringPropagation:
			#create unique filename
			filename = "%s_%03i.h5" % (self.OutputFileName.strip(".h5"), self.Counter)
			
			#store current wavefunction and propagation time
			prop.SaveWavefunctionHDF(filename, "/wavefunction")
			if pyprop.ProcId == 0:
				with tables.openFile(filename, "r+", MAX_THREADS=1) as h5:
					h5.setNodeAttr("/wavefunction", "prop_time", prop.PropagatedTime)
			pypar.barrier()

			self.Counter += 1
Code Example #13
def SerialPrint(str, proc=-1):
    if ProcCount == 1:
        print str
    else:
        if proc == -1: procList = range(ProcCount)
        else: procList = [proc]
        for i in procList:
            if i == ProcId:
                print "Proc %4i: %s" % (
                    ProcId,
                    str,
                )
            sys.stdout.flush()
            pypar.barrier()
Code Example #14
def TestFindEigenvalues():
	#Custom matvec product
	def matvec(psi, tmpPsi, t, dt):
		tmpPsi.Clear()
		potMatrix.Multiply(psi, tmpPsi)
		tmpPsi.GetRepresentation().SolveOverlap(tmpPsi)

	pyprop.PrintOut("")
	pyprop.PrintOut("Now testing eigenvalue computation...")

	#Setup problem
	Print("  Setting up problem...", [0])
	prop = SetupProblem(config='config_eigenvalues.ini', silent = True)
	psi = prop.psi
	pyprop.Redirect.Enable(silent=True)

	#Setup Epetra potential and copy Tensor potential data into it
	Print("  Converting tensor potential to Epetra matrix...", [0])
	potMatrix = EpetraPotential_3()
	potMatrix.Setup(psi)
	for pot in prop.Propagator.BasePropagator.PotentialList:
		localBasisPairs = pot.BasisPairs
		potMatrix.AddTensorPotentialData(pot.PotentialData, localBasisPairs, 0)
		del pot.PotentialData
	potMatrix.GlobalAssemble()

	#Setup PIRAM
	Print("  Setting up Piram...", [0])
	prop.Config.Arpack.matrix_vector_func = matvec
	solver = pyprop.PiramSolver(prop)

	#Find eigenvalues
	Print("  Calculating eigenvalues...", [0])
	solver.Solve()
	eigs = solver.GetEigenvalues()
	Print("    Eigenvalues = %s" % str(eigs), [0])

	#Test first eigenvalue
	prop.psi.Clear()
	tmpPsi = prop.psi.Copy()
	solver.SetEigenvector(prop.psi, 0)
	prop.psi.Normalize()
	matvec(prop.psi, tmpPsi, 0, 0)
	eigRes = abs(prop.psi.InnerProduct(tmpPsi) - solver.GetEigenvalues()[0])
	Print("    ||H * v - lambda * v|| = %s" % eigRes,[0])

	#finish and cleanup
	pypar.barrier()
	pyprop.Redirect.Disable()
	pyprop.PrintOut("\n...done!")
Code Example #15
File: dkmeans.py Project: kaloyan13/vise2
def compute_subsample(pnt_fn, feat_fn, N):
    import file_utils
    if rank == 0:
        print 'Computing subsample:'

        file_utils.remove_if_newer(pnt_fn, [feat_fn])
        file_utils.remove_if_corrupt_points(pnt_fn)

        if not os.path.exists(pnt_fn):
            subsample_db5(feat_fn, pnt_fn, N)
        else:
            print '... already exists ...'

    mpi.barrier()
Code Example #16
def TestFindEigenvalues():
    #Custom matvec product
    def matvec(psi, tmpPsi, t, dt):
        tmpPsi.Clear()
        potMatrix.Multiply(psi, tmpPsi)
        tmpPsi.GetRepresentation().SolveOverlap(tmpPsi)

    pyprop.PrintOut("")
    pyprop.PrintOut("Now testing eigenvalue computation...")

    #Setup problem
    Print("  Setting up problem...", [0])
    prop = SetupProblem(config='config_eigenvalues.ini', silent=True)
    psi = prop.psi
    pyprop.Redirect.Enable(silent=True)

    #Setup Epetra potential and copy Tensor potential data into it
    Print("  Converting tensor potential to Epetra matrix...", [0])
    potMatrix = EpetraPotential_3()
    potMatrix.Setup(psi)
    for pot in prop.Propagator.BasePropagator.PotentialList:
        localBasisPairs = pot.BasisPairs
        potMatrix.AddTensorPotentialData(pot.PotentialData, localBasisPairs, 0)
        del pot.PotentialData
    potMatrix.GlobalAssemble()

    #Setup PIRAM
    Print("  Setting up Piram...", [0])
    prop.Config.Arpack.matrix_vector_func = matvec
    solver = pyprop.PiramSolver(prop)

    #Find eigenvalues
    Print("  Calculating eigenvalues...", [0])
    solver.Solve()
    eigs = solver.GetEigenvalues()
    Print("    Eigenvalues = %s" % str(eigs), [0])

    #Test first eigenvalue
    prop.psi.Clear()
    tmpPsi = prop.psi.Copy()
    solver.SetEigenvector(prop.psi, 0)
    prop.psi.Normalize()
    matvec(prop.psi, tmpPsi, 0, 0)
    eigRes = abs(prop.psi.InnerProduct(tmpPsi) - solver.GetEigenvalues()[0])
    Print("    ||H * v - lambda * v|| = %s" % eigRes, [0])

    #finish and cleanup
    pypar.barrier()
    pyprop.Redirect.Disable()
    pyprop.PrintOut("\n...done!")
Code Example #17
def TestMultiplyOverlap():
    pyprop.PrintOut("Now testing S * psi...")
    pyprop.Redirect.Enable(silent=True)

    fileName = "test_multiplyoverlap.h5"
    seed(0)

    conf = pyprop.Load("config-test.ini")
    psi = pyprop.CreateWavefunction(conf)
    initPsi = pyprop.CreateWavefunction(conf)

    if pyprop.ProcCount == 1:
        psi.GetData()[:] = random(psi.GetData().shape)
        print "Normalizing wavefunction..."
        psi.Normalize()
        initPsi.GetData()[:] = psi.GetData()[:]
        pyprop.serialization.SaveWavefunctionHDF(fileName, "/wavefunction",
                                                 psi, conf)
        psi.GetRepresentation().MultiplyOverlap(psi)
        pyprop.serialization.SaveWavefunctionHDF(fileName,
                                                 "/wavefunctionoverlap", psi,
                                                 conf)
    else:
        pyprop.serialization.LoadWavefunctionHDF(fileName, "/wavefunction",
                                                 initPsi)
        pyprop.serialization.LoadWavefunctionHDF(fileName,
                                                 "/wavefunctionoverlap", psi)

    destPsi = initPsi.Copy()
    destPsi.Clear()
    tmpPsi = initPsi.Copy()
    tmpPsi.Clear()

    destPsi.GetData()[:] = initPsi.GetData()[:]
    destPsi.GetRepresentation().MultiplyOverlap(destPsi)

    Print()
    Print("  Proc %s: ||S * psi - S'*psi||_max = %s" %
          (pyprop.ProcId,
           numpy.max(numpy.max(psi.GetData() - destPsi.GetData()))))
    Print("  Proc %s: ||S * psi - S'*psi|| = %s" %
          (pyprop.ProcId, linalg.norm(psi.GetData() - destPsi.GetData())))

    #finish and cleanup
    pypar.barrier()
    pyprop.Redirect.Disable()
    pyprop.PrintOut("\n...done!")
Code Example #18
def TestEpetraMatvecSpeed():
    numMatVecs = 500

    Print("")
    Print("Now testing Epetra matvec speed...")
    pyprop.Redirect.Enable(silent=True)

    #Test
    conf = pyprop.Load("config_propagation.ini")
    psi = pyprop.CreateWavefunction(conf)
    Print("  Size of wavefunction is: %s" % repr(psi.GetData().shape))

    #Setup problem
    Print("  Setting up propagator w/potentials...")
    prop = SetupProblem(config='config_propagation.ini')
    psi = prop.psi
    tmpPsi = psi.Copy()
    tmpPsi.Clear()

    Print("  Local size of wavefunction is: %s" %
          str(prop.psi.GetData().shape))
    Print("  Global size of wavefunction is: %s" %
          str(prop.psi.GetRepresentation().GetFullShape()))

    #Get Epetra potential
    #pot = prop.Propagator.BasePropagator.PotentialList[1]
    Print("  Number of potentials: %s" %
          len(prop.Propagator.BasePropagator.PotentialList))

    #Calculate S^-1 * psi
    Print("  Performing %i matvecs..." % numMatVecs)

    def matvecs():
        for i in range(numMatVecs):
            #pot.MultiplyPotential(psi, tmpPsi, 0, 0)
            prop.Propagator.BasePropagator.MultiplyHamiltonianNoOverlap(
                psi, tmpPsi, 0, 0)
            #tmpPsi.GetRepresentation().SolveOverlap(tmpPsi)

    timeIt(matvecs)

    #finish and cleanup
    pypar.barrier()
    pyprop.Redirect.Disable()
    pyprop.PrintOut("\n...done!")
Code Example #19
def TestInnerProduct():
    pyprop.PrintOut("")
    pyprop.PrintOut("Now testing innerproduct...")
    pyprop.Redirect.Enable(silent=True)

    seed(0)

    fileName = "test_innerproduct.h5"

    conf = pyprop.Load("config-test.ini")
    psi = pyprop.CreateWavefunction(conf)
    tmpPsi = pyprop.CreateWavefunction(conf)

    if pyprop.ProcCount == 1:
        psi.GetData()[:] = random(psi.GetData().shape)
        psi.Normalize()

        tmpPsi.GetData()[:] = random(psi.GetData().shape)
        tmpPsi.Normalize()

        pyprop.serialization.SaveWavefunctionHDF(fileName, "/wavefunction1",
                                                 psi, conf)
        pyprop.serialization.SaveWavefunctionHDF(fileName, "/wavefunction2",
                                                 tmpPsi, conf)
    else:
        pyprop.serialization.LoadWavefunctionHDF(fileName, "/wavefunction1",
                                                 psi)
        pyprop.serialization.LoadWavefunctionHDF(fileName, "/wavefunction2",
                                                 tmpPsi)

    inner1 = psi.InnerProduct(tmpPsi)
    inner2 = psi.InnerProduct(psi)

    Print()
    Print("<psi|tmpPsi> = %s" % inner1, range(pyprop.ProcCount))
    Print("<psi|psi> = %s" % inner2, range(pyprop.ProcCount))

    #finish and cleanup
    pypar.barrier()
    pyprop.Redirect.Disable()
    pyprop.PrintOut("\n...done!")
Code Example #20
def TestMultiplyOverlap():
	pyprop.PrintOut("Now testing S * psi...")
	pyprop.Redirect.Enable(silent=True)

	fileName  = "test_multiplyoverlap.h5"
	seed(0)

	conf = pyprop.Load("config-test.ini")
	psi = pyprop.CreateWavefunction(conf)
	initPsi = pyprop.CreateWavefunction(conf)


	if pyprop.ProcCount == 1:
		psi.GetData()[:] = random(psi.GetData().shape)
		print "Normalizing wavefunction..."
		psi.Normalize()
		initPsi.GetData()[:] = psi.GetData()[:]
		pyprop.serialization.SaveWavefunctionHDF(fileName, "/wavefunction", psi, conf)
		psi.GetRepresentation().MultiplyOverlap(psi)
		pyprop.serialization.SaveWavefunctionHDF(fileName, "/wavefunctionoverlap", psi, conf)
	else:
		pyprop.serialization.LoadWavefunctionHDF(fileName, "/wavefunction", initPsi)
		pyprop.serialization.LoadWavefunctionHDF(fileName, "/wavefunctionoverlap", psi)

	destPsi = initPsi.Copy()
	destPsi.Clear()
	tmpPsi = initPsi.Copy()
	tmpPsi.Clear()

	destPsi.GetData()[:] = initPsi.GetData()[:]
	destPsi.GetRepresentation().MultiplyOverlap(destPsi)

	Print()
	Print("  Proc %s: ||S * psi - S'*psi||_max = %s" % (pyprop.ProcId, numpy.max(numpy.max(psi.GetData() - destPsi.GetData()))))
	Print("  Proc %s: ||S * psi - S'*psi|| = %s" % (pyprop.ProcId, linalg.norm(psi.GetData() - destPsi.GetData())))

	#finish and cleanup
	pypar.barrier()
	pyprop.Redirect.Disable()
	pyprop.PrintOut("\n...done!")
Code Example #21
def test_lock(Nmpi, fields, pbc_opt=None):
    if myrank == 0:
        print 'PBC : %s, start' % pbc_opt
    mpi.barrier()
    for i in xrange(len(fields)):
        fields[i][:,:,:6] = 1.
        fields[i][:,:,6:] = 0.
        #print 'I`m', myrank, 'Field %s Direction x1 sum before = ' % i, fields[i][:,:,6].sum()
        #print 'I`m', myrank, 'Field %s Direction x2 sum before = ' % i, fields[i][:,:,7].sum()
        #print 'I`m', myrank, 'Field %s Direction y1 sum before = ' % i, fields[i][:,:,8].sum()
        #print 'I`m', myrank, 'Field %s Direction y2 sum before = ' % i, fields[i][:,:,9].sum()
        #print 'I`m', myrank, 'Field %s Direction z1 sum before = ' % i, fields[i][:,:,10].sum()
        #print 'I`m', myrank, 'Field %s Direction z2 sum before = ' % i, fields[i][:,:,11].sum()
    mpi.barrier()

    if myrank != 0:
        targets = MPI.calc_mpitarget(Nmpi, myrank)
        targets_pbc = MPI.calc_mpitarget_pbc(Nmpi, myrank, pbc_opt)
        message_range = MPI.test_making_message_range()
        MPI.test_mpi_exchange(fields, Nmpi, myrank, targets, message_range)
        MPI.test_mpi_exchange_pbc(fields, myrank, targets_pbc, message_range, pbc_opt)

        for i in xrange(len(fields)):
            print 'I`m', myrank, 'Field %s Direction x1 sum after = ' % i, fields[i][:,:,6].sum()
            print 'I`m', myrank, 'Field %s Direction x2 sum after = ' % i, fields[i][:,:,7].sum()
            print 'I`m', myrank, 'Field %s Direction y1 sum after = ' % i, fields[i][:,:,8].sum()
            print 'I`m', myrank, 'Field %s Direction y2 sum after = ' % i, fields[i][:,:,9].sum()
            print 'I`m', myrank, 'Field %s Direction z1 sum after = ' % i, fields[i][:,:,10].sum()
            print 'I`m', myrank, 'Field %s Direction z2 sum after = ' % i, fields[i][:,:,11].sum()
    mpi.barrier()
    if myrank == 0:
        print 'PBC : %s, Done' % pbc_opt
        print
        print
        print
Code Example #22
def TestEpetraMatvecSpeed():
	numMatVecs = 500

	Print("")
	Print("Now testing Epetra matvec speed...")
	pyprop.Redirect.Enable(silent=True)

	#Test
	conf = pyprop.Load("config_propagation.ini")
	psi = pyprop.CreateWavefunction(conf)
	Print("  Size of wavefunction is: %s" % repr(psi.GetData().shape)) 

	#Setup problem
	Print("  Setting up propagator w/potentials...")
	prop = SetupProblem(config='config_propagation.ini')
	psi = prop.psi
	tmpPsi = psi.Copy()
	tmpPsi.Clear()

	Print("  Local size of wavefunction is: %s" % str(prop.psi.GetData().shape)) 
	Print("  Global size of wavefunction is: %s" % str(prop.psi.GetRepresentation().GetFullShape())) 

	#Get Epetra potential
	#pot = prop.Propagator.BasePropagator.PotentialList[1]
	Print("  Number of potentials: %s" % len(prop.Propagator.BasePropagator.PotentialList))

	#Calculate S^-1 * psi
	Print("  Performing %i matvecs..." % numMatVecs)
	def matvecs():
		for i in range(numMatVecs):
			#pot.MultiplyPotential(psi, tmpPsi, 0, 0)
			prop.Propagator.BasePropagator.MultiplyHamiltonianNoOverlap(psi, tmpPsi, 0, 0)
			#tmpPsi.GetRepresentation().SolveOverlap(tmpPsi)

	timeIt(matvecs)
	
	#finish and cleanup
	pypar.barrier()
	pyprop.Redirect.Disable()
	pyprop.PrintOut("\n...done!")
Code Example #23
def TestInnerProduct():
	pyprop.PrintOut("")
	pyprop.PrintOut("Now testing innerproduct...")
	pyprop.Redirect.Enable(silent=True)

	seed(0)

	fileName  = "test_innerproduct.h5"

	conf = pyprop.Load("config-test.ini")
	psi = pyprop.CreateWavefunction(conf)
	tmpPsi = pyprop.CreateWavefunction(conf)

	if pyprop.ProcCount == 1:
		psi.GetData()[:] = random(psi.GetData().shape)
		psi.Normalize()

		tmpPsi.GetData()[:] = random(psi.GetData().shape)
		tmpPsi.Normalize()

		pyprop.serialization.SaveWavefunctionHDF(fileName, "/wavefunction1", psi, conf)
		pyprop.serialization.SaveWavefunctionHDF(fileName, "/wavefunction2", tmpPsi, conf)
	else:
		pyprop.serialization.LoadWavefunctionHDF(fileName, "/wavefunction1", psi)
		pyprop.serialization.LoadWavefunctionHDF(fileName, "/wavefunction2", tmpPsi)

	
	inner1 = psi.InnerProduct(tmpPsi)
	inner2 = psi.InnerProduct(psi)

	Print()
	Print("<psi|tmpPsi> = %s" % inner1, range(pyprop.ProcCount))
	Print("<psi|psi> = %s" % inner2, range(pyprop.ProcCount))

	#finish and cleanup
	pypar.barrier()
	pyprop.Redirect.Disable()
	pyprop.PrintOut("\n...done!")
Code Example #24
def compute_clusters(clst_fn, pnt_fn, nclusters, niters=30, ntrees=8, 
                     nchecks=512, seed=42, iters_to_output=[], pnts_step=50000,
                     approx=True,
                     featureWrapper= featureNoWrapper):
  import file_utils

  if rank==0:
    print 'Computing clusters:'
    sys.stdout.flush()
    file_utils.remove_if_newer(clst_fn, [pnt_fn])
    file_utils.remove_if_corrupt_clusters(clst_fn)
  
  mpi.barrier()
  
  if not os.path.exists(clst_fn):
    if approx:
      nn_class = lambda y: nn.nn_approx(y, ntrees, nchecks, seed)
    else:
      nn_class = nn.nn
    dkmeans3(pnt_fn, nclusters, niters, clst_fn, nn_class=nn_class, seed=seed, iters_to_output=iters_to_output, pnts_step=pnts_step, featureWrapper= featureWrapper)
  else:
    if rank==0:
      print '... already exists ...'
Code Example #25
def main():

    # Ensure all Processors are ready
    pypar.barrier()
    print "Processor %d is ready" % (myid)

    # Connect to MySQL db
    db = MySQLdb.connect(host="localhost",
                         user="******",
                         passwd="samsung",
                         db="sat")
    cur = db.cursor()

    # Option parser from wrapper script
    parser = optparse.OptionParser()
    # PDB
    parser.add_option("-p",
                      "--pdb",
                      help="Choose all or a pdb id",
                      dest="pdb",
                      default="all")
    # PDB directory
    parser.add_option("-d", "--dir", help="i", dest="i", default="all")

    parser.add_option("-m",
                      "--mutationList",
                      help="Location of mutation list file",
                      dest="m",
                      default="ALA")

    (opts, args) = parser.parse_args()

    # Run calculations
    do_run(opts.pdb, opts.i, cur, db, opts.m)

    # Finalize and exit
    pypar.finalize()
Code Example #26
def main():
    
    # Ensure all Processors are ready
    pypar.barrier()
    print "Processor %d is ready" % (myid)
    
    # Connect to MySQL db
    db = MySQLdb.connect(host="localhost", 
                         user = "******", 
                         passwd = "samsung", 
                         db = "sat")
    cur = db.cursor()


    # Option parser from wrapper script
    parser = optparse.OptionParser()
    # PDB
    parser.add_option("-p", "--pdb", 
                      help="Choose all or a pdb id", 
                      dest="pdb", default ="all")
    # PDB directory
    parser.add_option("-d", "--dir", 
                      help="i", 
                      dest="i", default ="all")

    parser.add_option("-m", "--mutationList", 
                      help="Location of mutation list file", 
                      dest="m", default="ALA")
    
    (opts, args) = parser.parse_args()
    
    # Run calculations
    do_run(opts.pdb, opts.i, cur, db, opts.m)

    # Finalize and exit
    pypar.finalize()
Code Example #27
File: dkmeans.py Project: kaloyan13/vise2
def dkmeans3(pnts_fn,
             nk,
             niters,
             clst_fn,
             nn_class=nn.nn,
             seed=42,
             pnts_step=50000,
             iters_to_output=[],
             root_rank=0,
             checkpoint=True,
             featureWrapper=featureNoWrapper):
    """
  Distributed k-means.
  """

    if featureWrapper == None:
        featureWrapper = featureNoWrapper
    elif featureWrapper == 'hell':
        featureWrapper = toHellinger

    npr.seed(seed)

    pnts = pointsObj(pnts_fn)
    npnts = pnts.shape[0]
    ndims = pnts.shape[1]

    if rank == root_rank:
        print 'Using a (%d x %d) %s array for the datapoints' % (npnts, ndims,
                                                                 pnts.dtype)

    if rank == root_rank and ndims > npnts:
        raise RuntimeError, 'dodgy matrix format -- number of dimensions is greater than the number of points!'

    # Find preferred dtype
    if pnts.dtype == 'float64': pref_dtype = 'float64'
    else: pref_dtype = 'float32'

    start_iter = np.zeros((1, ), dtype='int')
    distortion = np.zeros((1, ))
    clst_data = np.empty((nk, ndims), dtype=pref_dtype)
    if rank == root_rank:
        print 'Using a (%d x %d) %s array for the clusters' % (
            clst_data.shape[0], clst_data.shape[1], clst_data.dtype)
        checkpoint_fn = clst_fn + '.checkpoint'
        if os.path.exists(checkpoint_fn):
            start_iter[0], clst_data, distortion[0] = dkmeans3_read_clusters(
                checkpoint_fn)
            print 'Restarting from checkpoint. Start iteration = %d' % start_iter
        else:
            clst_inds = np.arange(npnts)
            npr.shuffle(clst_inds)
            clst_inds = clst_inds[:nk]
            clst_inds.sort()
            for i, ind in enumerate(clst_inds):
                clst_data[i] = featureWrapper(pnts[ind])

            if 0 in iters_to_output:
                dkmeans3_save_clusters(clst_fn + '.000', clst_data, 0, niters,
                                       pnts.shape, seed, 0.0)

    mpi.broadcast(start_iter, root_rank)

    # Start iterations
    for iter_num in range(start_iter[0], niters):
        t1 = time.time()

        mpi.broadcast(clst_data,
                      root_rank)  # Broadcast the cluster centers to all nodes.

        nn_functor = nn_class(clst_data)  # Build the NN functor

        clst_sums = np.zeros((nk, ndims), dtype=pref_dtype)
        # NOTE: The accumulator here is floating point to avoid a cast when used with numpy.
        clst_sums_n = np.zeros(
            nk, dtype=pref_dtype
        )  # Be careful here -- float32 has 24bits of integer precision.
        distortion = np.zeros((1, ))

        # Let's do nearest neighbours
        stack = []
        if rank == root_rank:
            for l in range(0, npnts, pnts_step):
                r = min(l + pnts_step, npnts)
                stack.append((l, r))
            stack.reverse()

        mpi_queue.mpi_queue(stack,
                            dkmeans3_worker_func(
                                pnts,
                                nn_functor,
                                clst_sums,
                                clst_sums_n,
                                distortion,
                                pref_dtype,
                                featureWrapper=featureWrapper),
                            dkmeans3_result_func,
                            queue_rank=root_rank)

        mpi.inplace_reduce(clst_sums, mpi.SUM, root_rank)
        mpi.inplace_reduce(clst_sums_n, mpi.SUM, root_rank)
        mpi.inplace_reduce(distortion, mpi.SUM, root_rank)

        if rank == root_rank:
            # Check for clusters with no assignments.
            noassign_inds = np.where(clst_sums_n == 0)[0]
            if len(noassign_inds):
                warnings.warn(
                    'iter %d: %d clusters have zero points assigned to them - using random points'
                    % (iter_num, len(noassign_inds)))
                clst_sums_n[noassign_inds] = 1
                for ind in noassign_inds:
                    clst_sums[ind] = featureWrapper(pnts[npr.randint(
                        0, pnts.shape[0])])

            clst_sums /= clst_sums_n.reshape(-1, 1)
            clst_data = clst_sums

            t2 = time.time()

            #print 'Iteration %d, sse = %g, mem = %.2fMB, took %.2fs' % (iter_num+1, distortion[0], resident()/2**20, t2-t1)
            print 'relja_retrival,dkmeans::cluster,%s,%d,%d,%g' % (str(
                datetime.datetime.now()), iter_num + 1, niters, distortion[0])

            # Potentially save the clusters.
            if checkpoint:
                dkmeans3_save_clusters(checkpoint_fn, clst_data, iter_num + 1,
                                       niters, pnts.shape, seed, distortion[0])
            if (iter_num + 1) in iters_to_output:
                dkmeans3_save_clusters(clst_fn + '.%03d' % (iter_num + 1),
                                       clst_data, iter_num + 1, niters,
                                       pnts.shape, seed, distortion[0])

        del clst_sums
        del clst_sums_n

    if rank == root_rank:
        dkmeans3_save_clusters(clst_fn, clst_data, niters, niters, pnts.shape,
                               seed, distortion[0])
        if checkpoint:
            try:
                os.remove(checkpoint_fn
                          )  # Remove the checkpoint file once we've got here.
            except OSError:
                pass

    del clst_data
    #del clst_sums
    #del clst_sums_n

    mpi.barrier()  # Is this needed?
Code Example #28
def run():
    """
    Run the wind multiplier calculations.

    This will attempt to run the calculation in parallel by tiling the
    domain, but also provides a sane fallback mechanism to execute
    in serial.

    """

    # add subfolders into path
    cmd_folder = os.path.realpath(
        os.path.abspath(
            os.path.split(
                inspect.getfile(
                    inspect.currentframe()))[0]))
    if cmd_folder not in sys.path:
        sys.path.insert(0, cmd_folder)

    cmd_subfolder1 = pjoin(cmd_folder, "terrain")
    if cmd_subfolder1 not in sys.path:
        sys.path.insert(0, cmd_subfolder1)

    cmd_subfolder2 = pjoin(cmd_folder, "shielding")
    if cmd_subfolder2 not in sys.path:
        sys.path.insert(0, cmd_subfolder2)

    cmd_subfolder3 = pjoin(cmd_folder, "topographic")
    if cmd_subfolder3 not in sys.path:
        sys.path.insert(0, cmd_subfolder3)

    cmd_subfolder4 = pjoin(cmd_folder, "utilities")
    if cmd_subfolder4 not in sys.path:
        sys.path.insert(0, cmd_subfolder4)

    config = ConfigParser.RawConfigParser()
    config.read(pjoin(cmd_folder, 'multiplier_conf.cfg'))

    root = config.get('inputValues', 'root')
    upwind_length = float(config.get('inputValues', 'upwind_length'))

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile)) 

    # If log file directory does not exist, create it 
    if not isdir(logdir):
        try: 
            os.makedirs(logdir)
        except OSError: 
            logfile = pjoin(os.getcwd(), 'multipliers.log') 
   
    loglevel = config.get('Logging', 'LogLevel')
    verbose = config.getboolean('Logging', 'Verbose')

    attempt_parallel()

    if pp.size() > 1 and pp.rank() > 0:
        logfile += '_' + str(pp.rank())
        verbose = False
    else:
        pass

    fl_start_log(logfile, loglevel, verbose)

    # set input maps and output folder
    terrain_map = pjoin(pjoin(root, 'input'), "lc_terrain_class.img")
    dem = pjoin(pjoin(root, 'input'), "dems1_whole.img")
    cyclone_area = pjoin(pjoin(root, 'input'), "cyclone_dem_extent.img")

    do_output_directory_creation(root)
    global output_folder
    output_folder = pjoin(root, 'output')

    log.info("get the tiles")
    tg = TileGrid(upwind_length, terrain_map)
    tiles = get_tiles(tg)
    log.info('the number of tiles is {0}'.format(str(len(tiles))))

    pp.barrier()

    multiplier = Multipliers(terrain_map, dem, cyclone_area)
    multiplier.parallelise_on_tiles(tiles)

    pp.barrier()

    log.info("Successfully completed wind multipliers calculation")
Code Example #29
def TestSolveOverlap():
	pyprop.PrintOut("")
	pyprop.PrintOut("Now testing S^-1 * psi...")
	pyprop.Redirect.Enable(silent=True)

	seed(0)

	fileName  = "test_solveoverlap.h5"

	conf = pyprop.Load("config-test.ini")
	psi = pyprop.CreateWavefunction(conf)
	initPsi = pyprop.CreateWavefunction(conf)

	if pyprop.ProcCount == 1:
		psi.GetData()[:] = random(psi.GetData().shape)
		psi.Normalize()
		initPsi.GetData()[:] = psi.GetData()[:]

		#Store initial (random) psi
		pyprop.serialization.SaveWavefunctionHDF(fileName, "/wavefunction", psi, conf)

		#Store S^-1 * psi
		psi.GetRepresentation().SolveOverlap(psi)
		pyprop.serialization.SaveWavefunctionHDF(fileName, "/wavefunctionoverlap", psi, conf)

		#determine overlap matrix condition number
		overlap = psi.GetRepresentation().GetGlobalOverlapMatrix(0)
		A = overlap.GetOverlapBlasBanded()
		B = pyprop.core.ConvertMatrixBlasBandedToFull(A)
		Print("  Overlap matrix condition number = %e" % cond(B))
		
	else:
		pyprop.serialization.LoadWavefunctionHDF(fileName, "/wavefunction", initPsi)
		pyprop.serialization.LoadWavefunctionHDF(fileName, "/wavefunctionoverlap", psi)

	destPsi = initPsi.Copy()
	destPsi.Clear()
	tmpPsi = initPsi.Copy()
	tmpPsi.Clear()

	#Calculate S^-1 * psi
	destPsi.GetData()[:] = initPsi.GetData()[:]
	destPsi.GetRepresentation().SolveOverlap(destPsi)
	tmpPsi.GetData()[:] = destPsi.GetData()[:]

	#Calculate S * S^-1 * psi
	destPsi.GetRepresentation().MultiplyOverlap(destPsi)
	
	Print()
	a = numpy.max(numpy.max(psi.GetData() - tmpPsi.GetData()))
	Print("  Proc %s: ||S^-1 * psi - S'^-1 * psi||_max = %s" % (pyprop.ProcId, a), range(pyprop.ProcCount))
	Print()
	b = numpy.max(numpy.max(initPsi.GetData() - destPsi.GetData()))
	Print("  Proc %s: ||S * S^-1 * psi - I * psi||_max = %s" % (pyprop.ProcId, b), range(pyprop.ProcCount))
	
	c = linalg.norm(initPsi.GetData() - destPsi.GetData())
	Print("  Proc %s: ||S * S^-1 * psi - I * psi|| = %s" % (pyprop.ProcId, c), range(pyprop.ProcCount))

	#finish and cleanup
	pypar.barrier()
	pyprop.Redirect.Disable()
	pyprop.PrintOut("\n...done!")
Code Example #30
def run():
    """
    Run the wind multiplier calculations.

    This will attempt to run the calculation in parallel by tiling the
    domain, but also provides a sane fallback mechanism to execute
    in serial.

    """

    # add subfolders into path
    cmd_folder = os.path.realpath(
        os.path.abspath(
            os.path.split(inspect.getfile(inspect.currentframe()))[0]))
    if cmd_folder not in sys.path:
        sys.path.insert(0, cmd_folder)

    cmd_subfolder1 = pjoin(cmd_folder, "terrain")
    if cmd_subfolder1 not in sys.path:
        sys.path.insert(0, cmd_subfolder1)

    cmd_subfolder2 = pjoin(cmd_folder, "shielding")
    if cmd_subfolder2 not in sys.path:
        sys.path.insert(0, cmd_subfolder2)

    cmd_subfolder3 = pjoin(cmd_folder, "topographic")
    if cmd_subfolder3 not in sys.path:
        sys.path.insert(0, cmd_subfolder3)

    cmd_subfolder4 = pjoin(cmd_folder, "utilities")
    if cmd_subfolder4 not in sys.path:
        sys.path.insert(0, cmd_subfolder4)

    config = ConfigParser.RawConfigParser()
    config.read(pjoin(cmd_folder, 'multiplier_conf.cfg'))

    root = config.get('inputValues', 'root')
    upwind_length = float(config.get('inputValues', 'upwind_length'))

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile))

    # If log file directory does not exist, create it
    if not isdir(logdir):
        try:
            os.makedirs(logdir)
        except OSError:
            logfile = pjoin(os.getcwd(), 'multipliers.log')

    loglevel = config.get('Logging', 'LogLevel')
    verbose = config.getboolean('Logging', 'Verbose')

    attempt_parallel()

    if pp.size() > 1 and pp.rank() > 0:
        logfile += '_' + str(pp.rank())
        verbose = False
    else:
        pass

    fl_start_log(logfile, loglevel, verbose)

    # set input maps and output folder
    terrain_map = pjoin(pjoin(root, 'input'), "lc_terrain_class.img")
    dem = pjoin(pjoin(root, 'input'), "dems1_whole.img")

    do_output_directory_creation(root)
    global output_folder
    output_folder = pjoin(root, 'output')

    log.info("get the tiles based on the DEM")
    tg = TileGrid(upwind_length, dem)
    tiles = get_tiles(tg)
    log.info('the number of tiles is {0}'.format(str(len(tiles))))

    pp.barrier()

    multiplier = Multipliers(terrain_map, dem)
    multiplier.parallelise_on_tiles(tiles)

    pp.barrier()

    log.info("Successfully completed wind multipliers calculation")
Code Example #31
File: mpi.py Project: lelou6666/PySOL
		print p.rank(), res

	if True:		
		v = [ 2 for i in xrange(10000000) ]
		res = p_dot_all(v,v)
		#import time
		#time.sleep(p.rank()*2+1)
		print p.rank(), res

	if False:
		s = 0
		for i in xrange(100):
			r = p.rank()
			r = broadcast(r)
			s += (r + 1)
			p.barrier()
		print "%d %d" % ( p.rank(), s )

	if False:
		m = None
		v = None
		if root():
			m = eye_matrix(3000)
			v = range(3000)
		r = p_mv(m,v)
		if root():
			print r

	if root():
		end = p.time()
		total = end - start
Code Example #32
File: interface.py Project: GeoscienceAustralia/PF3D
def run_multiple_windfields(scenario,
                            windfield_directory=None,
                            hazard_output_folder=None,
                            dircomment=None,
                            echo=False,
                            verbose=True):
    """Run volcanic ash impact model for multiple wind fields.

    The wind fields are assumed to be in the subfolder specified by windfield_directory,
    to have the extension *.profile, and to follow the format used with scenarios.

    This function makes use of Open MPI and Pypar to execute in parallel but can also run sequentially.
    """

    try:
        import pypar
    except:
        P = 1
        p = 0
        processor_name = os.uname()[1]

        print 'Pypar could not be imported. Running sequentially on node %s' % processor_name,
    else:
        time.sleep(1)
        P = pypar.size()
        p = pypar.rank()
        processor_name = pypar.get_processor_name()

        print 'Processor %d initialised on node %s' % (p, processor_name)

        pypar.barrier()


    if p == 0:

        # Put logs along with the results
        logdir = os.path.join(hazard_output_folder, 'logs')
        makedir(logdir)

        header('Hazard modelling using multiple wind fields')
        print '*  Wind profiles obtained from: %s' % windfield_directory
        print '*  Scenario results stored in:  %s' %  hazard_output_folder
        print '*  Log files:'

        t_start = time.time()

        # Communicate hazard output directory name to all nodes to ensure they have exactly the same time stamp.
        for i in range(P):
            pypar.send((hazard_output_folder), i)
    else:
        # Receive correctly timestamped output directory names
        hazard_output_folder = pypar.receive(0)
        logdir = os.path.join(hazard_output_folder, 'logs')


    try:
        name = os.path.splitext(scenario)[0]
    except:
        name = 'run'


    # Wait until log dir has been created
    pypar.barrier()

    params = get_scenario_parameters(scenario)

    # Start processes staggered to avoid race conditions for disk access (otherwise it is slow to get started)
    time.sleep(2*p)

    # Logging
    s = 'Proc %i' % p
    print '     %s -' % string.ljust(s, 8),
    AIM_logfile = os.path.join(logdir, 'P%i.log' % p)
    start_logging(filename=AIM_logfile, echo=False)

    # Get cracking
    basename, _ = os.path.splitext(scenario)
    count_local = 0
    count_all = 0
    for i, file in enumerate(os.listdir(windfield_directory)):

        count_all += 1

        # Distribute jobs cyclically to processors
        if i%P == p:

            if not file.endswith('.profile'):
                continue

            count_local += 1

            windfield = '%s/%s' % (windfield_directory, file)
            windname, _ = os.path.splitext(file)
            header('Computing event %i on processor %i using wind field: %s' % (i, p, windfield))



            if dircomment is None:
                dircomment = params['eruption_comment']

            # Override or create parameters derived from native Fall3d wind field
            params['wind_profile'] = windfield
            params['wind_altitudes'] = get_layers_from_windfield(windfield) # FIXME: Try to comment this out.
            params['Meteorological_model'] = 'profile'

            if hazard_output_folder is None:
                hazard_output_folder = basename + '_hazard_outputs'

            if p == 0:
                print 'Storing multiple outputs in directory: %s' % hazard_output_folder

            # Run scenario
            aim = _run_scenario(params,
                                timestamp_output=True,
                                dircomment=dircomment + '_run%i_proc%i' % (i, p))

            # Make sure folder is present and can be shared by group
            makedir(hazard_output_folder)
            s = 'chmod -R g+w %s' % hazard_output_folder
            run(s)

            # Copy result file to output folder
            result_file = aim.scenario_name + '.res.nc'
            newname = aim.scenario_name + '.%s.res.nc' % windname # Name after wind file
            s = 'cp %s/%s %s/%s' % (aim.output_dir, result_file, hazard_output_folder, newname)
            run(s)

            # Create projectionfile in hazard output
            if i == 0:
                s = 'cp %s %s/%s' % (aim.projection_file, hazard_output_folder, 'HazardMaps.res.prj')
                run(s)

            # Clean up outputs from this scenario
            print 'P%i: Cleaning up %s' % (p, aim.output_dir)
            s = '/bin/rm -rf %s' % aim.output_dir
            run(s)

    print 'Processor %i done %i windfields' % (p, count_local)
    print 'Outputs available in directory: %s' % hazard_output_folder

    pypar.barrier()
    if p == 0:
        print 'Parallel simulation finished %i windfields in %i seconds' % (count_all, time.time() - t_start)


    pypar.finalize()
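
The job distribution above needs no bookkeeping at all: file i is handled by the rank p satisfying i % P == p, so every rank can scan the same directory listing and pick out its own share. The pattern in isolation, with stand-in file names:

import pypar

P, p = pypar.size(), pypar.rank()
files = ['wind%02d.profile' % i for i in range(10)]   # stand-in inputs

for i, name in enumerate(files):
    if i % P == p:
        print 'Processor %d handles %s' % (p, name)

pypar.barrier()
pypar.finalize()
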
Code Example #33
File: eigenvalues.py Project: nepstad/pyprop-helium
def SaveEigenvalueSolverShiftInvert(solver, shiftInvertSolver):
    """
	Saves the output of FindEigenvaluesNearShift, including error estimates,
	to an HDF5 file.
	"""

    logger = GetFunctionLogger()

    conf = solver.BaseProblem.Config
    L = conf.AngularRepresentation.index_iterator.L
    assert len(L) == 1
    shift = conf.Arpack.shift

    # generate filename
    filename = NameGen.GetBoundstateFilename(conf, L[0])

    # Get eigenvalue error estimates
    errorEstimatesPIRAM = solver.Solver.GetErrorEstimates()
    convergenceEstimatesEig = solver.Solver.GetConvergenceEstimates()
    errorEstimatesGMRES = shiftInvertSolver.Solver.GetErrorEstimateList()

    # Get eigenvalues
    prop = solver.BaseProblem
    E = 1.0 / array(solver.GetEigenvalues()) + shift

    # remove file if it exists
    try:
        if os.path.exists(filename):
            if pyprop.ProcId == 0:
                os.remove(filename)
    except:
        logger.error("Could not remove %s (%s)" % (filename, sys.exc_info()[1]))

    # Store eigenvalues and eigenvectors
    logger.info("Now storing eigenvectors...")
    for i in range(len(E)):
        solver.SetEigenvector(prop.psi, i)
        prop.SaveWavefunctionHDF(filename, NameGen.GetEigenvectorDatasetPath(i))

    if pyprop.ProcId == 0:
        RemoveExistingDataset(filename, "/Eig/Eigenvalues")
        RemoveExistingDataset(filename, "/Eig/ErrorEstimateListGMRES")
        RemoveExistingDataset(filename, "/Eig/ErrorEstimateListPIRAM")
        RemoveExistingDataset(filename, "/Eig/ConvergenceEstimateEig")
        h5file = tables.openFile(filename, "r+")
        try:
            # myGroup = h5file.createGroup("/", "Eig")
            myGroup = h5file.getNode("/Eig")
            h5file.createArray(myGroup, "Eigenvalues", E)
            h5file.createArray(myGroup, "ErrorEstimateListGMRES", errorEstimatesGMRES)
            h5file.createArray(myGroup, "ErrorEstimateListPIRAM", errorEstimatesPIRAM)
            h5file.createArray(myGroup, "ConvergenceEstimateEig", convergenceEstimatesEig)

            # Store config
            myGroup._v_attrs.configObject = prop.Config.cfgObj

            # PIRAM stats
            myGroup._v_attrs.opCount = solver.Solver.GetOperatorCount()
            myGroup._v_attrs.restartCount = solver.Solver.GetRestartCount()
            myGroup._v_attrs.orthCount = solver.Solver.GetOrthogonalizationCount()
        except:
            logger.warning("Warning: could not store eigenvalues and error estimates!")
        finally:
            h5file.close()

    pypar.barrier()
Code Example #34
File: mpi.py Project: whigg/PySOL
        print p.rank(), res

    if True:
        v = [2 for i in xrange(10000000)]
        res = p_dot_all(v, v)
        #import time
        #time.sleep(p.rank()*2+1)
        print p.rank(), res

    if False:
        s = 0
        for i in xrange(100):
            r = p.rank()
            r = broadcast(r)
            s += (r + 1)
            p.barrier()
        print "%d %d" % (p.rank(), s)

    if False:
        m = None
        v = None
        if root():
            m = eye_matrix(3000)
            v = range(3000)
        r = p_mv(m, v)
        if root():
            print r

    if root():
        end = p.time()
        total = end - start
Code Example #35
def save_eigenfunction_couplings(filename_el, nr_kept, xmin, xmax, xsize, order):
    """
    save_eigenfunction_couplings(filename_el, nr_kept, xmin, xmax, xsize, order)

    This program sets up the laser interaction hamiltonian for the 
    eigenfunction basis, and stores it in an HDF5 file. 
    This program must be run in parallel.
    
    Example
    -------
    To run this program on 5 processors:
    
    $ mpirun -n 5 python -c "execfile('vibrational_BO.py');save_eigenfunction_couplings()"
    """
    
    #Retrieve the electronic energy curves.
    f = tables.openFile(filename_el)
    try:
	r_grid = f.root.R_grid[:]
	#Get number of tasks.
	el_basis_size = f.root.couplings.shape[0]
    finally:
	f.close()
    
    #Filter function, describing what index pairs should be included in the 
    #calculations.
    def no_filter(index_pair):
	"""
	All couplings included.
	"""
	return True
    
    def symmetry_filter(index_pair):
	"""
	Only include the upper/lower triangle, since hermiticity means
	that they are the same.
	"""
	i = index_pair[0]
	j = index_pair[1]
	if i >= j:
	    return True
	else:
	    return False    
    
    #Make a list of the coupling indices that should be included.
    index_table = create_index_table(el_basis_size, no_filter)
    nr_tasks = len(index_table)

    #Initialize the B-spline_basis.
    spline_basis = vibrational_methods.Bspline_basis(xmin, xmax, xsize, order)
    vib_basis_size = spline_basis.nr_splines
    
    #Generate a filename.
    filename = name_gen.eigenfunction_couplings(filename_el, spline_basis)

    #Name of vib states.
    filename_vib = name_gen.vibrational_eigenstates(filename_el, spline_basis)
    
    #Parallel stuff
    #--------------
    #Get processor 'name'.
    my_id = pypar.rank() 
    
    #Get total number of processors.
    nr_procs = pypar.size()
    
    #Get a list of the indices of this processor's share of the tasks.
    my_tasks = nice_stuff.distribute_work(nr_procs, nr_tasks, my_id)

    #The processors will be writing to the same file.
    #In order to avoid problems, the procs will do a relay race of writing to
    #file. This is handled by blocking send() and receive().
    #Hopefully there will not be too much waiting.

    #ID of the processor that will start writing.
    starter = 0

    #ID of the processor that will be the last to write.
    ender = (nr_tasks - 1) % nr_procs

    #Buffer for the baton, i.e. the permission slip for file writing.
    baton = r_[0]

    #The processor one is to receive the baton from.
    receive_from = (my_id - 1) % nr_procs 

    #The processor one is to send the baton to.
    send_to = (my_id + 1) % nr_procs 
    #-------------------------------

    
    #Initializing the HDF5 file
    #--------------------------
    if my_id == 0:

	f = tables.openFile(filename, 'w')
	g = tables.openFile(filename_vib)
	try:
	    f.createArray("/", "electronicFilename", [filename_el])	    
	    
	    #Initializing the arrays for the time dependent couplings of H.
	    f.createCArray('/','couplings', 
		tables.atom.FloatAtom(), 
		(nr_kept * el_basis_size, 
		 nr_kept * el_basis_size),
		chunkshape=(nr_kept,nr_kept))
	    
	    #Energy diagonal. Time independent part of H. 
	    energy_diagonal = zeros(nr_kept * el_basis_size)
	    for i in range(el_basis_size):
		energy_diagonal[nr_kept * i:
		    nr_kept * (i + 1)] = g.root.E[:nr_kept,i]

	    f.createArray("/", "energyDiagonal", energy_diagonal)
	    
	finally:
	    f.close()
	    g.close()
	
	#Save spline info.
	spline_basis.bsplines.save_spline_info(filename)
    #----------------------------------


    #Setting up the hamiltonian
    #--------------------------
    #Looping over the tasks of this processor.
    for i in my_tasks:
	#Retrieve indices.
	row_index, column_index = index_table[i]

	#Retrieve electronic couplings.
	f = tables.openFile(filename_el)
	try:
	    el_coupling = f.root.couplings[row_index, column_index,:]
	finally:
	    f.close()
	
#	#TODO REMOVE?
#	#Remove errors from the coupling. (A hack, unfortunately.) 
#	r_grid_2, el_coupling_2 = remove_spikes(r_grid, el_coupling)
#
#	#Setup potential matrix.
#	couplings = spline_basis.setup_potential_matrix(
#	    r_grid_2, el_coupling_2)
#	
	#Setup potential matrix. Aij = <Bi | f(R) | Bj>
	bfb_matrix = spline_basis.setup_potential_matrix(
	    r_grid, el_coupling)
	
	couplings = zeros([nr_kept, nr_kept])
	
	#Retrieve eigensvectors.
	g = tables.openFile(filename_vib)
	try:
	    Vr = g.root.V[:,:,row_index] 
	    Vc = g.root.V[:,:,column_index]
	finally:
	    g.close()
	
	#Calculate couplings: <v_r| f(R) |v_c> = Vr^T * bfb_matrix * Vc.
	for r_index in range(nr_kept):
	    for c_index in range(nr_kept):
		couplings[r_index, c_index] = dot(Vr[:,r_index], 
		    dot(bfb_matrix, Vc[:,c_index]))
	
	

	#First file write. (Send, but not receive baton.)
	if starter == my_id:
	    #Write to file.
	    spline_basis.save_couplings(filename, couplings, 
		row_index, column_index)

	    #Avoiding this statement 2nd time around.
	    starter = -1

	    #Sending the baton to the next writer.
	    pypar.send(baton, send_to, use_buffer = True)
	
	#Last file write. (Receive, but not send baton.)
	elif i == my_tasks[-1] and ender == my_id :
	    #Receiving the baton from the previous writer.
	    pypar.receive(receive_from, buffer = baton)

	    #Write to file.
	    spline_basis.save_couplings(filename, couplings, 
		row_index, column_index)
	
	#The rest of the file writes.
	else:
	    #Receiving the baton from the previous writer.
	    pypar.receive(receive_from, buffer = baton)

	    #Write to file.
	    spline_basis.save_couplings(filename, couplings, 
		row_index, column_index)

	    #Sending the baton to the next writer.
	    pypar.send(baton, send_to, use_buffer = True)
	
	
	#Showing the progress of the work.
	if my_id == 0:
	    nice_stuff.status_bar("Calculating couplings:", 
		i, len(my_tasks))
    #----------------------------

    #Letting everyone catch up. 
    pypar.barrier()
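
The baton bookkeeping above is subtle because each rank may write several times and the first and last writers are special. Reduced to one write per rank, the relay race looks like the sketch below, where the hypothetical write_result() does the actual file access:

import pypar
from numpy import r_

def relay_write(write_result):
    """Each rank calls write_result() exactly once, in rank order."""
    my_id = pypar.rank()
    nr_procs = pypar.size()
    baton = r_[0]                                       # dummy payload
    if my_id > 0:
        pypar.receive(my_id - 1, buffer=baton)          # wait for my turn
    write_result()                                      # exclusive access to the shared file
    if my_id < nr_procs - 1:
        pypar.send(baton, my_id + 1, use_buffer=True)   # hand the baton on
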
Code Example #36
def save_all_eigenstates(filename_el, nr_kept, xmin, xmax, xsize, order):
    """
    save_all_eigenstates(filename_el, nr_kept, xmin, xmax, xsize, order)

    This program solves the vibrational TISE for a set of energy curves, 
    and stores them in an HDF5 file. 
    This program must be run in parallel.
    
    Example
    -------
    To run this program on 5 processors:
    
    $ mpirun -n 5 python -c "execfile('vibrational_BO.py');save_all_eigenstates()"
    """

    #Retrieve the electronic energy curves.
    f = tables.openFile(filename_el)
    try:
	r_grid = f.root.R_grid[:]
	energy_curves = f.root.E[:]

    finally:
	f.close()
    
    #Initialize the B-spline_basis.
    spline_basis = vibrational_methods.Bspline_basis(xmin, xmax, xsize, order)
    spline_basis.setup_kinetic_hamiltonian()
    spline_basis.setup_overlap_matrix()
    
    #Generate a filename.
    filename = name_gen.vibrational_eigenstates(filename_el, spline_basis)
    
    #Parallel stuff
    #--------------
    #Get processor 'name'.
    my_id = pypar.rank() 
    
    #Get total number of processors.
    nr_procs = pypar.size()
    
    #Get number of tasks.
    nr_tasks = len(energy_curves)

    #Get a list of the indices of this processor's share of the energy curves.
    my_tasks = nice_stuff.distribute_work(nr_procs, nr_tasks, my_id)

    #The processors will be writing to the same file.
    #In order to avoid problems, the procs will do a relay race of writing to
    #file. This is handled by blocking send() and receive().
    #Hopefully there will not be too much waiting.

    #ID of the processor that will start writing.
    starter = 0

    #ID of the processor that will be the last to write.
    ender = (nr_tasks - 1) % nr_procs

    #Buffer for the baton, i.e. the permission slip for file writing.
    baton = r_[0]

    #The processor one is to receive the baton from.
    receive_from = (my_id - 1) % nr_procs 

    #The processor one is to send the baton to.
    send_to = (my_id + 1) % nr_procs 
    #-------------------------------

    
    #Initializing the HDF5 file
    #--------------------------
    if my_id == 0:

	f = tables.openFile(filename, 'w')
	try:
	    f.createArray("/", "electronicFilename", [filename_el])	    
	    
	    f.createArray("/", "R_grid", r_grid)	    
	    
	    f.createArray("/", "overlap", spline_basis.overlap_matrix)	    
	    
	    #Initializing the arrays for the eigenvalues and states.
	    f.createCArray('/','E', 
		tables.atom.FloatAtom(), 
		(nr_kept, nr_tasks),
		chunkshape=(nr_kept, 1))
	    
	    f.createCArray('/','V', 
		tables.atom.FloatAtom(), 
		(spline_basis.nr_splines, nr_kept, nr_tasks),
		chunkshape=(spline_basis.nr_splines, nr_kept, 1))
	    
	    f.createCArray('/','hamiltonian', 
		tables.atom.FloatAtom(), 
		(spline_basis.nr_splines, spline_basis.nr_splines, nr_tasks),
		chunkshape=(spline_basis.nr_splines, spline_basis.nr_splines, 
		1))
	    

	    
	finally:
	    f.close()
	
	#Save spline info.
	spline_basis.bsplines.save_spline_info(filename)
    #----------------------------------

    #Solving the TISE
    #----------------
    #Looping over the tasks of this processor.
    for i in my_tasks:

	
	#TODO REMOVE?
	#remove_spikes removes points where the diagonalization has failed.
	#potential_hamiltonian = spline_basis.setup_potential_matrix(
	#    r_grid, remove_spikes(energy_curves[i,:]) + 1/r_grid)
	####

	#Setup potential matrix. 
	potential_hamiltonian = spline_basis.setup_potential_matrix(
	    r_grid, energy_curves[i,:] + 1/r_grid)
		
	#The total hamiltonian.
	hamiltonian_matrix = (spline_basis.kinetic_hamiltonian + 
	    potential_hamiltonian)

	#Diagonalizing the hamiltonian.
	E, V = spline_basis.solve(hamiltonian_matrix, nr_kept)
	
	#First file write. (Send, but not receive baton.)
	if starter == my_id:
	    #Write to file.
	    spline_basis.save_eigenstates(filename, E, V, 
		hamiltonian_matrix, i)

	    #Avoiding this statement 2nd time around.
	    starter = -1

	    #Sending the baton to the next writer.
	    pypar.send(baton, send_to, use_buffer = True)
	
	#Last file write. (Receive, but not send baton.)
	elif i == my_tasks[-1] and ender == my_id :
	    #Receiving the baton from the previous writer.
	    pypar.receive(receive_from, buffer = baton)

	    #Write to file.
	    spline_basis.save_eigenstates(filename, E, V, 
		hamiltonian_matrix, i)
	
	#The rest of the file writes.
	else:
	    #Receiving the baton from the previous writer.
	    pypar.receive(receive_from, buffer = baton)

	    #Write to file.
	    spline_basis.save_eigenstates(filename, E, V,
		hamiltonian_matrix, i)

	    #Sending the baton to the next writer.
	    pypar.send(baton, send_to, use_buffer = True)
	
	
	#Showing the progress of the work.
	if my_id == 0:
	    nice_stuff.status_bar("Vibrational BO calculations", 
		i, len(my_tasks))
    #----------------------------

    #Letting everyone catch up. 
    pypar.barrier()
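The helper nice_stuff.distribute_work is not shown in these examples. The bookkeeping above, where ender = (nr_tasks - 1) % nr_procs is the rank that writes last, only holds if tasks are dealt out cyclically. A hypothetical stand-in consistent with that convention:

def distribute_work(nr_procs, nr_tasks, my_id):
    """Hypothetical reconstruction: task i is owned by rank i % nr_procs."""
    return range(my_id, nr_tasks, nr_procs)

#Example: 7 tasks on 3 ranks.
for rank in range(3):
    print rank, distribute_work(3, 7, rank)
#0 [0, 3, 6]   (rank 0 owns the last task, matching (7 - 1) % 3 == 0)
#1 [1, 4]
#2 [2, 5]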
Code example #37
0
def TestPropagation(inputFile="groundstate_propagation.h5", **args):
    pyprop.PrintMemoryUsage("Before TestPropagation")

    Print("")
    Print("Now testing propagation...")
    pyprop.Redirect.Enable(silent=True)

    #Set up propagation problem
    potList = []
    #if not args.get("laserOff", False):
    #	Print("Setting up new problem with laser potentials...")
    #	potList += ["LaserPotentialVelocityDerivativeR1", "LaserPotentialVelocityDerivativeR2", "LaserPotentialVelocity"]
    #else:
    #	Print("Setting up new problem WITHOUT laser potentials...")

    #if not args.get("absorberOff", False):
    #	potList += ["Absorber"]
    #	Print("Setting up new problem with absorber...")
    #else:
    #	Print("Setting up new problem WITHOUT absorber...")

    pyprop.PrintMemoryUsage("Before SetupProblem")
    args["config"] = args.get("config", "config_propagation_nonorthdistr.ini")
    #args["config"] = args.get("config", "config_propagation.ini")
    prop = SetupProblem(**args)
    Print(
        "Proc %i has wavefunction shape = %s" %
        (pyprop.ProcId, list(prop.psi.GetData().shape)), [pyprop.ProcId])
    pyprop.PrintMemoryUsage("After SetupProblem")

    #Load initial state
    Print("Loading intial state...")
    pyprop.PrintMemoryUsage("Before Loading InitialState")
    prop.psi.Clear()
    pyprop.serialization.LoadWavefunctionHDF(inputFile, "/wavefunction",
                                             prop.psi)
    prop.psi.Normalize()
    initPsi = prop.psi.Copy()
    initialEnergyCalculated = prop.GetEnergyExpectationValue()
    Print("Initial State Energy = %s" % initialEnergyCalculated)
    pyprop.PrintMemoryUsage("After Loading InitialState")

    Print("Done setting up problem!")

    #Propagate
    Print("Starting propagation")
    outputCount = args.get("outputCount", 10)
    startTime = time.time()
    for step, t in enumerate(prop.Advance(outputCount)):
        #calculate values
        norm = prop.psi.GetNorm()
        corr = abs(initPsi.InnerProduct(prop.psi))**2

        #estimate remaining time
        curTime = time.time() - startTime
        totalTime = (curTime / t) * prop.Duration
        eta = totalTime - curTime

        #Print stats
        Print("t = %.2f; N = %.15f; Corr = %.10f, ETA = %s" %
              (t, norm, corr, FormatDuration(eta)))
        Print("GMRES errors = %s" %
              prop.Propagator.Solver.GetErrorEstimateList())
        pyprop.PrintMemoryUsage("At t = %s" % t)

    endTime = time.time()
    Print("Propagation time = %s" % FormatDuration(endTime - startTime))

    #Final output
    pyprop.PrintMemoryUsage("After Propagation")
    norm = prop.psi.GetNorm()
    corr = abs(initPsi.InnerProduct(prop.psi))**2

    Print("Final status: N = %.10f; Corr = %.10f" % (norm, corr))

    Print("")
    prop.Propagator.Solver.PrintStatistics()

    #finish and cleanup
    pypar.barrier()
    pyprop.Redirect.Disable()
    pyprop.PrintMemoryUsage("After TestPropagation")
    Print("\n...done!")
Code example #38
0
    def BO_dipole_couplings(self, m_list, q_list, E_lim):
	"""
	BO_dipole_couplings(m_list, q_list, E_lim)

	Parallel program that calculates the dipole couplings for a 
	z-polarized laser in length gauge. The basis consists of eigenstates 
	whose quantum numbers are in <m_list> and <q_list>, and whose 
	energies lie below <E_lim>. The couplings are stored to an HDF5 file.

	Parameters
	----------
	m_list : list of integers, containing the m values wanted in 
	    the basis.
	q_list : list of integers, containing the q values wanted in 
	    the basis.
	E_lim : float, the upper limit of the energies wanted in 
	    the basis, for R ~ 2.0.

	Notes
	-----
	I sometimes observe unnatural spikes in the couplings 
	(as a function of R), which should be removed before the couplings 
	are used. I don't know why they are there.    

	Example
	-------
	>>> filename = "el_states_m_0_nu_70_mu_25_beta_1_00_theta_0_00.h5"
	>>> tdse = tdse_electron.TDSE_length_z(filename = filename)
	>>> m = [0]
	>>> q = [0,1,2,3]
	>>> E_lim = 5.0
	>>> tdse.BO_dipole_couplings(m, q, E_lim)
	"""
	#Name of the HDF5 file where the couplings will be saved.
	self.coupling_file = name_gen.electronic_eig_couplings_R(self, 
	    m_list, q_list, E_lim)

	#Parallel stuff
	#--------------
	#Get processor 'name'.
	my_id = pypar.rank() 
	
	#Get total number of processors.
	nr_procs = pypar.size()

	#Size of eigenstate basis. (Buffer for broadcast.)
	basis_size_buffer = r_[0]

	#Get number of tasks.
	f = tables.openFile(self.eigenstate_file)
	try:
	    R_grid = f.root.R_grid[:]
	finally:
	    f.close()
	
	nr_tasks = len(R_grid)

	#Get a list of the indices of this processor's share of R_grid.
	my_tasks = nice_stuff.distribute_work(nr_procs, nr_tasks, my_id)

	#The processors will be writing to the same file.
	#In order to avoid problems, the procs will do a relay race of writing to
	#file. This is handled by blocking send() and receive().
	#Hopefully there will not be too much waiting.

	#ID of the processor that will start writing.
	starter = 0

	#ID of the processor that will be the last to write.
	ender = (nr_tasks - 1) % nr_procs

	#Buffer for the baton, i.e. the permission slip for file writing.
	baton = r_[0]

	#The processor one is to receive the baton from.
	receive_from = (my_id - 1) % nr_procs 

	#The processor one is to send the baton to.
	send_to = (my_id + 1) % nr_procs 
	#-------------------------------

	
	#Initializing the HDF5 file
	#--------------------------
	if my_id == 0:
	    
	    #Initialize index list.
	    index_array = []

	    #Find the index of the R closest to 2.0.
	    R_index = argmin(abs(R_grid - 2.0))
	    
	    #Choose basis functions.
	    f = tables.openFile(self.eigenstate_file)
	    try:
		for m in m_list:
		    m_group = name_gen.m_name(m)
		    for q in q_list:
			q_group = name_gen.q_name(q)
			for i in range(self.config.nu_max + 1):
			    if eval("f.root.%s.%s.E[%i,%i]"%(m_group, q_group, 
				i, R_index)) > E_lim:
				break
			    else:
				#Collect indices of the basis functions.
				index_array.append(r_[m, q, i])
	    finally:
		f.close()
	    
	    #Cast index list as an array.
	    index_array = array(index_array)
	    
	    #Number of eigenstates in the basis.
	    basis_size = len(index_array)
	    print basis_size, "is the basis size"
	    basis_size_buffer[0] = basis_size

	    f = tables.openFile(self.coupling_file, 'w')
	    try:
		f.createArray("/", "R_grid", R_grid)
		
		#Saving the index array.
		f.createArray("/", "index_array", index_array)
		
		#Initializing the arrays for the couplings and energies.
		f.createCArray('/', 'E', 
		    tables.atom.FloatAtom(), 
		    (basis_size, nr_tasks),
		    chunkshape=(basis_size, 1))
		
		f.createCArray('/', 'couplings', 
		    tables.atom.ComplexAtom(16), 
		    (basis_size, basis_size, nr_tasks),
		    chunkshape=(basis_size, basis_size, 1))
		
	    finally:
		f.close()
	    
	    #Save config instance.
	    self.config.save_config(self.coupling_file)
	#----------------------------------


	#Calculating the dipole couplings
	#--------------------------------
	#Broadcasting the basis size from processor 0.
	pypar.broadcast(basis_size_buffer, 0)

	#Initializing the index array.
	if my_id != 0:
	    index_array = zeros([basis_size_buffer[0], 3], dtype=int)
	
	#Broadcasting the index array from proc. 0.
	pypar.broadcast(index_array, 0)


	#Looping over the tasks of this processor.
	for i in my_tasks:

	    #Calculate the dipole couplings for one value of R.
	    couplings, E = self.calculate_dipole_eig_R(index_array, R_grid[i])


	    #First file write. (Send, but not receive baton.)
	    if starter == my_id:
		#Write to file.
		self.save_dipole_eig_R(couplings, E, R_grid[i])
		
		#Avoiding this statement 2nd time around.
		starter = -1

		#Sending the baton to the next writer.
		pypar.send(baton, send_to, use_buffer = True)

	    
	    #Last file write. (Receive, but not send baton.)
	    elif i == my_tasks[-1] and ender == my_id :
		#Receiving the baton from the previous writer.
		pypar.receive(receive_from, buffer = baton)

		#Write to file.
		self.save_dipole_eig_R(couplings, E, R_grid[i])
	    
	    #The rest of the file writes.
	    else:
		#Receiving the baton from the previous writer.
		pypar.receive(receive_from, buffer = baton)

		#Write to file.
		self.save_dipole_eig_R(couplings, E, R_grid[i])

		#Sending the baton to the next writer.
		pypar.send(baton, send_to, use_buffer = True)
	    
	    
	    #Showing the progress of the work.
	    if my_id == 0:
		nice_stuff.status_bar("Electronic dipole couplings:", 
		    i, len(my_tasks))
	#----------------------------
	
	#Letting everyone catch up. 
	pypar.barrier()
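Two details of the pypar.broadcast usage above are easy to miss: scalars travel in a one-element numpy buffer, and every non-root rank must pre-allocate the array with the correct shape and dtype before the in-place broadcast. A minimal sketch, assuming pypar and a made-up payload:

import pypar
from numpy import r_, zeros

my_id = pypar.rank()

size_buffer = r_[0]
if my_id == 0:
    size_buffer[0] = 42               #known only on the root
pypar.broadcast(size_buffer, 0)       #fills the buffer in place on every rank

index_array = zeros([size_buffer[0], 3], dtype=int)
if my_id == 0:
    index_array[:] = 1                #root fills the payload
pypar.broadcast(index_array, 0)       #now every rank holds a copy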
Code example #39
0
def TestPropagation(inputFile = "groundstate_propagation.h5", **args):
	pyprop.PrintMemoryUsage("Before TestPropagation")
	
	Print("")
	Print("Now testing propagation...")
	pyprop.Redirect.Enable(silent=True)

	#Set up propagation problem
	potList = []
	#if not args.get("laserOff", False):
	#	Print("Setting up new problem with laser potentials...")
	#	potList += ["LaserPotentialVelocityDerivativeR1", "LaserPotentialVelocityDerivativeR2", "LaserPotentialVelocity"]
	#else:
	#	Print("Setting up new problem WITHOUT laser potentials...")

	#if not args.get("absorberOff", False):
	#	potList += ["Absorber"]
	#	Print("Setting up new problem with absorber...")
	#else:
	#	Print("Setting up new problem WITHOUT absorber...")

	pyprop.PrintMemoryUsage("Before SetupProblem")
	args["config"] = args.get("config", "config_propagation_nonorthdistr.ini")
	#args["config"] = args.get("config", "config_propagation.ini")
	prop = SetupProblem(**args)
	Print("Proc %i has wavefunction shape = %s" % (pyprop.ProcId, list(prop.psi.GetData().shape)), [pyprop.ProcId])
	pyprop.PrintMemoryUsage("After SetupProblem")

	#Load initial state
	Print("Loading intial state...")
	pyprop.PrintMemoryUsage("Before Loading InitialState")
	prop.psi.GetData()[:] = 0
	pyprop.serialization.LoadWavefunctionHDF(inputFile, "/wavefunction", prop.psi)
	prop.psi.Normalize()
	initPsi = prop.psi.Copy()
	initialEnergyCalculated = prop.GetEnergyExpectationValue()
	Print("Initial State Energy = %s" % initialEnergyCalculated)
	pyprop.PrintMemoryUsage("After Loading InitialState")

	Print("Done setting up problem!")
	
	#Propagate
	Print("Starting propagation")
	outputCount = args.get("outputCount", 10)
	startTime = time.time()
	for step, t in enumerate(prop.Advance(outputCount)):
		#calculate values
		norm = prop.psi.GetNorm()
		corr = abs(initPsi.InnerProduct(prop.psi))**2

		#estimate remaining time
		curTime = time.time() - startTime
		totalTime = (curTime / t) * prop.Duration
		eta = totalTime - curTime

		#Print stats
		Print("t = %.2f; N = %.15f; Corr = %.10f, ETA = %s" % (t, norm, corr, FormatDuration(eta)))
		Print("GMRES errors = %s" % prop.Propagator.Solver.GetErrorEstimateList())
		pyprop.PrintMemoryUsage("At t = %s" % t)
	
	endTime = time.time()
	Print("Propagation time = %s" % FormatDuration(endTime-startTime))

	#Final output
	pyprop.PrintMemoryUsage("After Propagation")
	norm = prop.psi.GetNorm()
	corr = abs(initPsi.InnerProduct(prop.psi))**2

	Print("Final status: N = %.10f; Corr = %.10f" % (norm, corr))

	Print("")
	prop.Propagator.Solver.PrintStatistics()

	#finish and cleanup
	pypar.barrier()
	pyprop.Redirect.Disable()
	pyprop.PrintMemoryUsage("After TestPropagation")
	Print("\n...done!")
Code example #40
0
        #            bvalue = config_params[i/3]['BVAL_BEST']
        #        if i % 3 == 1:
        #            bvalue = config_params[i/3]['BVAL_UPPER']
        #        if i % 3 == 2:
        #            bvalue = config_params[i/3]['BVAL_LOWER']
        mmin = completeness_table[0][1]
        print 'mmin', mmin
        config = {
            "BandWidth": 50.,
            "Length_Limit": 3.,
            "increment": False,
            "bvalue": bvalue
        }
        ystart = completeness_table[-1][0]
        # Call the smoothing module
        run_smoothing(grid_lims, config, catalogue_depth_clean,
                      completeness_table, map_config, run, overwrite)

pypar.barrier()

if myid == 0:
    ss = int(pypar.time() - t0)
    h = ss / 3600
    m = (ss % 3600) / 60
    s = (ss % 3600) % 60
    print "--------------------------------------------------------"
    print 'P0: Total time (%i seconds): %s:%s:%s (hh:mm:ss)' % (
        ss, string.zfill(h, 2), string.zfill(m, 2), string.zfill(s, 2))
    print "--------------------------------------------------------"
pypar.finalize()
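A worked instance of the hh:mm:ss arithmetic above; the divisions are Python 2 integer divisions, and string.zfill pads the numbers to two digits:

import string

ss = 3725
h = ss / 3600           #1
m = (ss % 3600) / 60    #2
s = (ss % 3600) % 60    #5
print '%s:%s:%s' % (string.zfill(h, 2), string.zfill(m, 2), string.zfill(s, 2))
#prints 01:02:05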
Code example #41
0
File: mpi.py Project: xj361685640/fdtd_accelerate
	return node_length_x_list


if __name__ == '__main__':
	import scipy as sc

	Ny = 100
	Nz = 100

	Ey = sc.zeros((4, Ny, Nz),'f')
	Ez = sc.zeros((4, Ny, Nz),'f')

	pbc_opt=None
	if myrank == server:
		print 'PBC : %s, start' % pbc_opt
	mpi.barrier()

	Ey[:,:,:] = 0.
	Ez[:,:,:] = 0.
	Ey[1:3,:,:] = 1.
	Ez[1:3,:,:] = 1.
	mpi.barrier()


	if myrank != server:

		target_list, mpi_func_list = calc_oddeven( myrank )
		mpi_exchange( Ey, Ez, myrank, target_list, mpi_func_list )
		mpi_exchange_pbc( Ey, Ez, myrank, pbc_opt )
		print 'I`m', myrank,'Ey Direction x1 sum after = ', Ey[ 0,:,:].sum()
		print 'I`m', myrank,'Ey Direction x2 sum after = ', Ey[-1,:,:].sum()
Code example #42
0
def test():
	xmax = 2.
	N = 5
	bands = 3 
	
	dx = 2 * xmax / N

	#Create original matrix
	A = zeros((N, N), dtype=complex)
	for i in range(N):
		x = -xmax + i*dx
		for j in range(-bands, bands+1):
			if 0 <= j+i < N:
				A[i,j+i] = x**2 / (abs(j)+1)
	

	#Create packed matrix
	PackedA = zeros((N, 2*bands+1), complex)
	for i in range(N):
		for j in range(-bands, bands+1):
			if 0 <= j+i < N:
				row = i
				col = i+j
				packedRow, packedCol = MapRowColToPacked(row, col, N, bands)
				PackedA[packedRow, packedCol] = A[row, col]

	"""
	figure()
	imshow(A, interpolation="nearest")
	figure()
	imshow(PackedA, interpolation="nearest")
	"""

	#Create in-vector
	psi = rand(N) + 0.0j
	#send psi from proc0 to everyone
	pypar.broadcast(psi, 0)

	#output
	refOutput = dot(A, psi)

	#Create local vectors and matrices
	localSize = GetDistributedShape(N, ProcCount, ProcId)
	globalStartIndex = GetGlobalStartIndex(N, ProcCount, ProcId)
	globalEndIndex = globalStartIndex+localSize

	localPackedA = PackedA[globalStartIndex:globalEndIndex, :]
	localPsi = psi[globalStartIndex:globalEndIndex]
	localRefOutput = refOutput[globalStartIndex:globalEndIndex]

	localTestOutput = zeros(localSize, dtype=complex)
	
	for i in range(ProcCount):
		if i == ProcId:
			print "ProcId == %i" % (i)
			print localSize
			print globalStartIndex, " -> ", globalEndIndex
			print ""
		pypar.barrier()
	
	#BandedMatrixVectorMultiply(localPackedA, N, bands, localPsi, localTestOutput, ProcCount, ProcId)
	#BandedMatrixMultiply_Wrapper(localPackedA.reshape(localPackedA.size), 1.0, localPsi, localTestOutput, N, bands)
	TensorPotentialMultiply_BandedDistributed(localPackedA.reshape(localPackedA.size), 1.0, localPsi, localTestOutput, N, bands)

	#the verdict
	for i in range(ProcCount):
		if i == ProcId:
			if i == 0:
				print ""
				print refOutput
				print ""
			print "ProcId == %i" % (i)
			print sqrt(sum(abs(localRefOutput)**2))
			print sqrt(sum(abs(localTestOutput)**2))
			print sqrt(sum(abs(localRefOutput - localTestOutput)**2))
			#print localTestOutput
			#print localRefOutput
			print ""
		pypar.barrier()
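MapRowColToPacked is not defined in this snippet. Since PackedA has shape (N, 2*bands + 1), a natural diagonal-offset layout, offered here only as a hypothetical reconstruction, keeps the row index and shifts the column by the band offset:

def MapRowColToPacked(row, col, N, bands):
    """Hypothetical: dense element (row, col) lives at (row, col - row + bands)."""
    return row, col - row + bands

#Every element within the band lands inside the packed width 2*bands + 1:
N, bands = 5, 3
for row in range(N):
    for col in range(max(0, row - bands), min(N, row + bands + 1)):
        packedRow, packedCol = MapRowColToPacked(row, col, N, bands)
        assert 0 <= packedCol < 2 * bands + 1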
Code example #43
0
numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.get_processor_name()

if myid == 0:
  # Main process - Create message, pass on, verify correctness and log timing
  #
  print "MAXM = %d, number of processors = %d" %(MAXM, numprocs)
  print "Measurements are repeated %d times for reliability" %repeats

if numprocs < 2:
  print "Program needs at least two processors - aborting\n"
  pypar.abort()
   
pypar.barrier() #Synchronize all before timing   
print "I am process %d on %s" %(myid,processor_name)


#Initialise data and timings
#

try:
  from numpy.random import uniform, seed
  seed(17)
  A = uniform(0.0,100.0,MAXM)
except:
  print 'problem with RandomArray'
  from numpy import ones  #numpy has no 'Float' type; the old import was unused here
  A = ones(MAXM).astype('f')
  
Code example #44
0
File: mpi_nstat.py Project: qiuxing/corrperm
def main():
    #--------------------#
    # server code
    #--------------------#
    if rank == 0:
        print 'server running on ', procname

        opts = task(sys.argv)

        opts.printruninfo()

        sendtoall(('Start', sys.argv))
        server = serverdata(opts)

        #set up the collector and generator
        start = time.time()

        collector = resultcollector(server)
        end = time.time()
        print end-start
        
        jobs = jobgenerator(server)

        numjobsreceived = 0
        #begin distributing work
        for proc in xrange(1, min(numnodes, jobs.numjobs+1)):
            job = jobs.next(proc)
            pypar.send(('job',job), proc, tag=OUT)
        while numjobsreceived < jobs.jobindex:#while any job is still running
            #wait for any node to send a result
            msg, status = pypar.receive(pypar.any_source, return_status=True, tag=RETURN)
            numjobsreceived += 1
            proc, response = msg

            if jobs.hasnext(proc):#see if there is more work to be done
                job = jobs.next(proc)
                pypar.send(('job',job), proc, tag=OUT)#send it to the node that just completed

            #combine the results *after* sending the new job
            #(this way the worker can proceed while the results are being combined)
            collector.collect(response)


        #all jobs collected, kill the workers
        sendtoall(('Done', 0))

        #finish up the computation
        collector.finish()
        
    #--------------------#    
    # worker code
    #--------------------#
    else:
        while True:
            start = time.time()
            (code, msg), status = pypar.receive(0, return_status=True, tag=OUT)
            end = time.time()
            print 'waiting', end-start
            if code == 'Done':#all work is done
                opts.printruninfo()
                break
            elif code == 'Die':#abnormal exit
                break
            elif code == 'Start':
                opts = task(msg)
                sys.stdout = open(opts.logprefix+'%02d.log'%rank, 'w') #logfile
                print 'client', rank, 'running on', procname                
            else:
                start = time.time()
                jobnum, job = msg
                print jobnum
                result = opts.dojob(job)#do the job
                end = time.time()
                print 'working',msg[0], end-start

                start = time.time()
                pypar.send((rank, (jobnum, result)), 0, tag=RETURN)#return the result to the server
                end = time.time()
                print 'sending', end-start

    #------------------#
    #end of parallel code
    pypar.barrier()
    pypar.finalize()
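This master/worker example relies on module-level names defined outside the snippet. A sketch of plausible definitions; the tag values are assumptions, and any two distinct integers would do:

import pypar

OUT = 1       #tag for server -> worker job messages (assumed value)
RETURN = 2    #tag for worker -> server results (assumed value)
rank = pypar.rank()
numnodes = pypar.size()
procname = pypar.get_processor_name()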
Code example #45
0
File: tasks.py Project: AtomAleks/pyprop-helium
def CreatePath(absFileName):
	if pyprop.ProcId == 0:
		filePath = os.path.dirname(absFileName)
		if not os.path.exists(filePath):
			os.makedirs(filePath)
	pypar.barrier()
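Usage note: every rank calls CreatePath with the same target; only rank 0 touches the filesystem, and the barrier keeps the other ranks from opening the file before its directory exists. The path below is illustrative:

CreatePath("output/run01/result.h5")   #safe to call from all ranks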
Code example #46
0
File: pypar.py Project: pynbody/tangos
def barrier():
    pypar.barrier()
Code example #47
0
def save_electronic_eigenstates(m_max, nu_max, mu_max, R_grid, beta, theta):
    """
    save_electronic_eigenstates(m_max, nu_max, mu_max, R_grid, beta, theta)

    This program solves the electronic TISE for a range of internuclear 
    distances, given in <R_grid>, and stores the eigenstates in an HDF5 file. 
    This program must be run in parallel.
    
    Example
    -------
    To run this program on 5 processors:
    
	$  mpirun -n 5 python electronic_BO.py 
    """

    #Parallel stuff
    #--------------
    #Get processor 'name'.
    my_id = pypar.rank() 
    
    #Get total number of processors.
    nr_procs = pypar.size()
    
    #Get number of tasks.
    nr_tasks = len(R_grid)

    #Get a list of the indices of this processor's share of R_grid.
    my_tasks = nice_stuff.distribute_work(nr_procs, nr_tasks, my_id)

    #The processors will be writing to the same file.
    #In order to avoid problems, the procs will do a relay race of writing to
    #file. This is handled by blocking send() and receive().
    #Hopefully there will not be too much waiting.

    #ID of the processor that will start writing.
    starter = 0

    #ID of the processor that will be the last to write.
    ender = (nr_tasks - 1) % nr_procs

    #Buffer for the baton, i.e. the permission slip for file writing.
    baton = r_[0]

    #The processor one is to receive the baton from.
    receive_from = (my_id - 1) % nr_procs 

    #The processor one is to send the baton to.
    send_to = (my_id + 1) % nr_procs 
    #-------------------------------

    
    #Initializing the HDF5 file
    #--------------------------
    if my_id == 0:
	#Creates a config instance.
	my_config = config.Config(m = m_max, nu = nu_max, mu = mu_max, 
	    R = R_grid[0], beta = beta, theta = theta)
	
	#Number of basis functions.
	basis_size = (2 * m_max + 1) * (nu_max + 1) * (mu_max + 1)

	#Generate a filename.
	filename = name_gen.electronic_eigenstates_R(my_config)

	f = tables.openFile(filename, 'w')
	try:
	    f.createArray("/", "R_grid", R_grid)	    
	    
	    #Looping over the m values.
	    for m in range(-1 * m_max, m_max + 1):
		#Creating an m group in the file.
		m_group = name_gen.m_name(m)
		f.createGroup("/", m_group)
		
		#Looping over the q values.
		for q in range(mu_max + 1):
		    #Creating a q group in the m group in the file.
		    q_group = name_gen.q_name(q)
		    f.createGroup("/%s/"%m_group, q_group)

		    #Initializing the arrays for the eigenvalues and states.
		    f.createCArray('/%s/%s/'%(m_group, q_group),'E', 
			tables.atom.FloatAtom(), 
			(basis_size/(mu_max + 1), nr_tasks),
			chunkshape=(basis_size/(mu_max + 1), 1))
		    
		    f.createCArray('/%s/%s/'%(m_group, q_group),'V', 
			tables.atom.ComplexAtom(16), 
			(basis_size, basis_size/(mu_max + 1), nr_tasks),
			chunkshape=(basis_size, basis_size/(mu_max + 1), 1))
	    
	finally:
	    f.close()
	
	#Save config instance.
	my_config.save_config(filename)
    #----------------------------------


    #Solving the TISE
    #----------------
    #Looping over the tasks of this processor.
    for i in my_tasks:
	#Creating TISE instance.
	tise = tise_electron.TISE_electron(m = m_max, nu = nu_max, 
	    mu = mu_max, R = R_grid[i], beta = beta, theta = theta)
	
	#Diagonalizing the hamiltonian.
	E,V = tise.solve()
	
	#First file write. (Send, but not receive baton.)
	if starter == my_id:
	    #Write to file.
	    tise.save_eigenfunctions_R(E, V, R_grid[i])

	    #Avoiding this statement 2nd time around.
	    starter = -1

	    #Sending the baton to the next writer.
	    pypar.send(baton, send_to, use_buffer = True)
	
	#Last file write. (Receive, but not send baton.)
	elif i == my_tasks[-1] and ender == my_id :
	    #Receiving the baton from the previous writer.
	    pypar.receive(receive_from, buffer = baton)

	    #Write to file.
	    tise.save_eigenfunctions_R(E, V, R_grid[i])
	
	#The rest of the file writes.
	else:
	    #Receiving the baton from the previous writer.
	    pypar.receive(receive_from, buffer = baton)

	    #Write to file.
	    tise.save_eigenfunctions_R(E, V, R_grid[i])

	    #Sending the baton to the next writer.
	    pypar.send(baton, send_to, use_buffer = True)
	
	
	#Showing the progress of the work.
	if my_id == 0:
	    nice_stuff.status_bar("Electronic BO calculations", 
		i, len(my_tasks))
    #----------------------------
    
    #Letting everyone catch up. 
    pypar.barrier()

    #Since the signs of the eigenfunctions are completely arbitrary, one must
    #make sure they do not change sign from one R to another.
    if my_id == 0:
	tise.align_all_phases()
    
    #Letting 0 catch up. 
    pypar.barrier()
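A worked instance of the array sizes initialised above, with made-up quantum numbers:

m_max, nu_max, mu_max = 1, 2, 1
basis_size = (2 * m_max + 1) * (nu_max + 1) * (mu_max + 1)
print basis_size                   #18 basis functions in total
print basis_size / (mu_max + 1)    #9 eigenvalues stored per (m, q) group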
Code example #48
0
def run_multiple_windfields(scenario,
                            windfield_directory=None,
                            hazard_output_folder=None,
                            dircomment=None,
                            echo=False,
                            verbose=True):
    """Run volcanic ash impact model for multiple wind fields.

    The wind fields are assumed to be in the subfolder specified by windfield_directory,
    have the extension *.profile and follow the format used with scenarios.

    This function makes use of Open MPI and Pypar to execute in parallel but can also run sequentially.
    """

    try:
        import pypar
    except:
        P = 1
        p = 0
        processor_name = os.uname()[1]

        print 'Pypar could not be imported. Running sequentially on node %s' % processor_name,
    else:
        time.sleep(1)
        P = pypar.size()
        p = pypar.rank()
        processor_name = pypar.get_processor_name()

        print 'Processor %d initialised on node %s' % (p, processor_name)

        pypar.barrier()

    if p == 0:

        # Put logs along with the results
        logdir = os.path.join(hazard_output_folder, 'logs')
        makedir(logdir)

        header('Hazard modelling using multiple wind fields')
        print '*  Wind profiles obtained from: %s' % windfield_directory
        print '*  Scenario results stored in:  %s' % hazard_output_folder
        print '*  Log files:'

        t_start = time.time()

        # Communicate hazard output directory name to all nodes to ensure they have exactly the same time stamp.
        for i in range(P):
            pypar.send((hazard_output_folder), i)
    else:
        # Receive correctly timestamped output directory names
        hazard_output_folder = pypar.receive(0)
        logdir = os.path.join(hazard_output_folder, 'logs')

    try:
        name = os.path.splitext(scenario)[0]
    except:
        name = 'run'

    # Wait until log dir has been created
    pypar.barrier()

    params = get_scenario_parameters(scenario)

    # Start processes staggered to avoid race conditions for disk access (otherwise it is slow to get started)
    time.sleep(2 * p)

    # Logging
    s = 'Proc %i' % p
    print '     %s -' % string.ljust(s, 8),
    AIM_logfile = os.path.join(logdir, 'P%i.log' % p)
    start_logging(filename=AIM_logfile, echo=False)

    # Get cracking
    basename, _ = os.path.splitext(scenario)
    count_local = 0
    count_all = 0
    for i, file in enumerate(os.listdir(windfield_directory)):

        count_all += 1

        # Distribute jobs cyclically to processors
        if i % P == p:

            if not file.endswith('.profile'):
                continue

            count_local += 1

            windfield = '%s/%s' % (windfield_directory, file)
            windname, _ = os.path.splitext(file)
            header('Computing event %i on processor %i using wind field: %s' %
                   (i, p, windfield))

            if dircomment is None:
                dircomment = params['eruption_comment']

            # Override or create parameters derived from native Fall3d wind field
            params['wind_profile'] = windfield
            params['wind_altitudes'] = get_layers_from_windfield(
                windfield)  # FIXME: Try to comment this out.
            params['Meteorological_model'] = 'profile'

            if hazard_output_folder is None:
                hazard_output_folder = basename + '_hazard_outputs'

            if p == 0:
                print 'Storing multiple outputs in directory: %s' % hazard_output_folder

            # Run scenario
            aim = _run_scenario(params,
                                timestamp_output=True,
                                dircomment=dircomment + '_run%i_proc%i' %
                                (i, p))

            # Make sure folder is present and can be shared by group
            makedir(hazard_output_folder)
            s = 'chmod -R g+w %s' % hazard_output_folder
            run(s)

            # Copy result file to output folder
            result_file = aim.scenario_name + '.res.nc'
            newname = aim.scenario_name + '.%s.res.nc' % windname  # Name after wind file
            s = 'cp %s/%s %s/%s' % (aim.output_dir, result_file,
                                    hazard_output_folder, newname)
            run(s)

            # Create projectionfile in hazard output
            if i == 0:
                s = 'cp %s %s/%s' % (aim.projection_file, hazard_output_folder,
                                     'HazardMaps.res.prj')
                run(s)

            # Clean up outputs from this scenario
            print 'P%i: Cleaning up %s' % (p, aim.output_dir)
            s = '/bin/rm -rf %s' % aim.output_dir
            run(s)

    print 'Processor %i done %i windfields' % (p, count_local)
    print 'Outputs available in directory: %s' % hazard_output_folder

    pypar.barrier()
    if p == 0:
        print 'Parallel simulation finished %i windfields in %i seconds' % (
            count_all, time.time() - t_start)

    pypar.finalize()
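The job distribution above is cyclic: wind field i is handled by the rank p satisfying i % P == p, so the ranks need no coordination. A small worked example with made-up file names:

P = 3
files = ['w0.profile', 'w1.profile', 'w2.profile', 'w3.profile']
for p in range(P):
    mine = [f for i, f in enumerate(files) if i % P == p]
    print p, mine
#0 ['w0.profile', 'w3.profile']
#1 ['w1.profile']
#2 ['w2.profile']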
Code example #49
0
def test():
    xmax = 2.0
    N = 5
    bands = 3

    dx = 2 * xmax / N

    # Create original matrix
    A = zeros((N, N), dtype=complex)
    for i in range(N):
        x = -xmax + i * dx
        for j in range(-bands, bands + 1):
            if 0 <= j + i < N:
                A[i, j + i] = x ** 2 / (abs(j) + 1)

                # Create packed matrix
    PackedA = zeros((N, 2 * bands + 1), complex)
    for i in range(N):
        for j in range(-bands, bands + 1):
            if 0 <= j + i < N:
                row = i
                col = i + j
                packedRow, packedCol = MapRowColToPacked(row, col, N, bands)
                PackedA[packedRow, packedCol] = A[row, col]

    """
	figure()
	imshow(A, interpolation="nearest")
	figure()
	imshow(PackedA, interpolation="nearest")
	"""

    # Create in-vector
    psi = rand(N) + 0.0j
    # send psi from proc0 to everyone
    pypar.broadcast(psi, 0)

    # output
    refOutput = dot(A, psi)

    # Create local vectors and matrices
    localSize = GetDistributedShape(N, ProcCount, ProcId)
    globalStartIndex = GetGlobalStartIndex(N, ProcCount, ProcId)
    globalEndIndex = globalStartIndex + localSize

    localPackedA = PackedA[globalStartIndex:globalEndIndex, :]
    localPsi = psi[globalStartIndex:globalEndIndex]
    localRefOutput = refOutput[globalStartIndex:globalEndIndex]

    localTestOutput = zeros(localSize, dtype=complex)

    for i in range(ProcCount):
        if i == ProcId:
            print "ProcId == %i" % (i)
            print localSize
            print globalStartIndex, " -> ", globalEndIndex
            print ""
        pypar.barrier()

    # BandedMatrixVectorMultiply(localPackedA, N, bands, localPsi, localTestOutput, ProcCount, ProcId)
    # BandedMatrixMultiply_Wrapper(localPackedA.reshape(localPackedA.size), 1.0, localPsi, localTestOutput, N, bands)
    TensorPotentialMultiply_BandedDistributed(
        localPackedA.reshape(localPackedA.size), 1.0, localPsi, localTestOutput, N, bands
    )

    # the verdict
    for i in range(ProcCount):
        if i == ProcId:
            if i == 0:
                print ""
                print refOutput
                print ""
            print "ProcId == %i" % (i)
            print sqrt(sum(abs(localRefOutput) ** 2))
            print sqrt(sum(abs(localTestOutput) ** 2))
            print sqrt(sum(abs(localRefOutput - localTestOutput) ** 2))
            # print localTestOutput
            # print localRefOutput
            print ""
        pypar.barrier()
Code example #50
0
File: rmat.py Project: AndreasFetzer/VTK
if a+b+c+d != 1.0:
  if me == 0: print "ERROR: a,b,c,d must sum to 1"
  sys.exit()

if fraction >= 1.0:
  if me == 0: print "ERROR: fraction must be < 1"
  sys.exit()

random.seed(seed+me)
order = 1 << nlevels

mr = mrmpi()

# loop until desired number of unique nonzero entries

pypar.barrier()
tstart = pypar.time()

niterate = 0
ntotal = (1 << nlevels) * nnonzero
nremain = ntotal
while nremain:
  niterate += 1
  ngenerate = nremain/nprocs
  if me < nremain % nprocs: ngenerate += 1
  mr.map(nprocs,generate,None,1)
  nunique = mr.collate()
  if nunique == ntotal: break
  mr.reduce(cull)
  nremain = ntotal - nunique
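A worked instance of the per-iteration split above: the first nremain % nprocs ranks generate one extra entry, so exactly nremain candidates are produced in total.

nremain, nprocs = 10, 4
for me in range(nprocs):
  ngenerate = nremain / nprocs            #Python 2 integer division
  if me < nremain % nprocs: ngenerate += 1
  print me, ngenerate
#ranks 0 and 1 generate 3 each; ranks 2 and 3 generate 2 each; total = 10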
Code example #51
0
def TestSolveOverlap():
    pyprop.PrintOut("")
    pyprop.PrintOut("Now testing S^-1 * psi...")
    pyprop.Redirect.Enable(silent=True)

    seed(0)

    fileName = "test_solveoverlap.h5"

    conf = pyprop.Load("config-test.ini")
    psi = pyprop.CreateWavefunction(conf)
    initPsi = pyprop.CreateWavefunction(conf)

    if pyprop.ProcCount == 1:
        psi.GetData()[:] = random(psi.GetData().shape)
        psi.Normalize()
        initPsi.GetData()[:] = psi.GetData()[:]

        #Store initial (random) psi
        pyprop.serialization.SaveWavefunctionHDF(fileName, "/wavefunction",
                                                 psi, conf)

        #Store S^-1 * psi
        psi.GetRepresentation().SolveOverlap(psi)
        pyprop.serialization.SaveWavefunctionHDF(fileName,
                                                 "/wavefunctionoverlap", psi,
                                                 conf)

        #determine overlap matrix condition number
        overlap = psi.GetRepresentation().GetGlobalOverlapMatrix(0)
        A = overlap.GetOverlapBlasBanded()
        B = pyprop.core.ConvertMatrixBlasBandedToFull(A)
        Print("  Overlap matrix condition number = %e" % cond(B))

    else:
        pyprop.serialization.LoadWavefunctionHDF(fileName, "/wavefunction",
                                                 initPsi)
        pyprop.serialization.LoadWavefunctionHDF(fileName,
                                                 "/wavefunctionoverlap", psi)

    destPsi = initPsi.Copy()
    destPsi.Clear()
    tmpPsi = initPsi.Copy()
    tmpPsi.Clear()

    #Calculate S^-1 * psi
    destPsi.GetData()[:] = initPsi.GetData()[:]
    destPsi.GetRepresentation().SolveOverlap(destPsi)
    tmpPsi.GetData()[:] = destPsi.GetData()[:]

    #Calculate S * S^-1 * psi
    destPsi.GetRepresentation().MultiplyOverlap(destPsi)

    Print()
    a = numpy.max(numpy.abs(psi.GetData() - tmpPsi.GetData()))  #abs() so this is the max norm the label claims
    Print(
        "  Proc %s: ||S^-1 * psi - S'^-1 * psi||_max = %s" %
        (pyprop.ProcId, a), range(pyprop.ProcCount))
    Print()
    b = numpy.max(numpy.abs(initPsi.GetData() - destPsi.GetData()))  #abs() for a true max norm
    Print(
        "  Proc %s: ||S * S^-1 * psi - I * psi||_max = %s" %
        (pyprop.ProcId, b), range(pyprop.ProcCount))

    c = linalg.norm(initPsi.GetData() - destPsi.GetData())
    Print("  Proc %s: ||S * S^-1 * psi - I * psi|| = %s" % (pyprop.ProcId, c),
          range(pyprop.ProcCount))

    #finish and cleanup
    pypar.barrier()
    pyprop.Redirect.Disable()
    pyprop.PrintOut("\n...done!")