def BO_dipole_couplings(self, m_list, q_list, E_lim):
	"""
	BO_dipole_couplings(m_list, q_list, E_lim)

	Parallel program that calculates the dipole couplings for a 
	z-polarized laser in length gauge. An eigenstate basis is used, 
	consisting of the states whose quantum numbers are in <m_list> and 
	<q_list>, and whose energies lie below <E_lim>. The couplings are 
	stored in an HDF5 file.

	Parameters
	----------
	m_list : list of integers, the m values to include in the basis.
	q_list : list of integers, the q values to include in the basis.
	E_lim : float, the upper limit on the energies included in the basis, 
	    evaluated at the grid point closest to R = 2.0.

	Notes
	-----
	Unphysical spikes sometimes appear in the couplings (as a function 
	of R); their cause is unknown, and they should be removed before the 
	couplings are used. A possible filtering approach is sketched below.
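
	A minimal sketch of one way such spikes could be filtered, for a 
	real-valued coupling curve c sampled on R_grid (the helper name and 
	threshold below are illustrative, not part of this module):

	>>> def filter_spikes(c, threshold=5.0):
	...     c = c.copy()
	...     dev = abs(c - median(c))
	...     bad = dev > threshold * (median(dev) + 1e-12)
	...     good = where(~bad)[0]
	...     c[bad] = interp(where(bad)[0], good, c[good])
	...     return c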

	Example
	-------
	>>> filename = "el_states_m_0_nu_70_mu_25_beta_1_00_theta_0_00.h5"
	>>> tdse = tdse_electron.TDSE_length_z(filename = filename)
	>>> m = [0]
	>>> q = [0,1,2,3]
	>>> E_lim = 5.0
	>>> tdse.BO_dipole_couplings(m, q, E_lim)
	"""
	#Name of the HDF5 file where the couplings will be saved.
	self.coupling_file = name_gen.electronic_eig_couplings_R(self, 
	    m_list, q_list, E_lim)

	#Parallel stuff
	#--------------
	#Get processor 'name'.
	my_id = pypar.rank() 
	
	#Get total number of processors.
	nr_procs = pypar.size()

	#Size of eigenstate basis. (Buffer for broadcast.)
	basis_size_buffer = r_[0]

	#Get number of tasks.
	f = tables.openFile(self.eigenstate_file)
	try:
	    R_grid = f.root.R_grid[:]
	finally:
	    f.close()
	
	nr_tasks = len(R_grid)

	#Get a list of the indices of this processor's share of R_grid. 
	my_tasks = nice_stuff.distribute_work(nr_procs, nr_tasks, my_id)

	#The processors will all be writing to the same file.
	#In order to avoid problems, the procs do a relay race of writing to
	#file: a 'baton' is passed around, and only the process holding it 
	#writes. This is handled by blocking send() and receive().
	#Hopefully there will not be too much waiting. (An illustrative 
	#sketch of the pattern is given after this function.)

	#ID of the processor that will start writing.
	starter = 0

	#ID of the processor that will be the last to write.
	ender = (nr_tasks - 1) % nr_procs

	#Buffer for the baton, i.e. the permission slip for file writing.
	baton = r_[0]

	#The processor from which the baton is received.
	receive_from = (my_id - 1) % nr_procs 

	#The processor to which the baton is sent.
	send_to = (my_id + 1) % nr_procs 
	#-------------------------------

	
	#Initializing the HDF5 file
	#--------------------------
	if my_id == 0:
	    
	    #Initialize index list.
	    index_array = []

	    #Find the index of the R closest to 2.0.
	    R_index = argmin(abs(R_grid - 2.0))
	    
	    #Choose basis functions.
	    f = tables.openFile(self.eigenstate_file)
	    try:
		for m in m_list:
		    m_group = name_gen.m_name(m)
		    for q in q_list:
			q_group = name_gen.q_name(q)
			for i in range(self.config.nu_max + 1):
			    if eval("f.root.%s.%s.E[%i,%i]"%(m_group, q_group, 
				i, R_index)) > E_lim:
				break
			    else:
				#Collect indices of the basis functions.
				index_array.append(r_[m, q, i])
	    finally:
		f.close()
	    
	    #Cast index list as an array.
	    index_array = array(index_array)
	    
	    #Number of eigenstates in the basis.
	    basis_size = len(index_array)
	    print basis_size, "is the basis size"
	    basis_size_buffer[0] = basis_size

	    f = tables.openFile(self.coupling_file, 'w')
	    try:
		f.createArray("/", "R_grid", R_grid)
		
		#Saving the index array.
		f.createArray("/", "index_array", index_array)
		
		#Initializing the arrays for the couplings and energies.
		f.createCArray('/', 'E', 
		    tables.atom.FloatAtom(), 
		    (basis_size, nr_tasks),
		    chunkshape=(basis_size, 1))
		
		f.createCArray('/', 'couplings', 
		    tables.atom.ComplexAtom(16), 
		    (basis_size, basis_size, nr_tasks),
		    chunkshape=(basis_size, basis_size, 1))
		
	    finally:
		f.close()
	    
	    #Save config instance.
	    self.config.save_config(self.coupling_file)
	#----------------------------------


	#Calculating the dipole couplings
	#--------------------------------
	#Broadcasting the basis size from processor 0.
	pypar.broadcast(basis_size_buffer, 0)

	#Initializing the index array.
	if my_id != 0:
	    index_array = zeros([basis_size_buffer[0], 3], dtype=int)
	
	#Broadcasting the index array from proc. 0.
	pypar.broadcast(index_array, 0)


	#Looping over the tasks of this processor.
	for i in my_tasks:

	    #Calculate the dipole couplings for one value of R.
	    couplings, E = self.calculate_dipole_eig_R(index_array, R_grid[i])


	    #First file write. (Send, but not receive baton.)
	    if starter == my_id:
		#Write to file.
		self.save_dipole_eig_R(couplings, E, R_grid[i])
		
		#Avoiding this statement 2nd time around.
		starter = -1

		#Sending the baton to the next writer.
		pypar.send(baton, send_to, use_buffer = True)

	    
	    #Last file write. (Receive, but not send baton.)
	    elif i == my_tasks[-1] and ender == my_id :
		#Receiving the baton from the previous writer.
		pypar.receive(receive_from, buffer = baton)

		#Write to file.
		self.save_dipole_eig_R(couplings, E, R_grid[i])
	    
	    #The rest of the file writes.
	    else:
		#Receiving the baton from the previous writer.
		pypar.receive(receive_from, buffer = baton)

		#Write to file.
		self.save_dipole_eig_R(couplings, E, R_grid[i])

		#Sending the baton to the next writer.
		pypar.send(baton, send_to, use_buffer = True)
	    
	    
	    #Showing the progress of the work.
	    if my_id == 0:
		nice_stuff.status_bar("Electronic dipole couplings:", 
		    i, len(my_tasks))
	#----------------------------
	
	#Letting everyone catch up. 
	pypar.barrier()
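

#The 'baton relay' write pattern used in this module, shown in isolation.
#This is an illustrative sketch only and is not called anywhere: it assumes 
#pypar, nice_stuff and numpy's r_ are available as in the rest of this 
#module, and that write_my_data(i) is a hypothetical callback that writes 
#the results of task i to the shared HDF5 file.
def _baton_relay_sketch(nr_tasks, write_my_data):
    my_id = pypar.rank()
    nr_procs = pypar.size()
    my_tasks = nice_stuff.distribute_work(nr_procs, nr_tasks, my_id)

    #The baton is the permission slip for writing to the file.
    baton = r_[0]
    starter = 0
    ender = (nr_tasks - 1) % nr_procs
    receive_from = (my_id - 1) % nr_procs
    send_to = (my_id + 1) % nr_procs

    for i in my_tasks:
        if starter == my_id:
            #First writer: write first, then hand the baton on.
            write_my_data(i)
            starter = -1
            pypar.send(baton, send_to, use_buffer = True)
        elif i == my_tasks[-1] and ender == my_id:
            #Last writer: receive the baton, write, and keep it.
            pypar.receive(receive_from, buffer = baton)
            write_my_data(i)
        else:
            #Everyone else: receive the baton, write, and pass it on.
            pypar.receive(receive_from, buffer = baton)
            write_my_data(i)
            pypar.send(baton, send_to, use_buffer = True)

    #Letting everyone catch up.
    pypar.barrier()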
def save_all_eigenstates(filename_el, nr_kept, xmin, xmax, xsize, order):
    """
    save_all_eigenstates(filename_el, nr_kept, xmin, xmax, xsize, order)

    This program solves the vibrational TISE for a set of electronic energy 
    curves, and stores the resulting eigenvalues and eigenstates in an HDF5 
    file. 
    This program must be run in parallel.
    
    Example
    -------
    To run this program on 5 processors:
    
    $ mpirun -n 5 python -c "execfile('vibrational_BO.py'); save_all_eigenstates(...)"
    """

    #Retrieve the electronic energy curves.
    f = tables.openFile(filename_el)
    try:
	r_grid = f.root.R_grid[:]
	energy_curves = f.root.E[:]

    finally:
	f.close()
    
    #Initialize the B-spline basis.
    spline_basis = vibrational_methods.Bspline_basis(xmin, xmax, xsize, order)
    spline_basis.setup_kinetic_hamiltonian()
    spline_basis.setup_overlap_matrix()
    
    #Generate a filename.
    filename = name_gen.vibrational_eigenstates(filename_el, spline_basis)
    
    #Parallel stuff
    #--------------
    #Get processor 'name'.
    my_id = pypar.rank() 
    
    #Get total number of processors.
    nr_procs = pypar.size()
    
    #Get number of tasks.
    nr_tasks = len(energy_curves)

    #Get a list of the indices of this processor's share of the energy curves. 
    my_tasks = nice_stuff.distribute_work(nr_procs, nr_tasks, my_id)

    #The processors will be writing to the same file.
    #In order to avoid problems, the procs do a relay race of writing to
    #file: a 'baton' is passed around, and only the process holding it 
    #writes. This is handled by blocking send() and receive().
    #Hopefully there will not be too much waiting.

    #ID of the processor that will start writing.
    starter = 0

    #ID of the processor that will be the last to write.
    ender = (nr_tasks - 1) % nr_procs

    #Buffer for the baton, i.e. the permission slip for file writing.
    baton = r_[0]

    #The processor from which the baton is received.
    receive_from = (my_id - 1) % nr_procs 

    #The processor to which the baton is sent.
    send_to = (my_id + 1) % nr_procs 
    #-------------------------------

    
    #Initializing the HDF5 file
    #--------------------------
    if my_id == 0:

	f = tables.openFile(filename, 'w')
	try:
	    f.createArray("/", "electronicFilename", [filename_el])	    
	    
	    f.createArray("/", "R_grid", r_grid)	    
	    
	    f.createArray("/", "overlap", spline_basis.overlap_matrix)	    
	    
	    #Initializing the arrays for the eigenvalues and states.
	    f.createCArray('/','E', 
		tables.atom.FloatAtom(), 
		(nr_kept, nr_tasks),
		chunkshape=(nr_kept, 1))
	    
	    f.createCArray('/','V', 
		tables.atom.FloatAtom(), 
		(spline_basis.nr_splines, nr_kept, nr_tasks),
		chunkshape=(spline_basis.nr_splines, nr_kept, 1))
	    
	    f.createCArray('/','hamiltonian', 
		tables.atom.FloatAtom(), 
		(spline_basis.nr_splines, spline_basis.nr_splines, nr_tasks),
		chunkshape=(spline_basis.nr_splines, spline_basis.nr_splines, 
		1))
	    

	    
	finally:
	    f.close()
	
	#Save spline info.
	spline_basis.bsplines.save_spline_info(filename)
    #----------------------------------

    #Solving the TISE
    #----------------
    #Looping over the tasks of this processor.
    for i in my_tasks:

	
	#TODO REMOVE?
	#remove_spikes removes points where the diagonalization has failed.
	#potential_hamiltonian = spline_basis.setup_potential_matrix(
	#    r_grid, remove_spikes(energy_curves[i,:]) + 1/r_grid)
	####

	#Setup potential matrix. 
	potential_hamiltonian = spline_basis.setup_potential_matrix(
	    r_grid, energy_curves[i,:] + 1/r_grid)
		
	#The total hamiltonian.
	hamiltonian_matrix = (spline_basis.kinetic_hamiltonian + 
	    potential_hamiltonian)

	#Diagonalizing the hamiltonian.
	E, V = spline_basis.solve(hamiltonian_matrix, nr_kept)
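
	#The solve() call above is assumed to solve the generalized eigenvalue
	#problem H c = E S c, where S is the B-spline overlap matrix (the 
	#splines are not orthogonal), and to return the nr_kept lowest 
	#eigenpairs. A rough scipy equivalent, for illustration only:
	#    from scipy.linalg import eigh
	#    E, V = eigh(hamiltonian_matrix, spline_basis.overlap_matrix)
	#    E, V = E[:nr_kept], V[:, :nr_kept]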
	
	#First file write. (Send, but not receive baton.)
	if starter == my_id:
	    #Write to file.
	    spline_basis.save_eigenstates(filename, E, V, 
		hamiltonian_matrix, i)

	    #Avoiding this statement 2nd time around.
	    starter = -1

	    #Sending the baton to the next writer.
	    pypar.send(baton, send_to, use_buffer = True)
	
	#Last file write. (Receive, but not send baton.)
	elif i == my_tasks[-1] and ender == my_id :
	    #Receiving the baton from the previous writer.
	    pypar.receive(receive_from, buffer = baton)

	    #Write to file.
	    spline_basis.save_eigenstates(filename, E, V, 
		hamiltonian_matrix, i)
	
	#The rest of the file writes.
	else:
	    #Receiving the baton from the previous writer.
	    pypar.receive(receive_from, buffer = baton)

	    #Write to file.
	    spline_basis.save_eigenstates(filename, E, V,
		hamiltonian_matrix, i)

	    #Sending the baton to the next writer.
	    pypar.send(baton, send_to, use_buffer = True)
	
	
	#Showing the progress of the work.
	if my_id == 0:
	    nice_stuff.status_bar("Vibrational BO calculations", 
		i, len(my_tasks))
    #----------------------------

    #Letting everyone catch up. 
    pypar.barrier()
def save_eigenfunction_couplings(filename_el, nr_kept, xmin, xmax, xsize, order):
    """
    save_eigenfunction_couplings(filename_el, nr_kept, xmin, xmax, xsize, order)

    This program sets up the laser interaction hamiltonian for the 
    eigenfunction basis, and stores it in an HDF5 file. 
    This program must be run in parallel.
    
    Example
    -------
    To run this program on 5 processors:
    
    $ mpirun -n 5 python -c "execfile('vibrational_BO.py'); save_eigenfunction_couplings(...)"
    """
    
    #Retrieve the R grid and the size of the electronic basis.
    f = tables.openFile(filename_el)
    try:
	r_grid = f.root.R_grid[:]
	#Size of the electronic basis.
	el_basis_size = f.root.couplings.shape[0]
    finally:
	f.close()
    
    #Filter functions, describing which index pairs should be included in 
    #the calculations.
    def no_filter(index_pair):
	"""
	All couplings included.
	"""
	return True
    
    def symmetry_filter(index_pair):
	"""
	Only include the upper/lower triangular, since the hermeticity means
	that they are the same.
	"""
	i = index_pair[0]
	j = index_pair[1]
	if i >= j:
	    return True
	else:
	    return False    
    
    #Make a list of the coupling indices that should be included.
    index_table = create_index_table(el_basis_size, no_filter)
    nr_tasks = len(index_table)
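
    #create_index_table is assumed to return the (row, column) index pairs 
    #accepted by the given filter, roughly equivalent to:
    #    [(i, j) for i in range(el_basis_size) for j in range(el_basis_size)
    #        if no_filter((i, j))]
    #With no_filter every pair is kept; symmetry_filter would keep only the 
    #lower triangle, i >= j.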

    #Initialize the B-spline basis.
    spline_basis = vibrational_methods.Bspline_basis(xmin, xmax, xsize, order)
    vib_basis_size = spline_basis.nr_splines
    
    #Generate a filename.
    filename = name_gen.eigenfunction_couplings(filename_el, spline_basis)

    #Name of the file containing the vibrational states.
    filename_vib = name_gen.vibrational_eigenstates(filename_el, spline_basis)
    
    #Parallel stuff
    #--------------
    #Get processor 'name'.
    my_id = pypar.rank() 
    
    #Get total number of processors.
    nr_procs = pypar.size()
    
    #Get a list of the indices of this processor's share of the index pairs. 
    my_tasks = nice_stuff.distribute_work(nr_procs, nr_tasks, my_id)

    #The processors will be writing to the same file.
    #In order to avoid problems, the procs do a relay race of writing to
    #file: a 'baton' is passed around, and only the process holding it 
    #writes. This is handled by blocking send() and receive().
    #Hopefully there will not be too much waiting.

    #ID of the processor that will start writing.
    starter = 0

    #ID of the processor that will be the last to write.
    ender = (nr_tasks - 1) % nr_procs

    #Buffer for the baton, i.e. the permission slip for file writing.
    baton = r_[0]

    #The processor from which the baton is received.
    receive_from = (my_id - 1) % nr_procs 

    #The processor to which the baton is sent.
    send_to = (my_id + 1) % nr_procs 
    #-------------------------------

    
    #Initializing the HDF5 file
    #--------------------------
    if my_id == 0:

	f = tables.openFile(filename, 'w')
	g = tables.openFile(filename_vib)
	try:
	    f.createArray("/", "electronicFilename", [filename_el])	    
	    
	    #Initializing the arrays for the time dependent couplings of H.
	    f.createCArray('/','couplings', 
		tables.atom.FloatAtom(), 
		(nr_kept * el_basis_size, 
		 nr_kept * el_basis_size),
		chunkshape=(nr_kept,nr_kept))
	    
	    #Energy diagonal. Time independent part of H. 
	    energy_diagonal = zeros(nr_kept * el_basis_size)
	    for i in range(el_basis_size):
		energy_diagonal[nr_kept * i:
		    nr_kept * (i + 1)] = g.root.E[:nr_kept,i]
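
	    #Note on ordering (implied by the loop above): the combined basis 
	    #index is el_state * nr_kept + vib_state, i.e. the vibrational 
	    #index runs fastest within each electronic block. The 'couplings' 
	    #array above is presumably laid out the same way, one 
	    #nr_kept x nr_kept block per pair of electronic states.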

	    f.createArray("/", "energyDiagonal", energy_diagonal)
	    
	finally:
	    f.close()
	    g.close()
	
	#Save spline info.
	spline_basis.bsplines.save_spline_info(filename)
    #----------------------------------


    #Setting up the hamiltonian
    #--------------------------
    #Looping over the tasks of this processor.
    for i in my_tasks:
	#Retrieve indices.
	row_index, column_index = index_table[i]

	#Retrieve electronic couplings.
	f = tables.openFile(filename_el)
	try:
	    el_coupling = f.root.couplings[row_index, column_index,:]
	finally:
	    f.close()
	
#	#TODO REMOVE?
#	#Remove errors from the coupling. (A hack, unfortunately.) 
#	r_grid_2, el_coupling_2 = remove_spikes(r_grid, el_coupling)
#
#	#Setup potential matrix.
#	couplings = spline_basis.setup_potential_matrix(
#	    r_grid_2, el_coupling_2)
#	
	#Setup potential matrix. Aij = <Bi | f(R) | Bj>
	bfb_matrix = spline_basis.setup_potential_matrix(
	    r_grid, el_coupling)
	
	couplings = zeros([nr_kept, nr_kept])
	
	#Retrieve eigenvectors.
	g = tables.openFile(filename_vib)
	try:
	    Vr = g.root.V[:,:,row_index] 
	    Vc = g.root.V[:,:,column_index]
	finally:
	    g.close()
	
	#Calculate couplings: <v_row | f(R) | v_col> in the B-spline basis.
	for r_index in range(nr_kept):
	    for c_index in range(nr_kept):
		couplings[r_index, c_index] = dot(Vr[:,r_index], 
		    dot(bfb_matrix, Vc[:,c_index]))
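
	#Equivalently, as a vectorized sketch (assuming Vr and Vc hold at 
	#least nr_kept columns):
	#    couplings = dot(Vr[:, :nr_kept].T, dot(bfb_matrix, Vc[:, :nr_kept]))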
	
	

	#First file write. (Send, but not receive baton.)
	if starter == my_id:
	    #Write to file.
	    spline_basis.save_couplings(filename, couplings, 
		row_index, column_index)

	    #Avoiding this statement 2nd time around.
	    starter = -1

	    #Sending the baton to the next writer.
	    pypar.send(baton, send_to, use_buffer = True)
	
	#Last file write. (Receive, but not send baton.)
	elif i == my_tasks[-1] and ender == my_id :
	    #Receiving the baton from the previous writer.
	    pypar.receive(receive_from, buffer = baton)

	    #Write to file.
	    spline_basis.save_couplings(filename, couplings, 
		row_index, column_index)
	
	#The rest of the file writes.
	else:
	    #Receiving the baton from the previous writer.
	    pypar.receive(receive_from, buffer = baton)

	    #Write to file.
	    spline_basis.save_couplings(filename, couplings, 
		row_index, column_index)

	    #Sending the baton to the next writer.
	    pypar.send(baton, send_to, use_buffer = True)
	
	
	#Showing the progress of the work.
	if my_id == 0:
	    nice_stuff.status_bar("Calculating couplings:", 
		i, len(my_tasks))
    #----------------------------

    #Letting everyone catch up. 
    pypar.barrier()
def save_electronic_eigenstates(m_max, nu_max, mu_max, R_grid, beta, theta):
    """
    save_electronic_eigenstates(m_max, nu_max, mu_max, R_grid, beta, theta)

    This program solves the electronic TISE for a range of internuclear 
    distances, given in <R_grid>, and stores the eigenstates in an HDF5 file. 
    This program must be run in parallel.
    
    Example
    -------
    To run this program on 5 processors:
    
	$  mpirun -n 5 python electronic_BO.py 
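
    where electronic_BO.py is assumed to end with a call along these lines 
    (the parameter values below are placeholders only):

	R_grid = linspace(0.5, 12.0, 200)
	save_electronic_eigenstates(m_max = 2, nu_max = 70, mu_max = 25, 
	    R_grid = R_grid, beta = 1.0, theta = 0.0)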
    """

    #Parallel stuff
    #--------------
    #Get processor 'name'.
    my_id = pypar.rank() 
    
    #Get total number of processors.
    nr_procs = pypar.size()
    
    #Get number of tasks.
    nr_tasks = len(R_grid)

    #Get a list of the indices of this processor's share of R_grid. 
    my_tasks = nice_stuff.distribute_work(nr_procs, nr_tasks, my_id)

    #The processors will be writing to the same file.
    #In order to avoid problems, the procs do a relay race of writing to
    #file: a 'baton' is passed around, and only the process holding it 
    #writes. This is handled by blocking send() and receive().
    #Hopefully there will not be too much waiting.

    #ID of the processor that will start writing.
    starter = 0

    #ID of the processor that will be the last to write.
    ender = (nr_tasks - 1) % nr_procs

    #Buffer for the baton, i.e. the permission slip for file writing.
    baton = r_[0]

    #The processor from which the baton is received.
    receive_from = (my_id - 1) % nr_procs 

    #The processor to which the baton is sent.
    send_to = (my_id + 1) % nr_procs 
    #-------------------------------

    
    #Initializing the HDF5 file
    #--------------------------
    if my_id == 0:
	#Creates a config instance.
	my_config = config.Config(m = m_max, nu = nu_max, mu = mu_max, 
	    R = R_grid[0], beta = beta, theta = theta)
	
	#Number of basis functions.
	basis_size = (2 * m_max + 1) * (nu_max + 1) * (mu_max + 1)

	#Generate a filename.
	filename = name_gen.electronic_eigenstates_R(my_config)

	f = tables.openFile(filename, 'w')
	try:
	    f.createArray("/", "R_grid", R_grid)	    
	    
	    #Looping over the m values.
	    for m in range(-1 * m_max, m_max + 1):
		#Creating an m group in the file.
		m_group = name_gen.m_name(m)
		f.createGroup("/", m_group)
		
		#Looping over the q values.
		for q in range(mu_max + 1):
		    #Creating a q group in the m group in the file.
		    q_group = name_gen.q_name(q)
		    f.createGroup("/%s/"%m_group, q_group)

		    #Initializing the arrays for the eigenvalues and states.
		    f.createCArray('/%s/%s/'%(m_group, q_group),'E', 
			tables.atom.FloatAtom(), 
			(basis_size/(mu_max + 1), nr_tasks),
			chunkshape=(basis_size/(mu_max + 1), 1))
		    
		    f.createCArray('/%s/%s/'%(m_group, q_group),'V', 
			tables.atom.ComplexAtom(16), 
			(basis_size, basis_size/(mu_max + 1), nr_tasks),
			chunkshape=(basis_size, basis_size/(mu_max + 1), 1))
	    
	finally:
	    f.close()
	
	#Save config instance.
	my_config.save_config(filename)
    #----------------------------------


    #Solving the TISE
    #----------------
    #Looping over the tasks of this processor.
    for i in my_tasks:
	#Creating TISE instance.
	tise = tise_electron.TISE_electron(m = m_max, nu = nu_max, 
	    mu = mu_max, R = R_grid[i], beta = beta, theta = theta)
	
	#Diagonalizing the hamiltonian.
	E,V = tise.solve()
	
	#First file write. (Send, but not receive baton.)
	if starter == my_id:
	    #Write to file.
	    tise.save_eigenfunctions_R(E, V, R_grid[i])

	    #Avoiding this statement 2nd time around.
	    starter = -1

	    #Sending the baton to the next writer.
	    pypar.send(baton, send_to, use_buffer = True)
	
	#Last file write. (Receive, but not send baton.)
	elif i == my_tasks[-1] and ender == my_id :
	    #Receiving the baton from the previous writer.
	    pypar.receive(receive_from, buffer = baton)

	    #Write to file.
	    tise.save_eigenfunctions_R(E, V, R_grid[i])
	
	#The rest of the file writes.
	else:
	    #Receiving the baton from the previous writer.
	    pypar.receive(receive_from, buffer = baton)

	    #Write to file.
	    tise.save_eigenfunctions_R(E, V, R_grid[i])

	    #Sending the baton to the next writer.
	    pypar.send(baton, send_to, use_buffer = True)
	
	
	#Showing the progress of the work.
	if my_id == 0:
	    nice_stuff.status_bar("Electronic BO calculations", 
		i, len(my_tasks))
    #----------------------------
    
    #Letting everyone catch up. 
    pypar.barrier()

    #Since the signs of the eigenfunctions are completely arbitrary, one must
    #make sure they do not flip from one R to the next. (An illustrative 
    #sketch of this kind of alignment is given below.)
    if my_id == 0:
	tise.align_all_phases()
    
    #Letting the other processors wait for processor 0 to finish. 
    pypar.barrier()
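

#Illustrative only: a minimal sketch of the kind of phase (sign) alignment 
#align_all_phases is expected to perform. For each eigenstate, the overlap 
#between the eigenvectors at neighbouring R values should keep a consistent 
#sign; if it flips, the eigenvector is negated. The real method must also 
#read and write the HDF5 file (and, for a non-orthogonal basis, include the 
#basis overlap matrix in the inner product); this sketch works on a plain 
#(basis_size, nr_states, nr_R) array V, assumes numpy names (dot, conj, 
#real) are available as in the rest of this module, and is not called 
#anywhere.
def _align_phases_sketch(V):
    for r in range(1, V.shape[2]):
        for j in range(V.shape[1]):
            overlap = dot(conj(V[:, j, r - 1]), V[:, j, r])
            if real(overlap) < 0:
                V[:, j, r] = -V[:, j, r]
    return V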