def cube_generator_dftbplus(project_name, time_step, min_band, max_band,
                            waveplot_exe, isUKS):
    """
    This function generates the cube files by running the waveplot program of DFTB+.
    It then renames the cube files from min_band to max_band to follow the CP2K naming convention.
    This makes it easier to reuse the read_cube and integrate_cube functions.

    Args:

        project_name (string): The project_name.

        time_step (integer): The time step.

        min_band (integer): The minimum state number.

        max_band (integer): The maximum state number.

        waveplot_exe (str): Location of the executable for the waveplot program of the dftb+ software package

        isUKS (integer): The unrestricted spin calculation flag. 1 is for spin unrestricted calculations.
                         Other numbers are for spin restricted calculations

    Returns:

        None

    """

    # Make the cubefiles. For dftb+, this simply means executing the waveplot program
    # The min and max band are already defined in the waveplot template, so we just use
    # them here for renaming the cube files
    os.system(waveplot_exe)

    # For now, only spin-restricted
    for state in range(min_band, max_band + 1):
        # Use cp2k names because the rest of the code expects this format
        state_name = CP2K_methods.state_num_cp2k(state)
        cube_name = '%s-%d-WFN_%s_1-1_0.cube' % (project_name, time_step,
                                                 state_name)
        print('Renaming cube for state %d' % state)
        # Now, rename the cube file from what waveplot calls it to the standard format
        os.system("mv *" + str(state) + "-real.cube " + cube_name)
Example #2
def cube_generator_gaussian( project_name, time_step, min_band, max_band, nprocs, sample_cube_file, isUKS ):
    """
    This function generates the cube files by first forming the 'fchk' file from 'chk' file.
    Then it will generate the cube files from min_band to max_band, following the CP2K naming convention.
    This makes it easier to use the read_cube and integrate_cube functions.

    Args:

        project_name (string): The project_name.

        time_step (integer): The time step.

        min_band (integer): The minimum state number.

        max_band (integer): The maximum state number.

        nprocs (integer): The number of processors used to generate the cubes using 'cubegen'.

        sample_cube_file (str): The path to a sample cube file. This file is used to generate the cube
                                files on the same mesh as this file, so that the integration is consistent.

        isUKS (integer): The unrestricted spin calculation flag. 1 is for spin unrestricted calculations.
                         Other numbers are for spin restricted calculations

    Returns:

        None

    """

    # Form the fchk file from the chk file
    os.system('formchk %s-%d.chk %s-%d.fchk'%(project_name, time_step, project_name, time_step))
    # Print the commands...
    print('formchk %s-%d.chk %s-%d.fchk'%(project_name, time_step, project_name, time_step))
    # Generate the sample cube file; if it already exists it will not be created again
    if not os.path.isfile(sample_cube_file):
        # Generate the sample cube file with the maximum fineness ---> 100 as in Gaussian website
        os.system('cubegen %d MO=Homo %s-%d.fchk %s 100 h'%(nprocs, project_name, time_step, sample_cube_file))
    # For spin unrestricted
    if isUKS == 1:
        # Generate the names and cube files for each state. Here we use the CP2K cube file naming so the rest of the workflow can reuse them.
        for state in range(min_band,max_band+1):
            # State name in CP2K format
            state_name = CP2K_methods.state_num_cp2k(state)
            # Cube file name 
            cube_name = '%s-%d-WFN_%s_1-1_0.cube'%(project_name, time_step, state_name)
            print('Generating cube for state %d'%state)
            # Generate cube files for alpha spin using the 'cubegen'
            os.system('cubegen %d AMO=%d %s-%d.fchk %s -1 h %s'%(nprocs, state, project_name, time_step, cube_name, sample_cube_file))
            print('cubegen %d AMO=%d %s-%d.fchk %s -1 h %s'%(nprocs, state, project_name, time_step, cube_name, sample_cube_file))
            cube_name = '%s-%d-WFN_%s_2-1_0.cube'%(project_name, time_step, state_name)
            print('Generating cube for state %d'%state)
            # Generate cube files for beta spin using the 'cubegen'
            os.system('cubegen %d BMO=%d %s-%d.fchk %s -1 h %s'%(nprocs, state, project_name, time_step, cube_name, sample_cube_file))
            print('cubegen %d BMO=%d %s-%d.fchk %s -1 h %s'%(nprocs, state, project_name, time_step, cube_name, sample_cube_file))
    # Spin restricted case
    else:
        for state in range(min_band,max_band+1):
            # Use cp2k names because the rest of the code expects this format
            state_name = CP2K_methods.state_num_cp2k(state)
            cube_name = '%s-%d-WFN_%s_1-1_0.cube'%(project_name, time_step, state_name)
            print('Generating cube for state %d'%state)
            # Generate the cubes for alpha spin only
            os.system('cubegen %d MO=%d %s-%d.fchk %s -1 h %s'%(nprocs, state, project_name, time_step, cube_name, sample_cube_file))
            print('cubegen %d MO=%d %s-%d.fchk %s -1 h %s'%(nprocs, state, project_name, time_step, cube_name, sample_cube_file))
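
# Usage sketch with illustrative values only: a spin-restricted run on 4 processors for
# time step 5, generating cubes for states 10..20 on the mesh of sample.cube. It assumes
# the checkpoint file Cd33Se33-5.chk from the Gaussian calculation is present.
cube_generator_gaussian('Cd33Se33', 5, 10, 20, 4, 'sample.cube', 0)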
Example #3
def gaussian_distribute( istep, fstep, nsteps_this_job, trajectory_xyz_file, gaussian_input, curr_job_number ):
    """
    Distributes Gaussian jobs for trivial parallelization 

    Make sure that your Gaussian input file has absolute paths to the following input parameters:
        These parameters should be set in the input template
        chk file ---> e.g.  %chk=/home/username/gaussian_calculations/check_file.chk
        rwf file ---> e.g.  %rwf=/home/username/gaussian_calculations/check_file.rwf

    Args:
        
        istep (integer): The initial time step in the trajectory xyz file.

        fstep (integer): The final time step in the trajectory xyz file.

        nsteps_this_job (integer): The number of steps for this job.

        trajectory_xyz_file (string): The full path to trajectory xyz file.

        gaussian_input (string): The sample Gaussian input template.

        curr_job_number (integer): The current job number.

    Returns:

        None

    """

    # Now we need to distribute the jobs into job batches
    # First, change into the working directory (wd) where the calculations take place
    os.chdir("wd")

    nsteps = fstep - istep + 1
    njobs  = int( nsteps / nsteps_this_job ) 

    # Initialize the curr_step to istep
    curr_step = istep

    # Make the job directory folders
    os.system("mkdir job"+str(curr_job_number)+"")
    os.chdir("job"+str(curr_job_number)+"")
    # Copy the trajectory file and input template there
    os.system("cp ../../"+trajectory_xyz_file+" .")
    os.system("cp ../../"+gaussian_input+" .")

    # Now, in job folder njob, we should do only a certain number of steps
    for step in range( nsteps_this_job ):

        # Extract the coordinates and write them to a xyz file
        CP2K_methods.read_trajectory_xyz_file( trajectory_xyz_file, curr_step )

        # Now, create the Gaussian input file for this step from the template
        tmp = open(gaussian_input)
        A   = tmp.readlines()
        tmp.close()

        # Copy the non-empty template lines into the per-step input file
        tmp2 = open("step_%d.gjf" % curr_step, "w")
        for line in A:
            if not line.strip().split():
                continue
            tmp2.write(line)
        tmp2.close()
        # add to the curr_step
        curr_step += 1

    # Go back to the main directory
    os.chdir("../../") 
Example #4
import os
import sys
from libra_py import CP2K_methods

run_slurm = False
submit_template = 'submit_template.slm'
run_python_file = 'run_template.py'
istep = 0
fstep = 5
njobs = 1
os.system('rm -rf res job* all_logfiles all_pdosfiles')

print('Distributing jobs...')
CP2K_methods.distribute_cp2k_xtb_jobs(submit_template, run_python_file, istep,
                                      fstep, njobs, run_slurm)
Example #5
#es_software = "gaussian"
istep = 0
fstep = 1  #3000
njobs = 1  #600
for njob in range(njobs):

    job_init_step, job_final_step = step2_many_body.curr_and_final_step_job(
        istep, fstep, njobs, njob)
    nsteps_this_job = job_final_step - job_init_step + 1

    print('\n1- We are before creating job ', njob, ' in directory:',
          os.getcwd())
    # Here we create the jobs through CP2K_methods.cp2k_distribute or Gaussian_methods.gaussian_distribute
    if es_software.lower() == "cp2k":
        CP2K_methods.cp2k_distribute(job_init_step, job_final_step,
                                     nsteps_this_job, trajectory_xyz_file,
                                     es_software_input_template, njob)

    elif es_software.lower() == "gaussian":
        Gaussian_methods.gaussian_distribute(job_init_step, job_final_step,
                                             nsteps_this_job,
                                             trajectory_xyz_file,
                                             es_software_input_template, njob)

    print('2- Finished distributing for job ', njob,
          '\nNow we are in directory', os.getcwd())
    os.chdir("wd/job" + str(njob) + "/")

    print('3- Now we have changed directory to :', os.getcwd(),
          'To submit job', njob)
    os.mkdir("cubefiles")
Example #6

params['highest_orbital'] = 28+20
# extended tight-binding calculation type
params['isxTB'] = False
# DFT calculation type
params['isUKS'] = False
# Periodic calculations flag
params['is_periodic'] = False
# Set the cell parameters for periodic calculations
if params['is_periodic']:
    params['A_cell_vector'] = [50.0,0.0,0.0]
    params['B_cell_vector'] = [0.0,50.0,0.0]
    params['C_cell_vector'] = [0.0,0.0,50.0]
    params['periodicity_type'] = 'XYZ'
    # Set the origin
    origin = [0,0,0]
    tr_vecs = params['translational_vectors'] = CP2K_methods.generate_translational_vectors(origin, [1,1,1],
                                                                                            params['periodicity_type'])
    
    print('The translational vectors for the current periodic system are:\n')
    print(tr_vecs)
    print(F'Will compute the S^AO between R(0,0,0) and {tr_vecs.shape[0]+1} translational vectors')

# The AO overlaps in spherical or Cartesian coordinates
params['is_spherical'] =  True
# Remove the molden files, which are large files for some systems,
# after the computation is done for that system
params['remove_molden'] = True
# Cube visualization using VMD
params['cube_visualization'] = True
if params['cube_visualization']:
    # The only parts that we will change in this template are loading the cubes and rendering the images.
    params['vmd_input_template'] = '../vmd_cube_template.tcl'
Example #7
def run_cp2k_xtb_step2(params):
    """
    This function runs step 2 for xTB calculations: it computes the MO overlaps and saves them as sparse
    matrices using the scipy.sparse library. In order to load the saved sparse matrices you need to use the
    scipy.sparse.load_npz command.

    Args:

        params (dictionary): A dictionary containing the following parameters:

                             res_dir (string): The directory for saving the MO overlaps.

                             all_logfiles (string): The directory to save all log files.

                             all_pdosfiles (string): The directory to save all pdos files.

                             istep (integer): The initial step.

                             fstep (integer): The final step.

                             init_state (integer): The initial state.

                             final_state (integer): The final state.

                             is_spherical (bool): Flag for spherical coordinates.

                             remove_molden (bool): Flag for removing the molden files after computing the MO overlaps.

                             nprocs (integer): Number of processors.

                             cp2k_ot_input_template (string): The CP2K OT input template file name.

                             cp2k_diag_input_template (string): The CP2K diagonalization input template file name.

                             trajectory_xyz_filename (string): The trajectory xyz file name.

                             step (integer): The time step.

                             cp2k_exe (string): The full path to CP2K executable.

    """
    # Making the required directories for gathering all the information needed,
    # including the overlap data and pdos files. The log files do not contain
    # specific data but we finally move them to all_logfiles
    if not os.path.exists(params['res_dir']):
        os.mkdir(params['res_dir'])
    if not os.path.exists(params['all_logfiles']):
        os.mkdir(params['all_logfiles'])
    if not os.path.exists(params['all_pdosfiles']):
        os.mkdir(params['all_pdosfiles'])
    # setting up the initial step and final step
    istep = params['istep']
    fstep = params['fstep']
    # spherical or cartesian GTOs
    is_spherical = params['is_spherical']
    # number of processors
    nprocs = params['nprocs']
    # the counter for the job steps; this counter is needed to avoid reading
    # the data of a molden file twice
    counter = 0
    print('-----------------------Start-----------------------')
    for step in range(istep, fstep):
        # a timer for all the procedure
        t1_all = time.time()
        print('-----------------------Step %d-----------------------' % step)
        params['step'] = step
        # timer for CP2K
        t1 = time.time()
        CP2K_methods.run_cp2k_xtb(params)
        print('Done with step', step, 'Elapsed time:', time.time() - t1)
        # now if the counter is equal to zero
        # just compute the MO overlap of that step.
        if counter == 0:
            t1 = time.time()
            print('Creating shell...')
            shell_1, l_vals = molden_methods.molden_file_to_libint_shell('Diag_%d-libra-1_0.molden'%step,\
                                                                          is_spherical)
            print('Done with creating shell. Elapsed time:', time.time() - t1)
            t1 = time.time()
            print('Reading energies and eigenvectors....')
            eig_vect_1, energies_1 = molden_methods.eigenvectors_molden('Diag_%d-libra-1_0.molden'%step,\
                                                                        nbasis(shell_1),l_vals)
            print('Done with reading energies and eigenvectors. Elapsed time:',
                  time.time() - t1)
            if params['remove_molden']:
                os.system('rm Diag_%d-libra-1_0.molden' % step)
            print('Computing atomic orbital overlap matrix...')
            t1 = time.time()
            AO_S = compute_overlaps(shell_1, shell_1, nprocs)
            print('Done with computing atomic orbital overlaps. Elapsed time:',
                  time.time() - t1)
            t1 = time.time()
            print('Turning the MATRIX to numpy array...')
            AO_S = data_conv.MATRIX2nparray(AO_S)
            print('Done with transforming MATRIX 2 numpy array. Elapsed time:',
                  time.time() - t1)
            istate = params['init_state']
            fstate = params['final_state']
            ## Now, we need to resort the eigenvectors based on the new indices
            print('Resorting eigenvectors elements...')
            t1 = time.time()
            new_indices = CP2K_methods.resort_molog_eigenvectors(l_vals)
            eigenvectors_1 = []
            for j in range(len(eig_vect_1)):
                # the new and sorted eigenvector
                eigenvector_1 = eig_vect_1[j]
                eigenvector_1 = eigenvector_1[new_indices]
                # append it to the eigenvectors list
                eigenvectors_1.append(eigenvector_1)
            eigenvectors_1 = np.array(eigenvectors_1)
            print('Done with resorting eigenvectors elements. Elapsed time:',
                  time.time() - t1)
            ##
            t1 = time.time()
            print('Computing and saving molecular orbital overlaps...')
            # Note that we choose the data from istate to fstate
            # the values for istate and fstate start from 1
            S = np.linalg.multi_dot([eigenvectors_1, AO_S,
                                     eigenvectors_1.T])[istate - 1:fstate - 1,
                                                        istate - 1:fstate - 1]
            # creating zero matrix
            mat_block_size = len(S)
            zero_mat = np.zeros((mat_block_size, mat_block_size))
            S_step = data_conv.form_block_matrix(S, zero_mat, zero_mat, S)
            # Since a lot of the data are zeros we save them as sparse matrices
            S_step_sparse = scipy.sparse.csc_matrix(S_step)
            E_step = data_conv.form_block_matrix(np.diag(energies_1)[istate-1:fstate-1,istate-1:fstate-1],\
                                                 zero_mat,zero_mat,\
                                                 np.diag(energies_1)[istate-1:fstate-1,istate-1:fstate-1])
            E_step_sparse = scipy.sparse.csc_matrix(E_step)
            scipy.sparse.save_npz(params['res_dir'] + '/S_ks_%d.npz' % step,
                                  S_step_sparse)
            scipy.sparse.save_npz(params['res_dir'] + '/E_ks_%d.npz' % step,
                                  E_step_sparse)
            print(
                'Done with computing molecular orbital overlaps. Elapsed time:',
                time.time() - t1)
            print('Done with step %d.' % step, 'Elapsed time:',
                  time.time() - t1_all)
        else:
            # The same procedure as above but now we compute time-overlaps as well
            t1 = time.time()
            print('Creating shell...')
            shell_2, l_vals = molden_methods.molden_file_to_libint_shell('Diag_%d-libra-1_0.molden'%step,\
                                                                          is_spherical)
            print('Done with creating shell. Elapsed time:', time.time() - t1)
            t1 = time.time()
            print('Reading energies and eigenvectors....')
            eig_vect_2, energies_2 = molden_methods.eigenvectors_molden('Diag_%d-libra-1_0.molden'%step,\
                                                                        nbasis(shell_2),l_vals)
            print('Done with reading energies and eigenvectors. Elapsed time:',
                  time.time() - t1)
            if params['remove_molden']:
                os.system('rm Diag_%d-libra-1_0.molden' % step)
            print('Computing atomic orbital overlap matrix...')
            t1 = time.time()
            AO_S = compute_overlaps(shell_2, shell_2, nprocs)
            AO_St = compute_overlaps(shell_1, shell_2, nprocs)
            print('Done with computing atomic orbital overlaps. Elapsed time:',
                  time.time() - t1)
            t1 = time.time()
            print('Turning the MATRIX to numpy array...')
            AO_S = data_conv.MATRIX2nparray(AO_S)
            AO_St = data_conv.MATRIX2nparray(AO_St)
            print('Done with transforming MATRIX 2 numpy array. Elapsed time:',
                  time.time() - t1)
            ## Now, we need to resort the eigenvectors based on the new indices
            print('Resorting eigenvectors elements...')
            t1 = time.time()
            new_indices = CP2K_methods.resort_molog_eigenvectors(l_vals)
            eigenvectors_2 = []
            for j in range(len(eig_vect_2)):
                # the new and sorted eigenvector
                eigenvector_2 = eig_vect_2[j]
                eigenvector_2 = eigenvector_2[new_indices]
                # append it to the eigenvectors list
                eigenvectors_2.append(eigenvector_2)
            eigenvectors_2 = np.array(eigenvectors_2)
            print('Done with resorting eigenvectors elements. Elapsed time:',
                  time.time() - t1)
            ##
            t1 = time.time()
            print('Computing and saving molecular orbital overlaps...')
            S = np.linalg.multi_dot([eigenvectors_2, AO_S,
                                     eigenvectors_2.T])[istate - 1:fstate - 1,
                                                        istate - 1:fstate - 1]
            St = np.linalg.multi_dot([eigenvectors_1, AO_S,
                                      eigenvectors_2.T])[istate - 1:fstate - 1,
                                                         istate - 1:fstate - 1]
            S_step = data_conv.form_block_matrix(S, zero_mat, zero_mat, S)
            St_step = data_conv.form_block_matrix(St, zero_mat, zero_mat, St)
            S_step_sparse = scipy.sparse.csc_matrix(S_step)
            St_step_sparse = scipy.sparse.csc_matrix(St_step)
            E_step = data_conv.form_block_matrix(np.diag(energies_2)[istate-1:fstate-1,istate-1:fstate-1],\
                                                 zero_mat,zero_mat,\
                                                 np.diag(energies_2)[istate-1:fstate-1,istate-1:fstate-1])
            E_step_sparse = scipy.sparse.csc_matrix(E_step)
            scipy.sparse.save_npz(params['res_dir'] + '/S_ks_%d.npz' % step,
                                  S_step_sparse)
            scipy.sparse.save_npz(
                params['res_dir'] + '/St_ks_%d.npz' % (step - 1),
                St_step_sparse)
            scipy.sparse.save_npz(params['res_dir'] + '/E_ks_%d.npz' % step,
                                  E_step_sparse)
            print(
                'Done with computing molecular orbital overlaps. Elapsed time:',
                time.time() - t1)
            shell_1 = shell_2
            energies_1 = energies_2
            eigenvectors_1 = eigenvectors_2
            print('Removing unnecessary wfn files...')
            os.system('rm OT_%d-RESTART*' % (step - 1))
            os.system('rm Diag_%d-RESTART*' % (step - 1))
            print('Done with step %d.' % step, 'Elapsed time:',
                  time.time() - t1_all)
        counter += 1
    # Finally move all the pdos and log files to all_pdosfiles and all_logfiles
    os.system('mv *pdos %s/.' % params['all_pdosfiles'])
    os.system('mv *log %s/.' % params['all_logfiles'])
    print('Done with the job!!!')
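
# Usage sketch: a minimal params dictionary built from the keys documented in the docstring
# above. The values are illustrative only, and CP2K_methods.run_cp2k_xtb may require
# additional keys that are not listed here.
params = {
    'res_dir': 'res',                    # where the sparse S/St/E matrices are saved
    'all_logfiles': 'all_logfiles',      # where the log files are collected
    'all_pdosfiles': 'all_pdosfiles',    # where the pdos files are collected
    'istep': 0, 'fstep': 5,              # initial and final steps for this job
    'init_state': 1, 'final_state': 30,  # state range (1-based) kept in the overlaps
    'is_spherical': True,                # spherical vs. Cartesian GTOs
    'remove_molden': True,               # delete molden files once the overlaps are built
    'nprocs': 4,
    'cp2k_ot_input_template': 'cp2k_ot_template.inp',
    'cp2k_diag_input_template': 'cp2k_diag_template.inp',
    'trajectory_xyz_filename': 'md.xyz',
    'cp2k_exe': '/full/path/to/cp2k.popt',
}
run_cp2k_xtb_step2(params)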
Example #8
import time
import numpy as np
# nbasis and compute_overlaps come from the core Libra library
from liblibra_core import *
from libra_py import CP2K_methods
from libra_py import data_conv
# assumed import path for molden_methods (used below)
from libra_py import molden_methods

# number of processors
nprocs = 4
# set up the timer
t1 = time.time()
# creating the shell for the molden file
shells_1, l_vals = molden_methods.molden_file_to_libint_shell('CdSe13-CdSe13-1_0.molden',True)
# extracting the eigenvectors and energies from molden file
eig1, ener1 = molden_methods.eigenvectors_molden('CdSe13-CdSe13-1_0.molden',nbasis(shells_1),l_vals)
# compute the AO overlap matrix
AO = compute_overlaps(shells_1,shells_1,nprocs)
# turn it into a numpy array
AO = data_conv.MATRIX2nparray(AO)
# new indices for the MOLog eigenvectors
new_indices = CP2K_methods.resort_molog_eigenvectors(l_vals)
# making all the reindexed eigenvectors
eigenvectors = []
for i in range(len(eig1)):
    # the new and sorted eigenvector
    eigenvector = eig1[i]
    eigenvector = eigenvector[new_indices]
    # append it to the eigenvectors list
    eigenvectors.append(eigenvector)
# make it a numpy array to be able to work with    
eigs = np.array(eigenvectors)
# compute the MO overlap
S = np.linalg.multi_dot([eigs, AO ,eigs.T])
# print out the diagonal element of the MO overlap matrix
# to make sure you get 1 on the diagonal
print(np.diag(S))
Example #9
###############################################################
"""
  2. Extract CI-like coefficients
"""

if thermal == True:
    logfiles = glob.glob('all_logfiles/*.log')

elif thermal == False:
    logfiles = glob.glob('../tddft/*.log')

ci_coeffs = []
for logfile in logfiles:
    params.update({"logfile_name": logfile})
    excitation_energies, ci_basis_raw, ci_coefficients_raw_unnorm, spin_components = CP2K_methods.read_cp2k_tddfpt_log_file( params ) 
    ci_coefficients_raw_norm = step2_many_body.normalize_ci_coefficients(ci_coefficients_raw_unnorm)
    for j in range(len(ci_coefficients_raw_norm)):
        for k in range(len(ci_coefficients_raw_norm[j])):
            ci_coefficients_raw_norm[j][k] = ci_coefficients_raw_norm[j][k]**2
    ci_coeffs.append(ci_coefficients_raw_norm)






###############################################################
"""
  3. Post process the CI-like coefficients, plot
"""
Example #10
def plot_cubes(params):
    """
    This function plots the cubes for selected energy levels using VMD.
    
    Args:
    
        params (dict):
    
            min_band (int): The minimum state number.
                                        
            states_to_be_plotted (list): The list containing the Kohn-Sham orbitals to be plotted by VMD. This list is defined in the submit file.
            
            path_to_tcl_file (str): The path to the tcl file which contains the input for plotting the cubes in VMD.
            
            MO_images_directory (str): The molecular orbitals images directory.
    
            isUKS (int): This parameter is set for spin-restricted and spin-unrestricted calculations. When it is
                         set to 1, unrestricted calculations were requested in the input file; otherwise the
                         calculation is restricted.
                         
            curr_step (int): The current time step used to save the images of the MOs.
            
            phase_factor_visual (numpy array): The phase correction factor for each MO for the current step.
        
    Returns:
    
        None
        
    """

    # Critical parameters
    critical_params = [
        "min_band", "states_to_be_plotted", "path_to_tcl_file",
        "MO_images_directory", "curr_step", "phase_factor_visual"
    ]
    # Default parameters
    default_params = {"isUKS": 0}
    # Check input
    comn.check_input(params, default_params, critical_params)

    # Unpack the Kohn-Sham orbital indices and the Kohn-Sham orbitals to be plotted. Also unpack the
    # path to the directory where the molecular orbital images will be saved
    states_to_be_plotted = params["states_to_be_plotted"]
    # For VMD
    path_to_tcl_file = params["path_to_tcl_file"]
    # The molecular orbital images directory
    MO_images_directory = params["MO_images_directory"]

    # isUKS flag for spin-polarized and spin-unpolarized
    isUKS = int(params["isUKS"])
    # The current step
    curr_step = int(params["curr_step"])
    # If the path does not exist create it.
    if not os.path.isdir(MO_images_directory):
        os.makedirs(MO_images_directory)

    # Extracting the phase factor from params
    phase_factor_visual = params["phase_factor_visual"]

    min_band = params["min_band"]
    # We plot the cubes for the previous time step
    cubefile_names_prev = CP2K_methods.cube_file_names_cp2k(params)
    # read the lines of the tcl file
    tcl_file = open(path_to_tcl_file, 'r')
    tcl_lines = tcl_file.readlines()
    tcl_file.close()

    if isUKS == 1:
        # The cube file names of the alpha spin are at the even indices of cubefile_names_prev
        alp_cubefile_names_prev = cubefile_names_prev[0::2]
        # The same but for the phase factor of the alpha spin
        phase_factor_alpha = phase_factor_visual[0::2]
        # The cube file names of the beta spin are at the odd indices of cubefile_names_prev
        bet_cubefile_names_prev = cubefile_names_prev[1::2]
        # The same but for the phase factor of the beta spin
        phase_factor_beta = phase_factor_visual[1::2]

        for state_to_be_plotted in states_to_be_plotted:
            # Subtracting the min_band to obtain the index of the cube file for the state to be plotted for alpha spin
            alpha_cube_name = alp_cubefile_names_prev[state_to_be_plotted -
                                                      min_band]
            # Subtracting the min_band to obtain the index of the cube file for the state to be plotted for beta spin
            beta_cube_name = bet_cubefile_names_prev[state_to_be_plotted -
                                                     min_band]
            # open a new tcl file for alpha cubes
            new_file_alpha = open("vmd_alpha_cube_plot_%d.tcl" % curr_step,
                                  'w')
            # open a new tcl file for beta cubes
            new_file_beta = open("vmd_beta_cube_plot_%d.tcl" % curr_step, 'w')

            for j in range(len(tcl_lines)):
                if 'mol load cube' in tcl_lines[j]:
                    # Open the cube file in VMD for alpha cubes
                    new_file_alpha.write('mol load cube %s\n' %
                                         alpha_cube_name)
                    # Open the cube file in VMD for beta cubes
                    new_file_beta.write('mol load cube %s\n' % beta_cube_name)
                elif 'render TachyonInternal' in tcl_lines[j]:
                    # Render the alpha cube images to the MO_images_directory
                    new_file_alpha.write(
                        'render TachyonInternal %s/%s.tga\n' %
                        (MO_images_directory,
                         alpha_cube_name.replace('cubefiles/', '').replace(
                             '.cube', '')))
                    # Render the beta cube images to the MO_images_directory
                    new_file_beta.write(
                        'render TachyonInternal %s/%s.tga\n' %
                        (MO_images_directory,
                         beta_cube_name.replace('cubefiles/', '').replace(
                             '.cube', '')))
                elif 'isosurface' in tcl_lines[j].lower():
                    # Correct the isovalue by multiplying it by the phase factor for alpha orbitals
                    tmp_elements_alpha = tcl_lines[j].split()
                    tmp_elements_alpha[5] = str(
                        phase_factor_alpha[state_to_be_plotted - min_band] *
                        float(tmp_elements_alpha[5]))
                    isosurface_line_alpha = ' '.join(tmp_elements_alpha)
                    new_file_alpha.write(isosurface_line_alpha + '\n')

                    # Correct the isovalue by multiplying it by the phase factor for beta orbitals
                    tmp_elements_beta = tcl_lines[j].split()
                    tmp_elements_beta[5] = str(
                        phase_factor_beta[state_to_be_plotted - min_band] *
                        float(tmp_elements_beta[5]))
                    isosurface_line_beta = ' '.join(tmp_elements_beta)
                    new_file_beta.write(isosurface_line_beta + '\n')

                else:
                    # The rest of the tcl file lines
                    new_file_alpha.write(tcl_lines[j])
                    new_file_beta.write(tcl_lines[j])

            new_file_alpha.close()
            new_file_beta.close()
            # Run the VMD by tcl file
            os.system('vmd < vmd_alpha_cube_plot_%d.tcl' % curr_step)
            os.system('vmd < vmd_beta_cube_plot_%d.tcl' % curr_step)
            #os.system('rm vmd_alpha_cube_plot_%d.tcl' % curr_step)
            #os.system('rm vmd_beta_cube_plot_%d.tcl' % curr_step)

    else:
        # The same as above but for restricted spin calculations. No beta orbitals are considered.
        for state_to_be_plotted in states_to_be_plotted:
            cube_name = cubefile_names_prev[state_to_be_plotted - min_band]

            new_file = open("vmd_cube_plot_%d.tcl" % curr_step, 'w')
            for j in range(len(tcl_lines)):
                if 'mol load cube' in tcl_lines[j]:
                    new_file.write('mol load cube %s\n' % cube_name)
                elif 'render TachyonInternal' in tcl_lines[j]:
                    new_file.write(
                        'render TachyonInternal %s/%s.tga\n' %
                        (MO_images_directory,
                         cube_name.replace('cubefiles/', '').replace(
                             '.cube', '')))
                elif 'isosurface' in tcl_lines[j].lower():
                    tmp_elements = tcl_lines[j].split()
                    tmp_elements[5] = str(
                        phase_factor_visual[state_to_be_plotted - min_band] *
                        float(tmp_elements[5]))
                    isosurface_line = ' '.join(tmp_elements)
                    new_file.write(isosurface_line + '\n')
                else:
                    new_file.write(tcl_lines[j])

            new_file.close()

            os.system('vmd < vmd_cube_plot_%d.tcl' % curr_step)
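
# Usage sketch with illustrative values only: plot three spin-restricted orbitals for step 0.
# Note that CP2K_methods.cube_file_names_cp2k may need extra keys (e.g. project name or
# max_band) beyond those documented above; they are omitted here.
params = {
    'min_band': 10,
    'states_to_be_plotted': [10, 11, 12],
    'path_to_tcl_file': '../vmd_cube_template.tcl',
    'MO_images_directory': 'MO_images',
    'isUKS': 0,
    'curr_step': 0,
    # one phase factor (+1 or -1) per orbital, starting from min_band; a numpy array works too
    'phase_factor_visual': [1, 1, -1],
}
plot_cubes(params)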
Example #11
import os
import sys
from libra_py import CP2K_methods


run_slurm = True
submit_template = 'submit_template.slm'
run_python_file = 'run_template.py'
istep = 1200
fstep = 1402
njobs = 30
submission_exe = 'sbatch'
# Remove the previous folders if they exist. You can keep them as well,
# but Libra will overwrite some of the data if the names are the same
os.system('rm -rf res job* all_*')

print('Distributing jobs...')
CP2K_methods.distribute_cp2k_libint_jobs(submit_template, run_python_file, istep, fstep, njobs, run_slurm, submission_exe)

Example #12
def run_step2_jobs(params):
    """ Prepares and runs job folders needed for running step2 

    Args:
        params (dict): The parameters dictionary used for containing 
            the variables needed to make the submit.slm template 

    Returns:
        None: but creates the wd (working directory) that stores the
            job folders and the files needed to run each job, and runs
            those jobs.    
    """

    logger.debug("Entered into the function initialize_step2_jobs")

    critical_params = []
    default_params = {
        "trajectory_xyz_filename": "md.xyz",
        "es_software": "cp2k",
        "es_software_input_template": "cp2k_input_template",
        "istep": 0,
        "fstep": 1,
        "njobs": 1,
        "waveplot_input_template": "waveplot_in.hsd"
    }
    comn.check_input(params, default_params, critical_params)
    logger.debug("Checked params in the function initialize_step2_jobs")

    # Consider using the shutil module here instead of os.system()
    os.system("rm -r wd")
    os.mkdir('wd')
    os.system("rm -r res")
    os.mkdir('res')

    # Get the needed variables from the params dictionary
    trajectory_xyz_filename = params["trajectory_xyz_filename"]
    es_software = params["es_software"]
    es_software_input_template = params["es_software_input_template"]
    init_md_step = params["istep"]
    final_md_step = params["fstep"]
    njobs = params["njobs"]

    if es_software == "dftb+":
        waveplot_input_template = params["waveplot_input_template"]

    # Initialize the jobs
    for njob in range(njobs):

        logger.debug(f"Within the loops over njobs, initializing job: {njob}")

        job_init_step, job_final_step = step2_many_body.curr_and_final_step_job(
            init_md_step, final_md_step, njobs, njob)
        nsteps_this_job = job_final_step - job_init_step + 1
        logger.debug(
            f"The initial step for the job {njob} is: {job_init_step} with final step: {job_final_step}"
        )
        logger.debug(f"nsteps_this_job is: {nsteps_this_job}")

        logger.debug(f"Creating job: {njob} in directory: {os.getcwd()}")
        if es_software.lower() == "cp2k":
            logger.debug("es_software.lower() == cp2k")
            CP2K_methods.cp2k_distribute(job_init_step, job_final_step,
                                         nsteps_this_job,
                                         trajectory_xyz_filename,
                                         es_software_input_template, njob)

        elif es_software.lower() == "gaussian":
            logger.debug("es_software.lower() == gaussian")
            Gaussian_methods.gaussian_distribute(job_init_step, job_final_step,
                                                 nsteps_this_job,
                                                 trajectory_xyz_filename,
                                                 es_software_input_template,
                                                 njob)

        elif es_software.lower() == "dftb+":
            logger.debug("es_software.lower() == dftb+")
            DFTB_methods.dftb_distribute(job_init_step, job_final_step,
                                         nsteps_this_job,
                                         trajectory_xyz_filename,
                                         es_software_input_template,
                                         waveplot_input_template, njob)

        logger.debug(
            f"Finished distributing for job: {njob}. We are currently in directory: {os.getcwd()}"
        )
        os.chdir("wd/job" + str(njob) + "/")
        logger.debug(f"Changed directory to: {os.getcwd()}")

        logger.debug(
            "Assigning values to the variables job_init_step, nsteps_this_job, and njob in submit_template.slm"
        )

        os.system("cp ../../submit_template.slm submit_" + str(njob) + ".slm")
        # Now, open the submit_template file in this job folder
        # Add the values to the params for this job
        f = open("submit_" + str(njob) + ".slm")
        submit_template_file = f.readlines()
        submit_template_file_size = len(submit_template_file)
        f.close()

        f = open("submit_" + str(njob) + ".slm", 'w')
        f.close()
        for i in range(submit_template_file_size):

            f = open("submit_" + str(njob) + ".slm", 'a')

            submit_template_file_line = submit_template_file[i].split()
            if not submit_template_file_line:
                continue

            elif submit_template_file_line[0] == "job_init_step=":
                f.write("declare -i job_init_step=%i" % (job_init_step))
                f.write("\n")

            elif submit_template_file_line[0] == "nsteps_this_job=":
                f.write("declare -i nsteps_this_job=%i" % (nsteps_this_job))
                f.write("\n")

            elif submit_template_file_line[0] == "njob=":
                f.write("declare -i njob=%i" % (njob))
                f.write("\n")

            else:
                f.write(submit_template_file[i])

        logger.debug(f"Updated submit_template.slm")

        os.system("sbatch submit_" + str(njob) + ".slm")
        #os.system("sh submit_"+str(njob)+".slm")
        logger.debug(f"Submitting the job in folder: {os.getcwd()}")

        # Change directory back to the top directory
        os.chdir("../../")
        logger.debug(
            f"Finished submitting job. We are now back in directory: {os.getcwd()}"
        )
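
# Usage sketch with illustrative values only: distribute 100 MD steps over 10 CP2K jobs.
# All keys below have defaults in the function above; a submit_template.slm file is also
# expected in the top directory from which this is run.
params = {
    'trajectory_xyz_filename': 'md.xyz',
    'es_software': 'cp2k',
    'es_software_input_template': 'cp2k_input_template',
    'istep': 0,
    'fstep': 99,
    'njobs': 10,
}
run_step2_jobs(params)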
Example #13

import numpy as np
import time
from liblibra_core import *
from libra_py import CP2K_methods
from libra_py import data_conv

# number of processors
nprocs = 4
# setting up a timer
t1 = time.time()
# extract the coordinates from the trajectory file with the time step
time_step = 1
coord = CP2K_methods.extract_coordinates('cdse13-1.xyz', time_step)
# the full path to different BASIS set files
# since it is in this folder we just put the name
basis_set_files_path = ['BASIS_MOLOPT']
# the unique atoms present in the trajectory
unique_atoms = ['Cd', 'Se']
# the unique atoms respective basis set names
basis_set_names = ['DZVP-MOLOPT-SR-GTH', 'DZVP-MOLOPT-SR-GTH']
# the basis set data for the  unique atoms
data = CP2K_methods.find_basis_set(basis_set_files_path, unique_atoms,
                                   basis_set_names)
# create the libint2 shell based on the coordinates and the basis set data
# in spherical coordinates (the spherical flag is set to True)
shell_1 = CP2K_methods.make_shell(coord, data, True)
# You can print the number of basis functions in the shell to make sure
# it is the same as in the MOLog files
print('The number of atomic orbital basis set is:', nbasis(shell_1))
# compute the AO overlap matrix
AO = compute_overlaps(shell_1, shell_1, nprocs)
Example #14
def dftb_distribute(istep, fstep, nsteps_this_job, trajectory_xyz_file,
                    dftb_input, waveplot_input, curr_job_number):
    """
    Distributes dftb jobs for trivial parallelization 

    Make sure that your dftb input file has absolute paths to the following input parameters:

        SlaterKosterFiles = Type2FileNames {
          Prefix = "/panasas/scratch/grp-alexeyak/brendan/dftbp_development/"
          Separator = "-"
          Suffix = ".skf"
        }

    Args:
        
        istep (integer): The initial time step in the trajectory xyz file.

        fstep (integer): The final time step in the trajectory xyz file.

        nsteps_this_job (integer): The number of steps for this job.

        trajectory_xyz_file (string): The full path to trajectory xyz file.

        dftb_input (string): the dftb_input_template.hsd file

        waveplot_input (string): the input file for the waveplot subpackage of dftbplus for generating cubefiles

        curr_job_number (integer): The current job number.

    Returns:

        None

    """

    # Now we need to distribute the jobs into job batches
    # First, change into the working directory (wd) where the calculations take place
    os.chdir("wd")

    nsteps = fstep - istep + 1
    njobs = int(nsteps / nsteps_this_job)

    # Initialize the curr_step to istep
    curr_step = istep

    # Make the job directory folders
    os.system("mkdir job" + str(curr_job_number) + "")
    os.chdir("job" + str(curr_job_number) + "")
    # Copy the trajectory file and input template there
    os.system("cp ../../" + trajectory_xyz_file + " .")
    os.system("cp ../../" + dftb_input + " .")
    os.system("cp ../../" + waveplot_input + " .")

    # Now, in job folder njob, we should do only a certain number of steps
    for step in range(nsteps_this_job):

        # extract the curr_step xyz coordinates from the trajectory file and write them to another xyz file
        CP2K_methods.read_trajectory_xyz_file(trajectory_xyz_file, curr_step)
        curr_step += 1

    # Go back to the main directory
    os.chdir("../../")