Example #1
def model2a(Hdia, Sdia, d1ham_dia, dc1_dia, q, params):
    """

    Same as :func:`model2`, just with a different interface.
    

              | k*x^2          V               |
    Hdia  =   | V              k*(x-x0)^2 + D  |

    Sdia =  I

    Ddia != 0.0, but Ddia + Ddia.H() = dSdia/dR, with dSdia = 0.0

    Args: 
        Hdia ( CMATRIX(2,2) ): diabatic Hamiltonian - updated by this function
        Sdia ( CMATRIX(2,2) ): overlap of the basis (diabatic) states - updated by this function [ identity ] 
        d1ham_dia ( list of 1 CMATRIX(2,2) objects ): derivatives of the diabatic Hamiltonian w.r.t. 
            the nuclear coordinate - updated by this function
        dc1_dia ( list of 1 CMATRIX(2,2) objects ): derivative coupling in the diabatic basis - updated 
            by this function [ zero ]
        q ( double ): coordinates of the particle, ndof = 1
        params ( dictionary ): model parameters

            * **params["x0"]** ( double ): displacement of the minimum of one of the diabatic states
                [ default: 1.0, units: Bohr ]
            * **params["k"]** ( double ): force constante [ default: 0.01, units: Ha/Bohr^2]
            * **params["D"]** ( double ): gap between the minima of the states 1 and 0, negative 
                value means the state 1 is lower in energy than state 0  [ default: 0.0, units: Ha]
            * **params["V"]** ( double ): electronic coupling between these diabats [ default: 0.005, units: Ha]
            * **params["NAC"]** ( double ): NAC in the diabatic basis  [ default: -0.1, units: Ha]

    Returns:       
        None


    """

    critical_params = [ ] 
    default_params = {"x0":1.0, "k":0.01, "D":0.0, "V":0.005, "NAC":-0.1 }
    comn.check_input(params, default_params, critical_params)
    x0,k,D,V, nac = params["x0"], params["k"], params["D"], params["V"], params["NAC"]

    x = q

    Sdia.set(0,0, 1.0+0.0j);  Sdia.set(0,1, 0.0+0.0j);
    Sdia.set(1,0, 0.0+0.0j);  Sdia.set(1,1, 1.0+0.0j);

    Hdia.set(0,0, k*x*x*(1.0+0.0j) );   Hdia.set(0,1, V*(1.0+0.0j));
    Hdia.set(1,0, V*(1.0+0.0j));        Hdia.set(1,1, (k*(x-x0)**2 + D)*(1.0+0.0j));


    for i in [0]:
        #  d Hdia / dR_0
        d1ham_dia[i].set(0,0, 2.0*k*x*(1.0+0.0j) );   d1ham_dia[i].set(0,1, 0.0+0.0j);
        d1ham_dia[i].set(1,0, 0.0+0.0j);   d1ham_dia[i].set(1,1,2.0*k*(x-x0)*(1.0+0.0j));

        #  <dia| d/dR_0| dia >
        dc1_dia[i].set(0,0, 0.0+0.0j);          dc1_dia[i].set(0,1, nac*(1.0+0.0j));
        dc1_dia[i].set(1,0, nac*(-1.0+0.0j));   dc1_dia[i].set(1,1, 0.0+0.0j);
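
# A minimal usage sketch, assuming CMATRIX and CMATRIXList are available from liblibra_core,
# as they are for the other wrappers in this collection.
from liblibra_core import CMATRIX, CMATRIXList

Hdia, Sdia = CMATRIX(2, 2), CMATRIX(2, 2)
d1ham_dia = CMATRIXList();  d1ham_dia.append(CMATRIX(2, 2))
dc1_dia = CMATRIXList();    dc1_dia.append(CMATRIX(2, 2))

# Evaluate the model at x = 0.5 Bohr; an empty dict picks up the default parameters
model2a(Hdia, Sdia, d1ham_dia, dc1_dia, 0.5, {})
print(Hdia.get(0, 0).real, Hdia.get(1, 1).real)  # diabatic energies at x = 0.5
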
Example #2
def get_data_sets(params):
    """Reads several sets of data files 

    Args:
        params ( dictionary ): parameters controlling the function execution [Required!]

            Required parameter keys:

            * **params["data_set_paths"]** ( list of strings ):
                define the paths of the directories where the data files for
                different data sets (e.g. independent MD trajectories) are located. 
            .. note::
                In addition, requires parameters described in
                :func:`get_data`

    Returns:
        list of lists of CMATRIX: data: 
            the time series of Hvib matrices for several data sets, such that
            data[idata][time] is a CMATRIX for the data set indexed by `idata`
            at time `time`


    Example:
        The full name of the vibronic Hamiltonian files read by this module should be:
    
        params["data_set_paths"][idata]+params["data_re_prefix"]+integer(time step)+params["data_re_suffix"] - for real part

        params["data_set_paths"][idata]+params["data_im_prefix"]+integer(time step)+params["data_im_suffix"] - for imaginary part

        Say, the directory "/home/alexeyak/test/step3/res0" contains files:
        Hvib_0_re, Hvib_1_re, .... ,    Hvib_999_re
        Hvib_0_im, Hvib_1_im, .... ,    Hvib_999_im

        Then set:

        >>> params["data_set_paths"] = ["/home/alexeyak/test/step3/res0/"]
        >>> params["data_re_prefix"] = "Hvib_"
        >>> params["data_re_suffix"] = "_re"
        >>> params["data_im_prefix"] = "Hvib_"
        >>> params["data_im_suffix"] = "_im"

    """

    critical_params = ["data_set_paths"]
    default_params = {}
    comn.check_input(params, default_params, critical_params)

    data = []

    for idata in params["data_set_paths"]:  # over all MD trajectories (data sets)
        prms = dict(params)
        prms.update({"data_re_prefix": idata + params["data_re_prefix"]})
        prms.update({"data_im_prefix": idata + params["data_im_prefix"]})

        data_i = get_data(prms)
        data.append(data_i)

    return data
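
# A minimal usage sketch with hypothetical trajectory directories; the data_dim/isnap/fsnap
# keys are passed through to get_data(), which does the actual reading.
params = {"data_set_paths": ["/home/user/traj0/", "/home/user/traj1/"],  # hypothetical paths
          "data_re_prefix": "Hvib_", "data_re_suffix": "_re",
          "data_im_prefix": "Hvib_", "data_im_suffix": "_im",
          "data_dim": 4, "isnap": 0, "fsnap": 10}
data = get_data_sets(params)
print(len(data), len(data[0]))  # expected: 2 data sets, each with 10 CMATRIX objects
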
Example #3
def Tully1(q, params):
    """
   
    A wrapper that calls the C++ implementation of Tully model I - the Simple Avoided Crossing (SAC) model:

    H_00 = A*(1.0-exp(-B*x)) x>0,  
         = A*(exp(B*x)-1.0 ) x<0
    H_11 = -H_00
    H_01 = C*exp(-D*x^2)

    Args: 
        q ( MATRIX(1,1) ): coordinates of the particle, ndof = 1
        params ( dictionary ): model parameters

            * **params["A"]** ( double ):  [ default: 0.010, units: Ha]
            * **params["B"]** ( double ):  [ default: 1.600, units: Bohr^-1]
            * **params["C"]** ( double ):  [ default: 0.005, units: Ha]
            * **params["D"]** ( double ):  [ default: 1.000, units: Bohr^-2]

    Returns:       
        PyObject: obj, with the members:

            * obj.ham_dia ( CMATRIX(2,2) ): diabatic Hamiltonian 
            * obj.ovlp_dia ( CMATRIX(2,2) ): overlap of the basis (diabatic) states [ identity ]
            * obj.d1ham_dia ( list of 1 CMATRIX(2,2) objects ): 
                derivatives of the diabatic Hamiltonian w.r.t. the nuclear coordinate
            * obj.dc1_dia ( list of 1 CMATRIX(2,2) objects ): derivative coupling in the diabatic basis [ zero ]
 
    """

    critical_params = []
    default_params = {"A": 0.010, "B": 1.600, "C": 0.005, "D": 1.000}
    comn.check_input(params, default_params, critical_params)

    A = params["A"]
    B = params["B"]
    C = params["C"]
    D = params["D"]

    obj = tmp()
    obj.ham_dia = CMATRIX(2, 2)
    obj.ovlp_dia = CMATRIX(2, 2)
    obj.d1ham_dia = CMATRIXList()
    obj.d1ham_dia.append(CMATRIX(2, 2))
    obj.dc1_dia = CMATRIXList()
    obj.dc1_dia.append(CMATRIX(2, 2))

    # Convert MATRIX to doubleList()
    qq = doubleList()
    qq.append(q.get(0))
    prms = doubleList()  # model parameters to pass to the C++ function
    prms.append(A)
    prms.append(B)
    prms.append(C)
    prms.append(D)

    model_SAC(obj.ham_dia, obj.ovlp_dia, obj.d1ham_dia, obj.dc1_dia, qq, prms)

    return obj
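
# A minimal usage sketch, assuming MATRIX is available from liblibra_core as in the rest of this module.
from liblibra_core import MATRIX

q = MATRIX(1, 1)
q.set(0, 0, 0.1)                        # nuclear coordinate, Bohr
obj = Tully1(q, {})                     # an empty dict picks up the default A, B, C, D
print(obj.ham_dia.get(0, 0).real)       # H_00 at x = 0.1
print(obj.d1ham_dia[0].get(0, 1).real)  # dH_01/dx at x = 0.1
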
Example #4
def Tully3(q, params):
    """
   
    A wrapper that calls the C++ implementation of Tully model III -
    the Extended Coupling With Reflection (ECWR) model:

    H_00 = A
    H_11 = -H_00
    H_01 = B*exp(C*x);          x <= 0
           B*(2.0 - exp(-C*x)); x > 0

    Args: 
        q ( MATRIX(1,1) ): coordinates of the particle, ndof = 1
        params ( dictionary ): model parameters

            * **params["A"]** ( double ):  [ default: 0.0006, units: Ha ]
            * **params["B"]** ( double ):  [ default: 0.1000, units: Ha ]
            * **params["C"]** ( double ):  [ default: 0.9000, units: Bohr^-1 ]

    Returns:       
        PyObject: obj, with the members:

            * obj.ham_dia ( CMATRIX(2,2) ): diabatic Hamiltonian 
            * obj.ovlp_dia ( CMATRIX(2,2) ): overlap of the basis (diabatic) states [ identity ]
            * obj.d1ham_dia ( list of 1 CMATRIX(2,2) objects ): 
                derivatives of the diabatic Hamiltonian w.r.t. the nuclear coordinate
            * obj.dc1_dia ( list of 1 CMATRIX(2,2) objects ): derivative coupling in the diabatic basis [ zero ]
 
 
    """

    critical_params = []
    default_params = {"A": 0.0006, "B": 0.1000, "C": 0.9000}
    comn.check_input(params, default_params, critical_params)

    A = params["A"]
    B = params["B"]
    C = params["C"]

    obj = tmp()
    obj.ham_dia = CMATRIX(2, 2)
    obj.ovlp_dia = CMATRIX(2, 2)
    obj.d1ham_dia = CMATRIXList()
    obj.d1ham_dia.append(CMATRIX(2, 2))
    obj.dc1_dia = CMATRIXList()
    obj.dc1_dia.append(CMATRIX(2, 2))

    # Convert MATRIX to doubleList()
    qq = doubleList()
    qq.append(q.get(0))
    prms = doubleList()  # model parameters to pass to the C++ function
    prms.append(A)
    prms.append(B)
    prms.append(C)

    model_ECWR(obj.ham_dia, obj.ovlp_dia, obj.d1ham_dia, obj.dc1_dia, qq, prms)

    return obj
Example #5
def get_data(params):
    """Read a single set of data files 

    Args:
        params ( dictionary ): parameters controlling the function execution

            Required parameter keys:

            * **params["data_dim"]** ( int ): matrix dimension how many lines/columns in the file [Required!]
            * **params["active_space"]** ( list of ints ): the indices of the states we care 
                about. These indices will be used to determine the size of the created CMATRIX objects
                and only these states will be extracted from the original files [ default: range(data_dim) ]
            * **params["isnap"]** ( int ): index of the first file to read [Required!]
            * **params["fsnap"]** ( int ): index of the final file to read [Required!]
            * **params["data_re_prefix"]** ( string ): prefixes of the files with real part of the data [Required!]
            * **params["data_im_prefix"]** ( string ): prefixes of the files with imaginary part of the data [Required!]
            * **params["data_re_suffix"]** ( string ): suffixes of the files with real part of the Hvib(t) [default: "_re"]
            * **params["data_im_suffix"]** ( string ): suffixes of the files with imaginary part of the Hvib(t) [default: "_im"]
            * **params["get_real"]** ( int ): whether we want to read the real component [ default: 1 - Yes ]
            * **params["get_imag"]** ( int ): whether we want to read the imaginary component [ default: 1 - Yes ]
            

    Returns:
        list of CMATRIX objects: data: 
            a time series of data matrices, such that data[time] is the data matrix at time step `time`

    Example:
        This example will read 10 pairs of files: "Hvib_0_re", "Hvib_0_im", "Hvib_1_re", "Hvib_1_im", ...
        "Hvib_9_re", "Hvib_9_im". Each file should contain a 4 x 4 matrix of numbers. It will generate a 
        list of 4 x 4 complex-valued matrices.

        >>> hvib = get_data({"data_dim":4, "isnap":0, "fsnap":10, "data_re_prefix":"Hvib", "data_im_prefix":"Hvib"})

        The following example will do the same as the example above, however the initially-read 4 x 4 matrices will
        be partially discarded. Out of 16 values, only 4 (the upper-left 2 x 2 block) will be stored in
        the resulting list of 2 x 2 complex-valued matrices.

        >>> hvib = get_data({"data_dim":4, "isnap":0, "fsnap":10, "data_re_prefix":"Hvib", "data_im_prefix":"Hvib", "active_space":[0,1]})


    """

    critical_params = ["data_dim", "isnap", "fsnap", "data_re_prefix", "data_im_prefix"]
    default_params = { "data_re_suffix":"_re", "data_im_suffix":"_im", "active_space":range(params["data_dim"]), "get_real":1, "get_imag":1}
    comn.check_input(params, default_params, critical_params)

    ndim = params["data_dim"]  # the number of cols/row in the input files

    data = []
    for i in range(params["isnap"],params["fsnap"]):

        filename_re = params["data_re_prefix"]+str(i)+params["data_re_suffix"]
        filename_im = params["data_im_prefix"]+str(i)+params["data_im_suffix"]
        data_i = get_matrix(ndim, ndim, filename_re, filename_im, params["active_space"], params["get_real"], params["get_imag"]) 
        data.append(data_i)

    return data
Example #6
def probabilities_1D_scattering(q, states, nst, params):
    """Computes the scattering probabilities in 1D

    Args:
        q ( MATRIX(nnucl, ntraj) ): coordinates of the "classical" particles [units: Bohr]
        states ( intList, or list of ntraj ints ): the quantum state of each trajectory
        nst ( int ): the number of possible quantum states in the problem
        params ( dictionary ): parameters of the simulation, should contain
 
            * **params["act_dof"]** ( int ): index of the nuclear DOF that is considered active (scattering coord)
            * **params["left_boundary"] ( double ): the beginning of the reflected particles counter [units: Bohr]
            * **params["right_boundary"] ( double ): the beginning of the transmitted particles counter [units: Bohr]

    Returns:
        tuple: ( pop_refl, pop_transm ): where

            * pop_refl ( MATRIX(nst, 1) ): probabilities of reflection on each state
            * pop_transm ( MATRIX(nst, 1) ): probabilities of transmission on each state

    """

    critical_params = []
    default_params = {
        "act_dof": 0,
        "left_boundary": -10.0,
        "right_boundary": 10.0
    }
    comn.check_input(params, default_params, critical_params)

    act_dof = params["act_dof"]
    left_boundary = params["left_boundary"]
    right_boundary = params["right_boundary"]

    ntraj = len(states)

    pop_transm = MATRIX(nst, 1)  # transmitted
    pop_refl = MATRIX(nst, 1)  # reflected

    ntransm, nrefl = 0.0, 0.0
    for traj in range(0, ntraj):

        if q.get(act_dof, traj) < left_boundary:
            pop_refl.add(states[traj], 0, 1.0)
            nrefl += 1.0

        if q.get(act_dof, traj) > right_boundary:
            pop_transm.add(states[traj], 0, 1.0)
            ntransm += 1.0

    ntot = ntransm + nrefl
    if ntot > 0.0:
        pop_transm = pop_transm / ntot
        pop_refl = pop_refl / ntot

    return pop_refl, pop_transm
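
# A minimal usage sketch, assuming MATRIX is available from liblibra_core; the coordinates
# and state indices below are made up for illustration.
from liblibra_core import MATRIX

ntraj = 4
q = MATRIX(1, ntraj)                          # 1 nuclear DOF, 4 trajectories
for tr, x in enumerate([-15.0, -12.0, 11.0, 14.0]):
    q.set(0, tr, x)
states = [0, 1, 1, 0]                         # current quantum state of each trajectory

pop_refl, pop_transm = probabilities_1D_scattering(q, states, 2, {})
print(pop_refl.get(0, 0), pop_transm.get(1, 0))
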
Example #7
def get_Hvib(params):
    """Read a single set of vibronic Hamiltonian files 

    Args:
        params ( dictionary ): parameters controlling the function execution

            Required parameter keys:

            * **params["nstates"]** ( int ): how many lines/columns in the file [Required!]
            * **params["nfiles"]** ( int ): how many files to read, starting from index 0 [Required!]
            * **params["Hvib_re_prefix"]** ( string ): prefixes of the files with real part of the Hvib(t) [Required!]
            * **params["Hvib_im_prefix"]** ( string ): prefixes of the files with imaginary part of the Hvib(t) [Required!]
            * **params["active_space"]** ( list of ints ): the indices of the states we care 
                about. These indices will be used to determine the size of the created CMATRIX objects
                and only these states will be extracted from the original files [ default: range(nstates) ]
            * **params["Hvib_re_suffix"]** ( string ): suffixes of the files with real part of the Hvib(t) [default: "_re"]
            * **params["Hvib_im_suffix"]** ( string ): suffixes of the files with imaginary part of the Hvib(t) [default: "_im"]

    Returns:
        list of CMATRIX objects: Hvib: 
            a time series of Hvib matrices, such that Hvib[time] is the Hvib matrix at time step `time`

    Example:
        This example will read 10 pairs of files: "Hvib_0_re", "Hvib_0_im", "Hvib_1_re", "Hvib_1_im", ...
        "Hvib_9_re", "Hvib_9_im". Each file should contain a 4 x 4 matrix of numbers. It will generate a 
        list of 4 x 4 complex-valued matrices.

        >>> hvib = get_Hvib({"nstates":4, "nfiles":10, "Hvib_re_prefix":"Hvib", "Hvib_im_prefix":"Hvib"})


        The following example will do the same as the example above, however the initially-read 4 x 4 matrices will
        be partially discarded. Out of 16 values, only 4 (the upper-left 2 x 2 block) will be stored in
        the resulting list of 2 x 2 complex-valued matrices.

        >>> hvib = get_Hvib({"nstates":4, "nfiles":10, "Hvib_re_prefix":"Hvib", "Hvib_im_prefix":"Hvib", "active_space":[0,1]})


    """

    critical_params = ["nstates", "nfiles", "Hvib_re_prefix", "Hvib_im_prefix"]
    default_params = { "Hvib_re_suffix":"_re", "Hvib_im_suffix":"_im", "active_space":range(params["nstates"])}
    comn.check_input(params, default_params, critical_params)

    nstates = params["nstates"]  # the number of states in the input files

    Hvib = []
    for i in range(0,params["nfiles"]):

        filename_re = params["Hvib_re_prefix"]+str(i)+params["Hvib_re_suffix"]
        filename_im = params["Hvib_im_prefix"]+str(i)+params["Hvib_im_suffix"]
        hvib = data_read.get_matrix(nstates, nstates, filename_re, filename_im, params["active_space"] ) 
        Hvib.append(hvib)

    return Hvib
Example #8
def model4a(Hdia, Sdia, d1ham_dia, dc1_dia, q, params):
    """

    Same as :func:`model4`, just with a different interface.

              | k*cos(w*x)      V               |
    Hdia  =   | V               k*sin(w*x) + D  |

    Sdia =  I

    Ddia  = 0.0

    Args: 
        Hdia ( CMATRIX(2,2) ): diabatic Hamiltonian - updated by this function
        Sdia ( CMATRIX(2,2) ): overlap of the basis (diabatic) states - updated by this function [ identity ] 
        d1ham_dia ( list of 1 CMATRIX(2,2) objects ): derivatives of the diabatic Hamiltonian w.r.t. 
            the nuclear coordinate - updated by this function
        dc1_dia ( list of 1 CMATRIX(2,2) objects ): derivative coupling in the diabatic basis - updated 
            by this function [ zero ]
        q ( double ): coordinates of the particle, ndof = 1
        params ( dictionary ): model parameters

            * **params["k"]** ( double ): force constante [ default: 0.01, units: Ha]
            * **params["w"]** ( double ): frequency  [ default: 0.1, units: Bohr^-1]
            * **params["D"]** ( double ): gap between the minima of the states 1 and 0, negative 
                value means the state 1 is lower in energy than state 0  [ default: 0.0, units: Ha]
            * **params["V"]** ( double ): electronic coupling between these diabats [ default: 0.005, units: Ha]

    Returns:       
        None

    """
    critical_params = [ ] 
    default_params = {"k":0.01, "D":0.0, "V":0.005, "w":0.1 }
    comn.check_input(params, default_params, critical_params)
    k,D,V,w = params["k"], params["D"], params["V"], params["w"]

    x = q

    Sdia.set(0,0, 1.0+0.0j);  Sdia.set(0,1, 0.0+0.0j);
    Sdia.set(1,0, 0.0+0.0j);  Sdia.set(1,1, 1.0+0.0j);

    Hdia.set(0,0, k*math.cos(x*w)*(1.0+0.0j) );   Hdia.set(0,1, V*(1.0+0.0j));
    Hdia.set(1,0, V*(1.0+0.0j));                  Hdia.set(1,1, k*math.sin(x*w)*(1.0+0.0j) + D);


    for i in [0]:
        #  d Hdia / dR_0
        d1ham_dia[i].set(0,0,-w*k*math.sin(x*w)*(1.0+0.0j) );   d1ham_dia[i].set(0,1, 0.0+0.0j);
        d1ham_dia[i].set(1,0, 0.0+0.0j);                        d1ham_dia[i].set(1,1, w*k*math.cos(x*w)*(1.0+0.0j));

        #  <dia| d/dR_0| dia >
        dc1_dia[i].set(0,0, 0.0+0.0j);   dc1_dia[i].set(0,1, 0.0+0.0j);
        dc1_dia[i].set(1,0, 0.0+0.0j);   dc1_dia[i].set(1,1, 0.0+0.0j);
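
# A minimal sketch scanning the periodic diabatic surfaces on a grid; it assumes CMATRIX and
# CMATRIXList are available from liblibra_core, as for model2a above.
from liblibra_core import CMATRIX, CMATRIXList

Hdia, Sdia = CMATRIX(2, 2), CMATRIX(2, 2)
d1ham_dia = CMATRIXList();  d1ham_dia.append(CMATRIX(2, 2))
dc1_dia = CMATRIXList();    dc1_dia.append(CMATRIX(2, 2))

for i in range(5):
    x = -10.0 + 5.0 * i
    model4a(Hdia, Sdia, d1ham_dia, dc1_dia, x, {"k": 0.01, "w": 0.1, "D": 0.0, "V": 0.005})
    print(x, Hdia.get(0, 0).real, Hdia.get(1, 1).real)
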
Example #9
def exe_espresso(i, params={}):
    """

    This function runs the necessary QE calculations for the input file indexed by `i`, as defined by the `params` dictionary

    Args:
        i ( int ): index of the input  file

            The input files expected should be called:
            x%i.scf_wrk.in  and x%i.exp.in

            The output files will be:
            x%i.scf.out  and x%i.exp.out
        
        params ( dictionary ): A dictionary containing important simulation parameters

            * **params["BATCH_SYSTEM"]** ( string ): the name of the job submission command
                use "srun" if run calculations on SLURM system or "mpirun" if run on PBS system
                [default: "srun"]
            * **params["NP"]** ( int ): the number of nodes on which execute calculations
                [default: 1]
            * **params["EXE"]** ( string ): the name of the program to be executed. This may be 
                the absolute path to the QE (pw.x) binary
            * **params["EXE_EXPORT"]** ( string ): the name of the program that converts the binary files
                with the QE wavefunctions to the text format (pw_export.x). The name includes the 
                absolute path to the binary

    """

    # Now try to get parameters from the input
    critical_params = ["EXE", "EXE_EXPORT"]
    default_params = {"BATCH_SYSTEM": None, "NP": 1}
    comn.check_input(params, default_params, critical_params)

    EXE = params["EXE"]
    EXE_EXPORT = params["EXE_EXPORT"]
    BATCH_SYSTEM = params["BATCH_SYSTEM"]
    NP = params["NP"]

    # Run calculations
    if BATCH_SYSTEM == None or BATCH_SYSTEM == "None":
        os.system("%s < x%i.scf_wrk.in > x%i.scf.out" % (EXE, i, i))
        os.system("%s < x%i.exp.in > x%i.exp.out    " % (EXE_EXPORT, i, i))
    else:
        os.system("%s -n %s %s < x%i.scf_wrk.in > x%i.scf.out" %
                  (BATCH_SYSTEM, NP, EXE, i, i))
        os.system("%s -n %s %s < x%i.exp.in > x%i.exp.out    " %
                  (BATCH_SYSTEM, NP, EXE_EXPORT, i, i))
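
# A minimal usage sketch; the executable paths are hypothetical and must point to the actual
# pw.x and pw_export.x binaries, and the files x0.scf_wrk.in and x0.exp.in must already exist.
params = {"BATCH_SYSTEM": "srun", "NP": 4,
          "EXE": "/path/to/pw.x",                # hypothetical path
          "EXE_EXPORT": "/path/to/pw_export.x"}  # hypothetical path
exe_espresso(0, params)  # produces x0.scf.out and x0.exp.out
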
Example #10
def plot_mem(res, _params, model_params, plot_params):
    """
    Args:
        res ( mem_saver ): the object holding the saved dynamical properties (time, populations, energies, etc.)
        _params ( dict ): simulation control parameters
        model_params ( dict ): the parameters of the model we compute
        plot_params ( dict ): plotting parameters (colors and their indexing)
        
    """

    colors = {}

    colors.update({"11": "#8b1a0e"})  # red
    colors.update({"12": "#FF4500"})  # orangered
    colors.update({"13": "#B22222"})  # firebrick
    colors.update({"14": "#DC143C"})  # crimson

    colors.update({"21": "#5e9c36"})  # green
    colors.update({"22": "#006400"})  # darkgreen
    colors.update({"23": "#228B22"})  # forestgreen
    colors.update({"24": "#808000"})  # olive

    colors.update({"31": "#8A2BE2"})  # blueviolet
    colors.update({"32": "#00008B"})  # darkblue

    colors.update({"41": "#2F4F4F"})  # darkslategray

    clrs_index = [
        "11", "21", "31", "41", "12", "22", "32", "13", "23", "14", "24"
    ]

    # Parameters and dimensions
    critical_params = []
    default_params = {"colors": colors, "clrs_index": clrs_index}
    comn.check_input(plot_params, default_params, critical_params)

    colors = plot_params["colors"]
    clrs_index = plot_params["clrs_index"]

    params = dict(_params)

    nsteps = params["nsteps"]
    nstates = len(model_params["E_n"])
    prefix = params["prefix"]
    properties_to_save = params["properties_to_save"]

    t = None
    if "time" in properties_to_save:
        #t = res.data["time"]
        t = list(res.np_data["time"][:])

    #=============== Populations ======================

    plt.figure(1, figsize=(36, 12))  # dpi=300, frameon=False)
    plt.subplot(1, 2, 1)
    plt.title('Adiabatic population dynamics')
    plt.xlabel('Time, a.u.')
    plt.ylabel('Population')

    if "pop_adi" in properties_to_save and t != None:
        for i in range(nstates):
            #Pi = data_conv.unpack1(res.data["pop_adi"], i, 0, 0)
            Pi = list(res.np_data["pop_adi"][:, i, 0])
            plt.plot(t,
                     Pi,
                     label='$P_%i$' % (i),
                     linewidth=10,
                     color=colors[clrs_index[i]])
            plt.legend()

    plt.subplot(1, 2, 2)
    plt.title('Diabatic population dynamics')
    plt.xlabel('Time, a.u.')
    plt.ylabel('Population')

    if "pop_dia" in properties_to_save and t != None:
        for i in range(nstates):
            #Pi = data_conv.unpack1(res.data["pop_dia"], i, 0, 0)
            Pi = list(res.np_data["pop_dia"][:, i, 0])
            plt.plot(t,
                     Pi,
                     label='$P_%i$' % (i),
                     linewidth=10,
                     color=colors[clrs_index[i]])
            plt.legend()

    plt.savefig("%s/Fig1.png" % (prefix), dpi=300)
    plt.savefig("%s/Fig1.pdf" % (prefix), dpi=300)

    #============= Energies =====================
    plt.figure(2, figsize=(36, 12))  # dpi=300, frameon=False)

    plt.subplot(1, 2, 1)
    plt.title('Energies')
    plt.xlabel('t, a.u.')
    plt.ylabel('Energy, a.u.')
    if "Ekin_dia" in properties_to_save \
       and "Epot_dia" in properties_to_save \
       and "Etot_dia" in properties_to_save \
       and t != None:

        #Ekin_dia = res.data["Ekin_dia"]
        #Epot_dia = res.data["Epot_dia"]
        #Etot_dia = res.data["Etot_dia"]
        Ekin_dia = list(res.np_data["Ekin_dia"][:])
        Epot_dia = list(res.np_data["Epot_dia"][:])
        Etot_dia = list(res.np_data["Etot_dia"][:])

        plt.plot(t,
                 Etot_dia,
                 label='$Etot_{dia}$',
                 linewidth=10,
                 color=colors["11"])
        plt.plot(t,
                 Ekin_dia,
                 label='$Ekin_{dia}$',
                 linewidth=10,
                 color=colors["21"])
        plt.plot(t,
                 Epot_dia,
                 label='$Epot_{dia}$',
                 linewidth=10,
                 color=colors["31"])
        plt.legend()

    plt.subplot(1, 2, 2)
    plt.title('Energies')
    plt.xlabel('t, a.u.')
    plt.ylabel('Energy, a.u.')

    if "Ekin_adi" in properties_to_save \
       and "Epot_adi" in properties_to_save \
       and "Etot_adi" in properties_to_save \
       and t != None:

        #Ekin_adi = res.data["Ekin_adi"]
        #Epot_adi = res.data["Epot_adi"]
        #Etot_adi = res.data["Etot_adi"]
        Ekin_adi = list(res.np_data["Ekin_adi"][:])
        Epot_adi = list(res.np_data["Epot_adi"][:])
        Etot_adi = list(res.np_data["Etot_adi"][:])

        plt.plot(t,
                 Etot_adi,
                 label='$Etot_{adi}$',
                 linewidth=10,
                 color=colors["11"])
        plt.plot(t,
                 Ekin_adi,
                 label='$Ekin_{adi}$',
                 linewidth=10,
                 color=colors["21"])
        plt.plot(t,
                 Epot_adi,
                 label='$Epot_{adi}$',
                 linewidth=10,
                 color=colors["31"])
        plt.legend()

    plt.savefig("%s/Fig2.png" % (prefix), dpi=300)
    plt.savefig("%s/Fig2.pdf" % (prefix), dpi=300)

    #============= Phase spaces & Norms  =====================
    plt.figure(3, figsize=(36, 12))  # dpi=300, frameon=False)

    plt.subplot(1, 2, 1)
    plt.title('Phase space')
    plt.xlabel('Coordinate, a.u.')
    plt.ylabel('Momentum, a.u.')

    if "q_dia" in properties_to_save and "p_dia" in properties_to_save:
        #ndof = res.data["q_dia"][0].num_of_rows
        ndof = res.np_data["q_dia"].shape[1]

        for idof in range(ndof):
            #qi = data_conv.unpack1(res.data["q_dia"], idof, 0, 0)
            #pi = data_conv.unpack1(res.data["p_dia"], idof, 0, 0)
            qi = list(res.np_data["q_dia"][:, idof, 0])
            pi = list(res.np_data["p_dia"][:, idof, 0])

            plt.plot(qi,
                     pi,
                     label='',
                     linewidth=10,
                     color=colors[clrs_index[idof]])
            plt.legend()

    plt.subplot(1, 2, 2)
    plt.title('Norms')
    plt.xlabel('Time, a.u.')
    plt.ylabel('Norm')

    if "norm_dia" in properties_to_save and "norm_adi" in properties_to_save and t != None:
        #nrm_dia = res.data["norm_adi"]
        #nrm_adi = res.data["norm_dia"]
        nrm_dia = list(res.np_data["norm_dia"][:])
        nrm_adi = list(res.np_data["norm_adi"][:])

        plt.plot(t,
                 nrm_dia,
                 label='Diabatic',
                 linewidth=10,
                 color=colors["11"])
        plt.plot(t,
                 nrm_adi,
                 label='Adiabatic',
                 linewidth=10,
                 color=colors["21"])
        plt.legend()

    plt.savefig("%s/Fig3.png" % (prefix), dpi=300)
    plt.savefig("%s/Fig3.pdf" % (prefix), dpi=300)

    plt.show()
    plt.close()
Example #11
def read_gaussian_tddft_log_file(params):
    """
    This function reads a Gaussian output file and extracts the excitation analysis results.

    Args:

        params (dictionary): The dictionary containing the input parameters.

            logfile_name (string): The log file name.

            tolerance (float): The tolerance factor for choosing the excited states of the excitation analysis.

            number_of_states (integer): The number of excited states to be considered in the excitation analysis.

            isUKS (integer): This parameter is the flag for restricted or unrestricted spin calculations. If it is
                             set to 1 the unrestricted spin calculations will be considered.
    Returns:

        excitation_energies (list of floats): The excitation energies.

        ci_basis (list): The list containing the CI basis of the excitation analysis.

        ci_coefficients (list): The list containing the ci_coefficients for each excitation.

        spin_components (list): The list containing the spin components for each excitation.

    """

    # Critical parameters
    critical_params = [ "logfile_name" ]
    # Default parameters
    default_params = { "tolerance": 0.05, "number_of_states": 5, "isUKS": 0 }
    # Check input
    comn.check_input(params, default_params, critical_params)

    # Gaussian log file name
    gaussian_log_file_name = params["logfile_name"]
    # tolerance factor
    tolerance = params["tolerance"]
    # The unrestricted spin calculations flag
    isUKS = params["isUKS"]
    # The number of states
    number_of_states = params["number_of_states"]

    # Open the log file
    file = open(gaussian_log_file_name,'r')
    lines = file.readlines()
    file.close()

    print("\nWe are opening gaussian logfile, here is the number of lines")
    print("lines = ", lines)

    # Initialize the excitation energies list
    excitation_energies = []
    # The lines with excited state in them
    excited_states_lines = []
    for i in range(len(lines)):
        # If 'Excited State' was found in the log file append it in excited_states_lines
        if 'Excited State' in lines[i]:
            excitation_energies.append(float(lines[i].split()[4]))
            excited_states_lines.append(i)
    print("excited_states_lines = ", excited_states_lines)

    # Initialize the ci_basis, ci_coefficients, and spin_components
    ci_basis = []
    ci_coefficients = []
    spin_components = []
    # Find the final line of the excitation analysis (the blank line) and append
    # it to excited_states_lines for further use in the 'for' loops
    for i in range(excited_states_lines[-1],len(lines)):
        # Find the blank line
        if len(lines[i].split())==0:
            excited_states_lines.append(i)
            # If the blank line is found, exit the for loop
            break
    
    for i in range(number_of_states):
        # Initialize tmp variables for storing the excitation analyses
        tmp_ci_state              = []
        tmp_ci_state_coefficients = []
        tmp_spin = []
        
        for j in range( excited_states_lines[i], excited_states_lines[i+1] ):
            if '->' in lines[j]: # and len(lines[j].split())==4:
                if isUKS==1:
                    # For alpha spin
                    if 'A' in lines[j]:
                        line_alpha = lines[j].replace('A','').replace('->','').split()
                        # Use the tolerance factor for choosing the states
                        if float(line_alpha[2])**2 > tolerance:
                            tmp_ci_state.append( [int(line_alpha[0]), int(line_alpha[1])] )
                            tmp_ci_state_coefficients.append( float(line_alpha[2]) )
                            tmp_spin.append('alp')

                    # For beta spin
                    elif 'B' in lines[j]:
                        line_beta = lines[j].replace('B','').replace('->','').split()
                        # Use the tolerance factor for choosing the states
                        if float(line_beta[2])**2 > tolerance:
                            tmp_ci_state.append( [int(line_beta[0]), int(line_beta[1])] )
                            tmp_ci_state_coefficients.append( float(line_beta[2]) )
                            tmp_spin.append('bet')
                else:
                    # Just for alpha spin and the same as above
                    print("\nThis is the spin-restricted case, we are reading the lines now")
                    tmp_line_alpha = lines[j].replace("->","")
                    line_alpha = tmp_line_alpha.split()

                    print("\nlines[j] = ", lines[j])  
                    print("tmp_line_alpha = ", tmp_line_alpha)
                    print("line_alpha = ", line_alpha)

                    if float(line_alpha[2])**2 > tolerance:
                        tmp_ci_state.append( [int(line_alpha[0]), int(line_alpha[1])] )
                        tmp_ci_state_coefficients.append( float(line_alpha[2]) )
                        tmp_spin.append('alp')            

        # Append the tmp variables into the main variables
        ci_basis.append(tmp_ci_state)
        ci_coefficients.append(tmp_ci_state_coefficients)
        spin_components.append(tmp_spin)
    
    return excitation_energies[0:number_of_states], ci_basis, ci_coefficients, spin_components
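
# A minimal usage sketch; the log file name is hypothetical and should point to an actual
# Gaussian TD-DFT output file.
params = {"logfile_name": "gaussian_tddft.log",  # hypothetical file name
          "tolerance": 0.05, "number_of_states": 5, "isUKS": 0}
energies, ci_basis, ci_coeffs, spins = read_gaussian_tddft_log_file(params)
print(energies)     # excitation energies (in eV, as printed by Gaussian)
print(ci_basis[0])  # [occupied, virtual] orbital pairs of the first excitation
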
Example #12
def convolve_cp2k_pdos(params: dict):
    """
    This function reads the .pdos file produced by CP2K, extracts the pDOS at each time step, and
    then convolves it with Gaussian functions.
    
    Args:
    
        params (dictionary):
    
            cp2k_pdos_file (str): The CP2K .pdos file.
        
            time_step (int): The time step of molecular dynamics.
        
            sigma (float): The standard deviation in Gaussian function.
        
            coef (float): The coefficient multiplying the Gaussian functions.
        
            npoints (int): The number of points used in convolution.
        
            energy_conversion (float): The conversion factor applied to the energies read from the file
                                       (which are in Hartree). For example, to convert from Hartree to eV,
                                       pass libra_py.units.au2ev. The default is the Hartree-to-eV factor.

            angular_momentum_cols (list): The angular momentum columns in the *.pdos files produced by CP2K.
	
    Returns:
	
        energy_grid (numpy array): The energy grid points vector.
		
        convolved_pdos (numpy array): The convolved pDOS vector.
		
        homo_energy (float): The HOMO energy (at the requested time step).
		
    """

    # Critical parameters
    critical_params = ["cp2k_pdos_file", "angular_momentum_cols"]
    # Default parameters
    default_params = {
        "time_step": 0,
        "sigma": 0.02,
        "coef": 1.0,
        "npoints": 4000,
        "energy_conversion": units.au2ev
    }
    # Check input
    comn.check_input(params, default_params, critical_params)

    # The CP2K log file name
    cp2k_pdos_file = params["cp2k_pdos_file"]
    # The time step in the .pdos file (This is for molecular dynamics, for single-point calculations it is set to 0).
    time_step = params["time_step"]
    # The standard deviation value
    sigma = params["sigma"]
    # The pre factor that is multiplied to the Gaussian function
    coef = params["coef"]
    # Number of points for the grid
    npoints = params["npoints"]
    # The energy conversion value from atomic unit, It is better to use the default values in the `libra_py.units`
    energy_conversion = params["energy_conversion"]
    # The angular momentum columns in the .pdos files
    angular_momentum_cols = params["angular_momentum_cols"]

    # Opening the file
    file = open(cp2k_pdos_file, 'r')
    lines = file.readlines()
    file.close()

    # Lines with 'DOS'
    lines_with_dos = []

    # Finding the lines with 'DOS'
    for i in range(0, len(lines)):
        if 'DOS'.lower() in lines[i].lower().split():
            lines_with_dos.append(i)

    # Finding the first and last index of PDOS for each time step
    if len(lines_with_dos) == 1:
        # First index
        first_index = 2
        # Last index
        last_index = int(lines[len(lines) - 1].split()[0])
    elif len(lines_with_dos) > 1:
        # First index
        first_index = 2
        # Last index
        last_index = int(lines_with_dos[1] - 1)

    # Find the number of columns in the PDOS file showing the number
    # of orbital components, energy, and occupation column.
    num_cols = len(lines[first_index].split())

    # Number of energy levels considered for PDOS
    num_levels = last_index - first_index + 1

    # Finding the HOMO and LUMO energy levels by collecting the
    # line indices of the unoccupied states only
    pdos_unocc = []
    # Energy levels
    energy_levels = []
    for i in range(first_index, last_index + 1):
        energy_levels.append(float(lines[i].split()[1]) * energy_conversion)
        if float(lines[i].split()[2]) == 0:
            pdos_unocc.append(i)
    # HOMO energy level
    homo_level = int(lines[min(pdos_unocc)].split()[0])
    # HOMO energy
    homo_energy = float(lines[homo_level].split()[1]) * energy_conversion
    # Minimum energy level
    min_energy = float(lines[first_index].split()[1]) * energy_conversion
    # Maximum energy level
    max_energy = float(lines[last_index].split()[1]) * energy_conversion

    # Now we make an equally spaced energy vector from min_energy to max_energy with npoints points.
    energy_grid = np.linspace(min_energy - 2, max_energy + 2, npoints)
    energy_grid = np.array(energy_grid)

    # Appending the energy lines with their component densities of states
    energy_lines = []
    # The initial line in the .pdos file of step 'time_step'
    init_line = time_step * (num_levels + 2) + 2
    # The final line in the .pdos file of step 'time_step'
    final_line = (time_step + 1) * (num_levels + 2)
    for i in range(init_line, final_line):
        # Appending the energy lines into energy_lines
        energy_lines.append(lines[i].split())

    for i in range(0, len(energy_lines)):

        for j in range(0, len(energy_lines[0])):

            energy_lines[i][j] = float(energy_lines[i][j])

    energy_lines = np.array(energy_lines)

    # Now we sum the PDOSs defined in angular_momentum_cols by user
    pdos_sum = []
    for k in range(0, len(energy_lines)):

        # A temporary vector for summation of the PDOS
        tmp_vec = []
        tmp_vec.append(energy_lines[k][1])

        for i in range(0, len(angular_momentum_cols)):
            # Initializing a new sum variable
            # print("angular_momentum_cols[i]",angular_momentum_cols[i])
            tmp_sum = 0
            for j in angular_momentum_cols[i]:

                # If j is less than the number of columns
                # then sum the PDOS
                if j <= num_cols:
                    tmp_sum += energy_lines[k][j]
            # Appending tmp_sum into tmp_vec
            tmp_vec.append(tmp_sum)

        # Now append tmp_vec into pdos_sum, we will
        # then use this pdos_sum for convolution
        pdos_sum.append(tmp_vec)

    convolved_pdos = []
    t1 = time.time()
    # The pre-factor for Gaussian functions
    pre_factor = (coef / (sigma * np.sqrt(2.0 * np.pi)))
    for j in range(1, len(angular_momentum_cols) + 1):
        # Initialize a vector of zeros summing the weighted PDOS
        tmp_weighted_pdos = np.zeros(energy_grid.shape)

        for i in range(0, num_levels):
            # The Gaussian function
            gaussian_fun = pre_factor * (np.exp(-0.5 * np.power(
                ((energy_grid - float(pdos_sum[i][0]) * energy_conversion) /
                 sigma), 2)))

            tmp_weighted_pdos = tmp_weighted_pdos + gaussian_fun * float(
                pdos_sum[i][j])
        convolved_pdos.append(tmp_weighted_pdos)
    print('Elapsed time for convolving ', cp2k_pdos_file, ': ',
          time.time() - t1, ' seconds')
    convolved_pdos = np.array(convolved_pdos)

    return energy_grid, convolved_pdos, homo_energy
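
# A minimal usage sketch; the .pdos file name is hypothetical. The column indices follow the
# CP2K .pdos layout used in this module (column 3 = s, columns 4-6 = p).
params = {"cp2k_pdos_file": "project-k1-1.pdos",      # hypothetical file name
          "angular_momentum_cols": [[3], [4, 5, 6]],  # s states, then the summed p states
          "time_step": 0, "sigma": 0.05, "coef": 1.0, "npoints": 2000}
energy_grid, convolved_pdos, homo_energy = convolve_cp2k_pdos(params)
print(convolved_pdos.shape)  # (2, 2000): one convolved curve per angular momentum group
print(homo_energy)           # HOMO energy in eV (default energy_conversion)
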
Example #13
def cp2k_pdos(_params):
    """Computes various types of pDOS from the atomic state projections generated by the CP2K software

    Args:
        _params (dict) : control parameters of simulations, can contain the following keys:

          * **prefix** ( string ): a common prefix of the filenames for files containing the projection information [Required]
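
          * **snapshots** ( list of ints ): the indices of the MD snapshots to read and average the pDOS over;
            these indices enter the names of the .pdos files read by this function [ default: [0] ]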

          * **emin** ( double ): the minimal energy of the pDOS window relative to Ef [eV, default: -5.0 eV]

          * **emax** ( double ): maximal energy of the pDOS window relative to Ef [eV, default: 5.0 eV]

          * **de** ( double ): the original grid spacing of the pDOS, not necessarily the one used in pdos.in [eV, default: 0.1 eV]

          * **projections** ( list of lists of - see below): groups of atoms and types of projections.
            Each element of this list contains 3 sub-lists, whose intersection defines which files to use:
            e.g. projection = [["s","px"], [1,2,3], ["Cs", "Br"]] - means s and px orbitals of atoms 1, 2, and 3
            as long as any of these atoms are Cs or Br. Currently, atomic indices are not used [Required]

          * **element_mapping** ( dict ): mapping of the element name to its number in the output files of CP2K.
            Example: { "C":1, "H":2 } - then the "k1" files are for C contributions, "k2" are for H contributions [Required]

          * **outfile_prefix** ( string ): the prefix of the output file that will contain the final projections [ default: "pdos"]

          * **do_convolve** ( Bool ): the flag telling whether we want to convolve the original data with the
            Gaussian envelope. The convolution is done with :func:`convolve` [ default: True ] 

          * **de_new** ( double ): the new energy grid spacing, in effect only if do_convolve == True [eV, default: 0.025]

          * **var** ( double ): standard deviation of the Gaussian with which we do the convolution,
            in effect only if do_convolve == True [eV, default: 0.05]

          * **nspin** ( int ): specifies which nspin was used in the electronic structure calculation:
                      nspin = 1 - non-spin-polarized [ default ]
                      nspin = 2 - spin-polarized (not implemented yet)
          

    Returns:
        tuple: ( E, pDOSa, pDOSb ), where:

            * E ( MATRIX(N, 1) ): new energy grid, N - the new number of energy grid points
            * pDOSa ( MATRIX(N, Nproj) ): the alpha-spin pDOS on the new grid; Nproj = len(projections) is the number of projections we are interested in
            * pDOSb ( MATRIX(N, Nproj) ): the beta-spin pDOS on the new grid, with the same dimensions as pDOSa
 
            * if nspin = 1, pDOSb and pDOSa are the same
            * if nspin = 2, pDOSb may be different from pDOSa
    """

    params = dict(_params)

    critical_params = ["prefix", "projections", "element_mapping"]
    default_params = {
        "snapshots": [0],
        "emin": -5.0,
        "emax": 5.0,
        "de": 0.1,
        "outfile_prefix": "pdos",
        "do_convolve": True,
        "de_new": 0.025,
        "var": 0.05,
        "nspin": 1
    }
    comn.check_input(params, default_params, critical_params)

    prefix = params["prefix"]
    snapshots = params["snapshots"]
    projections = params["projections"]
    elt_mapping = params["element_mapping"]
    emin, emax, de = params["emin"], params["emax"], params["de"]
    outfile_prefix = params["outfile_prefix"]
    do_convolve, de_new, var = params["do_convolve"], params["de_new"], params["var"]
    nspin = params["nspin"]

    if nspin not in [1]:
        print("Error: The value of nspin must be 1")
        print("nspin = 1: Spin-unpolarized")
        print("Exiting Now ...")
        sys.exit(0)

    #============= Dimensions  =================

    nproj = len(projections)  # number of projections
    N = int(math.floor((emax - emin) / de)) + 1  # number of the gridpoints

    en = MATRIX(N, 1)  # energy of the grid points
    dosa = MATRIX(N, nproj)  # alpha spin-orbitals: dosa.get(i, proj) - DOS at grid point i projected on projection proj
    dosb = MATRIX(N, nproj)  # Matrix for beta  spin-orbitals
    for i in range(0, N):
        en.set(i, 0, emin + i * de)  # this is a scale centered on Ef

    #============= Data gathering  =================

    ang_mom_mapping = {
        "s": 3,
        "py": 4,
        "pz": 5,
        "px": 6,
        "d-2": 7,
        "d-1": 8,
        "d0": 9,
        "d+1": 10,
        "d+2": 11
    }
    nsnaps = float(len(snapshots))

    for snap in snapshots:
        for proj in projections:  # loop over all projection
            ang_mom = proj[0]
            atoms = proj[1]
            elements = proj[2]

            proj_indx = projections.index(proj)

            for Elt in elements:  # for given atom names
                at_indx = elt_mapping[Elt]

                filename = F"{prefix}-{snap}-k{at_indx}-1.pdos"  # file
                if not os.path.exists(filename):
                    print(F"The file {filename} is not found.")

                if os.path.exists(filename):

                    fa = open(filename, "r")
                    B = fa.readlines()
                    check = B[0].split()
                    fa.close()

                    # The second-to-the-last entry of the first line contains the Fermi energy in a.u.; convert to eV
                    Ef = float(check[-2]) * units.au2ev
                    # For debug
                    #print(F" snap = {snap}  proj = {proj_indx} elt = {Elt}  Ef = {Ef}")

                    for lin in B[2:]:  # read all lines, except for the 2 header lines
                        tmp = lin.split()

                        e = float(tmp[1]) * units.au2ev  # energy in a.u. - convert to eV
                        if e < Ef + emin or e > Ef + emax:
                            pass
                        else:
                            state_indx = int(math.floor(
                                (e - (Ef + emin)) / de))

                            for symb in ang_mom:  # for given angular momentum labels
                                ang_mom_indx = ang_mom_mapping[symb]
                                dosa.add(state_indx, proj_indx,
                                         float(tmp[ang_mom_indx]) / nsnaps)

    #============= Optional convolution =================

    E, pDOSa, pDOSb = MATRIX(en), MATRIX(dosa), MATRIX(dosb)

    if do_convolve == True:
        E, pDOSa = convolve(en, dosa, de, de_new, var)
        E, pDOSb = convolve(en, dosb, de, de_new, var)

    #============= Print out ==================

    f2a = open(outfile_prefix + "_alp.txt", "w")
    f2a.close()
    f2b = open(outfile_prefix + "_bet.txt", "w")
    f2b.close()

    N = E.num_of_rows
    for i in range(0, N):  # loop over grid points
        line = str(E.get(i, 0)) + "   "
        tot = 0.0
        for j in range(0, nproj):
            tot = tot + pDOSa.get(i, j)
            line = line + str(pDOSa.get(i, j)) + "   "
        line = line + str(tot) + "\n"
        f2a = open(outfile_prefix + "_alp.txt", "a")
        f2a.write(line)
        f2a.close()

    for i in range(0, N):  # loop over grid points
        line = str(E.get(i, 0)) + "   "
        tot = 0.0
        for j in range(0, nproj):
            tot = tot + pDOSb.get(i, j)
            line = line + str(pDOSb.get(i, j)) + "   "
        line = line + str(tot) + "\n"
        f2b = open(outfile_prefix + "_bet.txt", "a")
        f2b.write(line)
        f2b.close()

    if nspin == 2:
        return E, pDOSa, pDOSb
    else:
        pDOSb = MATRIX(pDOSa)
        return E, pDOSa, pDOSb
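
# A minimal usage sketch; the prefix and element mapping are hypothetical, and the function
# expects files named like "<prefix>-<snapshot>-k<element index>-1.pdos" in the working directory.
params = {"prefix": "mysystem",                       # hypothetical file prefix
          "element_mapping": {"Cs": 1, "Br": 2},      # hypothetical element-to-kind mapping
          "projections": [[["s"], [], ["Cs"]], [["px", "py", "pz"], [], ["Br"]]],
          "snapshots": [0], "emin": -5.0, "emax": 5.0, "de": 0.1,
          "do_convolve": True, "de_new": 0.025, "var": 0.05,
          "outfile_prefix": "pdos", "nspin": 1}
E, pDOSa, pDOSb = cp2k_pdos(params)
print(E.num_of_rows)  # number of points on the new energy grid
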
Example #14
def run(H_vib, params):
    """
    Main function to run the SH calculations based on the Landau-Zener hopping
    probabilities, all within the NBRA. The probabilities are implemented according to
    the Belyaev-Lebedev work.

    Args:
        H_vib ( list of lists of CMATRIX(nstates, nstates) ): the vibronic Hamiltonians, such that
            H_vib[idata][istep] corresponds to data set `idata` at time step `istep`
        params ( dictionary ): control parameters

            * **params["dt"]** ( double ): time distance between the adjacent data points [ units: a.u. ]
            * **params["ntraj"]** ( int ): how many stochastic trajectories to use in the ensemble
            * **params["istate"]** ( int ): index of the starting state (within those used in the active_space - see above)
            * **params["do_output"]** ( Boolean ): whether to print out the results into a file
            * **params["outfile"]** ( string ): the name of the file where all the results will be printed out
            * **params["T"]** ( double ): temperature of the simulation [ units: K ]

    """

    critical_params = []
    default_params = {
        "T": 300.0,
        "ntraj": 1000,
        "nsteps": 1,
        "istate": 0,
        "sh_method": 1,
        "decoherence_method": 0,
        "dt": 41.0,
        "Boltz_opt": 3,
        "do_output": False,
        "outfile": "_out.txt"
    }
    comn.check_input(params, default_params, critical_params)

    rnd = Random()

    ndata = len(H_vib)
    nsteps = len(H_vib[0])
    nstates = H_vib[0][0].num_of_cols
    dt = params["dt"]
    do_output = params["do_output"]
    ntraj = params["ntraj"]
    boltz_opt = params["Boltz_opt"]
    T = params["T"]

    res = MATRIX(nsteps, 3 * nstates + 5)

    #===== Precompute hopping probabilities ===
    P = []
    itimes = [0]
    nitimes = len(itimes)

    for idata in range(0, ndata):
        p = Belyaev_Lebedev(H_vib[idata], params)
        P.append(p)

    #========== Initialize the DYNAMICAL VARIABLES  ===============
    # State populations and active state indices
    Pop, istate = [], []

    for tr in range(0, ntraj):
        istate.append(params["istate"])
        Pop.append(CMATRIX(nstates, 1))
        Pop[tr].set(params["istate"], 1.0, 0.0)

    #=============== Entering the DYNAMICS ========================
    for i in range(0, nsteps):  # over all evolution times

        #============== Analysis of the Dynamics  =================
        # Compute the averages
        #res_i = step4.traj_statistics(i, Coeff, istate, H_vib, itimes)
        res_i = step4.traj_statistics2(i, Pop, istate, H_vib, itimes)

        # Print out into a file
        step4.printout(i * dt, res_i, params["outfile"])

        # Update the overall results matrix
        res.set(i, 0, i * dt)
        push_submatrix(res, res_i, Py2Cpp_int([i]),
                       Py2Cpp_int(range(1, 3 * nstates + 5)))

        #=============== Propagation ==============================
        for idata in range(0, ndata):  # over all data sets (MD trajectories)

            for it_indx in range(0, nitimes):  # over all initial times within each MD trajectory

                it = itimes[it_indx]

                for tr in range(0, ntraj):  # over all stochastic trajectories

                    Tr = idata * (nitimes * ntraj) + it_indx * (ntraj) + tr

                    #============== Propagation: TD-SE and surface hopping ==========

                    # Evolve Markov process.
                    # The convention is:
                    # P(i,j) - the probability to go from j to i
                    Pop[Tr] = CMATRIX(P[idata][i]) * Pop[Tr]

                    # Surface hopping
                    ksi = rnd.uniform(0.0, 1.0)
                    ksi1 = rnd.uniform(0.0, 1.0)

                    # Proposed hop:
                    st_new = tsh.hop_py(istate[Tr], P[idata][i].T(), ksi)

                    # Accept the proposed hop with the Boltzmann probability
                    E_new = H_vib[idata][i].get(st_new, st_new).real
                    E_old = H_vib[idata][i].get(istate[Tr], istate[Tr]).real
                    de = E_new - E_old

                    if de > 0.0:
                        bf = tsh.boltz_factor(E_new, E_old, T, boltz_opt)
                        if ksi1 < bf:
                            istate[Tr] = st_new
                    else:
                        istate[Tr] = st_new

    return res
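
# A minimal usage sketch; it assumes the vibronic Hamiltonians were precomputed and can be read
# with get_Hvib (Example #7), available in the current scope, and that the file names below exist.
Hvib = get_Hvib({"nstates": 4, "nfiles": 1000,
                 "Hvib_re_prefix": "res/Hvib_", "Hvib_re_suffix": "_re",
                 "Hvib_im_prefix": "res/Hvib_", "Hvib_im_suffix": "_im"})

params = {"dt": 41.0, "ntraj": 500, "istate": 3, "T": 300.0,
          "do_output": True, "outfile": "_out_LZ.txt"}
res = run([Hvib], params)  # run() expects a list of data sets, hence the extra brackets
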
Example #15
def do_step(i, params, run):
    """

    Runs a single-point SCF calculation for a given geometry along a trajectory

    Args:
        i ( int ): index of the time step to be used from the trajectory file
        params ( dictionary ): the control parameters of the simulation
        
            * **params["EXE"]** ( string ): path to the ERGOSCF executable [ default: ergo ]
            * **params["mo_active_space"]** ( list of ints or None ): indices of the MOs we care about 
                The indexing starts from 0, not 1! If set to None - all MOs will be returned. [default: None]
            * **params["md_file"]** ( string ): the name of the xyz file containing the trajectory - the 
                file should be in the general xyz format. [default: "md.xyz"]
            * **params["H**O-LUMO-only"]** ( int ): a flag to decide to use only H**O and LUMO orbitals as read from 
                the ErgoSCF output. This is the option to really go with the linear-scaling costs [ default: 0 ]

        run (Python function ): the function that defines the ErgoSCF input generation - the user 
            has to define all the control parameters in it, to be able to run the ErgoSCF calculations

            Example:

                In the example below, the outermost quotes should be tripled 

                Note: the function should follow the signature shown here

                def run(EXE, COORDS):
                    inp = "#!bin/sh
                %s << EOINPUT > /dev/null
                spin_polarization = 0
                molecule_inline
                %sEOF
                basis = "STO-3G"
                use_simple_starting_guess=1
                scf.create_mtx_files_F = 1
                scf.create_mtx_file_S = 1
                XC.sparse_mode = 1
                run "LDA"
                EOINPUT
                " % (EXE, COORDS)
                    return inp


    Returns:
        tuple: (E, MO), where:
        
            * E ( CMATRIX(M, M) ): the matrix of the converged Hamiltonian eigenvalues (at the given geometry)
                Here, M = len(params["mo_active_space"]) - we output only the MO energies that are of interest to us

            * MO ( CMATRIX(A, M) ): the matrix of the converged MO coefficients, i.e. the eigenvectors (at the given geometry)
                Here, M = len(params["mo_active_space"]) - we output only the MOs that are of interest to us.
                A - is the number of AOs in this calculation


    """

    # Now try to get parameters from the input
    critical_params = []
    default_params = {
        "EXE": "ergo",
        "mo_active_space": None,
        "md_file": "md.xyz",
        "H**O-LUMO-only": 0
    }
    comn.check_input(params, default_params, critical_params)

    # Get the parameters
    EXE = params["EXE"]
    md_file = params["md_file"]
    HL_only = params["HOMO-LUMO-only"]

    # Make an input file for SP calculations
    R = ERGO_methods.xyz_traj2gen_sp(md_file, i)

    # Run SCF calculations
    command = run(EXE, R)
    os.system("%s" % (command))

    # Get the last Fock matrix
    last_indx, last_filename = ERGO_methods.find_last_file("F_matrix_", ".mtx")
    F = ERGO_methods.get_mtx_matrices(last_filename)
    S = ERGO_methods.get_mtx_matrices("S_matrix.mtx")

    # Get the dimensions
    ao_sz = F.num_of_cols
    ao_act_sp = list(range(0, ao_sz))

    mo_sz = ao_sz
    mo_act_sp = list(range(0, mo_sz))

    if params["mo_active_space"] != None:
        mo_sz = len(params["mo_active_space"])
        mo_act_sp = list(params["mo_active_space"])

    # Solve the eigenvalue problem with the converged Fock matrix
    # get the converged MOs
    E = CMATRIX(ao_sz, ao_sz)
    MO = CMATRIX(ao_sz, ao_sz)
    solve_eigen(F, S, E, MO, 0)

    # Extract the E sub-matrix
    E_sub = CMATRIX(mo_sz, mo_sz)
    pop_submatrix(E, E_sub, mo_act_sp, mo_act_sp)

    # Extract the MO sub-matrix
    MO_sub = CMATRIX(ao_sz, mo_sz)
    pop_submatrix(MO, MO_sub, ao_act_sp, mo_act_sp)

    return E_sub, MO_sub
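
# A minimal usage sketch; `make_input` is a hypothetical user-defined function that follows the
# signature of the `run` example shown in the docstring above, and md.xyz must exist.
params = {"EXE": "ergo", "md_file": "md.xyz",
          "mo_active_space": [10, 11, 12, 13],  # hypothetical MO indices
          "HOMO-LUMO-only": 0}
E_sub, MO_sub = do_step(0, params, make_input)
print(E_sub.get(0, 0).real)  # energy of the first MO in the requested active space
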
Example #16
def plot_surfaces(_compute_model, _param_sets, states_of_interest, xmin, xmax,
                  dx, plot_params):
    """
    Args:
        _compute_model ( PyObject ): the function that returns the class with Hamiltonian properties
        _param_sets ( list of dictionaries ): parameters of the model Hamiltonian; many sets are possible (hence the list)

            For each set, the following keywords are required:
            * **nstates** ( int ): the dimensionality of the Hamiltonian

        states_of_interest ( list of ints ): indices of the states we want to plot 
        xmin ( double ): minimal value of the x axis used in the actual PES calculations  [a.u.]
        xmax ( double ): maximal value of the x axis used in the actual PES calculations  [a.u.]
        dx ( double ): step size of PES scan [a.u.]
        plot_params ( dictionary ): the parameters of plotting
            
            The dictionary can contain the following parameters:
            * **plot_params["colors"]** ( dictionary ): the definition of the colors to use (similar to the one found below)
            * **plot_params["clrs_index"]** ( list of strings ): the mapping of a color definition to its "name" (similar to the one found below)
            * **plot_params["xlim"]** ( list of 2 doubles ): the minimal and maximal values of the x axis in the plotted frame
            * **plot_params["ylim"]** ( list of 2 doubles ): the minimal and maximal values of the y axis in the plotted frame
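
    Example:
        A minimal usage sketch (illustrative only; `my_model` is a hypothetical model function
        compatible with the interface expected by nHamiltonian.compute_diabatic, and an
        interactive matplotlib backend is assumed):

        >>> plot_params = { "xlim":[-5.0, 5.0], "ylim":[-0.01, 0.05] }
        >>> plot_surfaces(my_model, [ {"nstates":2} ], [0, 1], -5.0, 5.0, 0.05, plot_params)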
        
    """

    colors_ = {}

    colors_.update({"11": "#8b1a0e"})  # red
    colors_.update({"12": "#FF4500"})  # orangered
    colors_.update({"13": "#B22222"})  # firebrick
    colors_.update({"14": "#DC143C"})  # crimson

    colors_.update({"21": "#5e9c36"})  # green
    colors_.update({"22": "#006400"})  # darkgreen
    colors_.update({"23": "#228B22"})  # forestgreen
    colors_.update({"24": "#808000"})  # olive

    colors_.update({"31": "#8A2BE2"})  # blueviolet
    colors_.update({"32": "#00008B"})  # darkblue

    colors_.update({"41": "#2F4F4F"})  # darkslategray

    clrs_index_ = [
        "11", "21", "31", "41", "12", "22", "32", "13", "23", "14", "24"
    ]

    # Parameters and dimensions
    critical_params = []
    default_params = {
        "colors": colors_,
        "clrs_index": clrs_index_,
        "xlim": [-7.5, 15],
        "ylim": [-0.005, 0.025]
    }
    comn.check_input(plot_params, default_params, critical_params)

    colors = plot_params["colors"]
    clrs_index = plot_params["clrs_index"]
    xlim = plot_params["xlim"]
    ylim = plot_params["ylim"]

    X = []
    nsteps = int((xmax - xmin) / dx) + 1

    for i in range(nsteps):
        X.append(xmin + i * dx)

    plt.rc('axes', titlesize=38)  # fontsize of the axes title
    plt.rc('axes', labelsize=38)  # fontsize of the x and y labels
    plt.rc('legend', fontsize=36)  # legend fontsize
    plt.rc('xtick', labelsize=28)  # fontsize of the tick labels
    plt.rc('ytick', labelsize=28)  # fontsize of the tick labels

    plt.rc('figure.subplot', left=0.2)
    plt.rc('figure.subplot', right=0.95)
    plt.rc('figure.subplot', bottom=0.13)
    plt.rc('figure.subplot', top=0.88)

    sz = len(_param_sets)
    for iset in range(sz):

        # The "nstates" field must be specified
        comn.check_input(_param_sets[iset], {}, ["nstates"])
        n = _param_sets[iset]["nstates"]

        ham = nHamiltonian(n, n, 1)  # ndia, nadi, nnucl
        ham.init_all(2)

        hdia, hadi = [], []
        uij = []  # projections of the adiabatic states onto the diabatic ones

        for k1 in range(n):
            hadi.append([])
            hdia.append([])
            uij_k1 = []
            for k2 in range(n):
                uij_k1.append([])
            uij.append(uij_k1)

        for i in range(nsteps):

            q = MATRIX(1, 1)
            q.set(0, 0, X[i])

            # Diabatic properties
            ham.compute_diabatic(_compute_model, q, _param_sets[iset])

            # Adiabatic properties
            ham.compute_adiabatic(1)

            U = ham.get_basis_transform()
            #P = U * U.H()  # population matrix

            for k1 in range(n):
                hadi[k1].append(ham.get_ham_adi().get(k1, k1).real)
                hdia[k1].append(ham.get_ham_dia().get(k1, k1).real)

                for k2 in range(n):
                    uij[k1][k2].append(
                        U.get(k1, k2).real**2 + U.get(k1, k2).imag**2)

        plt.figure(2 * iset, figsize=(36, 18))  # dpi=300, frameon=False)

        plt.subplot(1, 2, 1)
        plt.ylim(ylim[0], ylim[1])
        plt.xlim(xlim[0], xlim[1])

        #plt.title('Params set %i: Ham_dia' % (iset) )
        plt.xlabel('Coordinate, a.u.')
        plt.ylabel('Energy, a.u.')
        for k1 in states_of_interest:
            plt.plot(X,
                     hdia[k1],
                     label='$H_{%i%i}$' % (k1, k1),
                     linewidth=7,
                     color=colors[clrs_index[k1]])
        plt.legend()

        plt.subplot(1, 2, 2)
        plt.ylim(ylim[0], ylim[1])
        plt.xlim(xlim[0], xlim[1])
        #plt.title('Params set %i: Ham_adi' % (iset))
        plt.xlabel('Coordinate, a.u.')
        plt.ylabel('Energy, a.u.')
        for k1 in states_of_interest:
            plt.plot(X,
                     hadi[k1],
                     label='$E_{%i}$' % (k1),
                     linewidth=7,
                     color=colors[clrs_index[k1]])
        plt.legend()

        plt.figure(2 * iset + 1, figsize=(36, 18))  # dpi=300, frameon=False)
        sz1 = len(states_of_interest)

        for k2 in states_of_interest:
            indx = states_of_interest.index(k2)
            plt.subplot(1, sz1, 1 + indx)
            #plt.title('Params set %i: Adi state %i' % (iset, k2) )
            plt.xlabel('Coordinate, a.u.')
            plt.ylabel('Projection')

            for k1 in range(n):
                #                plt.plot(X, uij[k1][k2], label='$dia_{%i} | adi_{%i}$' % (k1, k2), linewidth=7, color = colors[clrs_index[k1]])
                plt.plot(X,
                         uij[k1][k2],
                         label=r'$< \psi^{dia}_{%i} | \psi^{adi}_{%i} >$' %
                         (k1, k2),
                         linewidth=7,
                         color=colors[clrs_index[k1]])
            plt.legend()

        plt.show()
        plt.close()
Example #17
0
def plot_pes_properties(comp_model, model_params, pes_params_, plot_params_):
    """
        Args:
        
            comp_model ( PyObject ): the function that returns the class with Hamiltonian properties
            
            model_params ( dictionary ): parameters of the model Hamiltonian
            
            pes_params_ ( dictionary ): controls the way the calculations are done
            
                Can contain the following parameters
                
                * **pes_params["ndia"]** ( int ): dimensionality of the diabatic Hamiltonian [ default: 2 ]
                * **pes_params["nadi"]** ( int ): dimensionality of the adiabatic Hamiltonian [ default: 2 ]
                * **pes_params["ndof"]** ( int ): the number of nuclear DOF [ default: 1 ]
                * **pes_params["active_dof"]** ( int ): index of the DOF w.r.t. which the scan will be done [ default: 0 ]
                * **pes_params["coord_type"]** ( 0 or 1 ): 0 - actual coordinate, 1 - time coordinate [ default: 0 ]
                * **pes_params["reference_coord"]** ( MATRIX(ndof, 1) ): the geometry of a many-DOF-system, used in case we 
                    only vary one DOF, but want to keep all other DOFs fixed
                * **pes_params["coord_mapping"]** ( PyObject ): the function that maps the 1D "scan coordinate" to an
                    ndof-dimensional vector that represents the system's geometry. If provided (i.e. not None),
                    it will be used to update the geometry of the system [ default: None ]
                * **pes_params["xmin"]** ( double ): minimal range of PES scan, matters only if coord_type = 0 [ default: -10.0 ]
                * **pes_params["xmax"]** ( double ): maximal range of PES scan, matters only if coord_type = 0 [ default:  10.0 ]
                * **pes_params["dx"]** ( double ): PES scan step size, matters only if coord_type = 0 [ default:  1.0 ]
                * **pes_params["tmin"]** ( int ): minimal range of t-PES scan, matters only if coord_type = 1 [ default: 0 ]
                * **pes_params["tmax"]** ( int ): maximal range of t-PES scan, matters only if coord_type = 1 [ default: 10 ]
                * **pes_params["dt"]** ( int ): t-PES scan step size, matters only if coord_type = 1 [ default:  1 ]
                * **pes_params["rep_tdse"]** ( 0 or 1 ): representation we are interested in
                    ( 0 - diabatic, 1 - adiabatic) [ required ]
                * **pes_params["rep_ham"]** ( 0 or 1 ): representation of the Hamiltonian returned 
                    by the `comp_model` function ( 0 - diabatic, 1 - adiabatic) [ required ]                    
                    
            plot_params_ ( dictionary ): determines what to print and how to do it, can contain the following keys:
            
                * **plot_params["which_ham_dia"]** ( list of 2-element lists ): which matrix elements of the 
                    diabatic Hamiltonian to plot [ default: empty ]

                * **plot_params["which_ham_adi"]** ( list of 2-element lists ): which matrix elements of the 
                    adiabatic Hamiltonian to plot [ default: empty ]

                * **plot_params["which_d1ham_dia"]** ( list of 3-element lists ): which matrix elements of the 
                    derivatives of the diabatic Hamiltonian w.r.t. which nuclear DOFs to plot, the format of each
                    entry is [idof, istate, jstate] [ default: empty ]

                * **plot_params["which_d1ham_adi"]** ( list of 3-element lists ): which matrix elements of the 
                    derivatives of the adiabatic Hamiltonian w.r.t. which nuclear DOFs to plot, the format of each
                    entry is [idof, istate, jstate] [ default: empty ]

                * **plot_params["which_dc1_dia"]** ( list of 3-element lists ): which matrix elements of the 
                    derivative couplings in the diabatic representation w.r.t. which nuclear DOFs to plot, the format
                    of each entry is [idof, istate, jstate] [ default: empty ]

                * **plot_params["which_dc1_adi"]** ( list of 3-element lists ): which matrix elements of the 
                    derivative couplings in the adiabatic representation w.r.t. which nuclear DOFs to plot, the format
                    of each entry is [idof, istate, jstate] [ default: empty ]

                * **plot_params["colors"]** ( dictionary ): the definition of the colors to use (similar to the one found below)

                * **plot_params["clrs_index"]** ( list of strings ): the mapping of a color definition to its "name" (similar to the one found below)
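
        Example:
            A usage sketch (illustrative only; `my_model` is a hypothetical model function
            compatible with update_Hamiltonian_q):

            >>> pes_params = { "rep_tdse":1, "rep_ham":0, "reference_coord":MATRIX(1,1),
            ...                "xmin":-5.0, "xmax":5.0, "dx":0.1 }
            >>> plot_params = { "which_ham_dia":[[0,0], [1,1]], "which_ham_adi":[[0,0], [1,1]] }
            >>> scan_path = plot_pes_properties(my_model, {}, pes_params, plot_params)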

                    
    """

    pes_params = dict(pes_params_)
    pes_params_critical = ["rep_tdse", "rep_ham"]
    pes_params_default = {
        "ndia": 2,
        "nadi": 2,
        "ndof": 1,
        "active_dof": 0,
        "coord_type": 0,
        "reference_coord": MATRIX(1, 1),
        "coord_mapping": None,
        "xmin": -10.0,
        "xmax": 10.0,
        "dx": 1.0,
        "tmin": 0,
        "tmax": 10,
        "dt": 1
    }
    comn.check_input(pes_params, pes_params_default, pes_params_critical)

    active_dof = pes_params["active_dof"]
    coord_type = pes_params["coord_type"]
    reference_coord = pes_params["reference_coord"]
    coord_mapping = pes_params["coord_mapping"]
    xmax = pes_params["xmax"]
    xmin = pes_params["xmin"]
    dx = pes_params["dx"]
    tmax = pes_params["tmax"]
    tmin = pes_params["tmin"]
    dt = pes_params["dt"]
    ndia = pes_params["ndia"]
    nadi = pes_params["nadi"]
    ndof = pes_params["ndof"]
    rep_tdse = pes_params["rep_tdse"]
    rep_ham = pes_params["rep_ham"]

    colors_ = {}

    colors_.update({"11": "#8b1a0e"})  # red
    colors_.update({"12": "#FF4500"})  # orangered
    colors_.update({"13": "#B22222"})  # firebrick
    colors_.update({"14": "#DC143C"})  # crimson

    colors_.update({"21": "#5e9c36"})  # green
    colors_.update({"22": "#006400"})  # darkgreen
    colors_.update({"23": "#228B22"})  # forestgreen
    colors_.update({"24": "#808000"})  # olive

    colors_.update({"31": "#8A2BE2"})  # blueviolet
    colors_.update({"32": "#00008B"})  # darkblue

    colors_.update({"41": "#2F4F4F"})  # darkslategray

    clrs_index_ = [
        "11", "21", "31", "41", "12", "22", "32", "13", "23", "14", "24"
    ]

    plot_params = dict(plot_params_)
    plot_params_critical = []
    plot_params_default = {
        "which_ham_dia": [],
        "which_ham_adi": [],
        "which_d1ham_dia": [],
        "which_d1ham_adi": [],
        "which_dc1_dia": [],
        "which_dc1_adi": [],
        "colors": colors_,
        "clrs_index": clrs_index_
    }
    comn.check_input(plot_params, plot_params_default, plot_params_critical)
    which_ham_dia = plot_params["which_ham_dia"]
    which_ham_adi = plot_params["which_ham_adi"]
    which_d1ham_dia = plot_params["which_d1ham_dia"]
    which_d1ham_adi = plot_params["which_d1ham_adi"]
    which_dc1_dia = plot_params["which_dc1_dia"]
    which_dc1_adi = plot_params["which_dc1_adi"]
    colors = plot_params["colors"]
    clrs_index = plot_params["clrs_index"]

    grid = []
    nsteps = 0
    if coord_type == 0:
        nsteps = int((xmax - xmin) / dx) + 1
        for i in range(nsteps):
            grid.append(xmin + i * dx)
    elif coord_type == 1:
        nsteps = int((tmax - tmin) / dt) + 1
        for i in range(nsteps):
            grid.append(tmin + i * dt)

    # ======= Hierarchy of Hamiltonians =======
    tol = 0.01
    ham = nHamiltonian(ndia, nadi, ndof)
    ham.init_all(2)
    ham.phase_corr_ovlp_tol = tol

    ham1 = []
    for tr in range(1):
        ham1.append(nHamiltonian(ndia, nadi, ndof))
        ham1[tr].init_all(2)
        ham1[tr].phase_corr_ovlp_tol = tol
        ham.add_child(ham1[tr])

    projectors = CMATRIXList()
    for tr in range(1):
        projectors.append(CMATRIX(nadi, nadi))
        projectors[tr].identity()

    # Energies, forces, and couplings
    ham_dia = []
    ham_adi = []
    d1ham_dia = []
    d1ham_adi = []
    dc1_dia = []
    dc1_adi = []

    for i in which_ham_dia:
        ham_dia.append([])

    for i in which_ham_adi:
        ham_adi.append([])

    for i in which_d1ham_dia:
        d1ham_dia.append([])

    for i in which_d1ham_adi:
        d1ham_adi.append([])

    for i in which_dc1_dia:
        dc1_dia.append([])

    for i in which_dc1_adi:
        dc1_adi.append([])

    nsteps = len(grid)
    scan_path = []

    tid = Py2Cpp_int([0, 0])

    for step in range(nsteps):
        q = None
        if coord_mapping is None:
            q = MATRIX(reference_coord)
            q.set(active_dof, 0, grid[step])
        else:
            q = coord_mapping(grid[step])
        scan_path.append(MATRIX(q))

        model_params["timestep"] = step

        update_Hamiltonian_q({
            "rep_tdse": rep_tdse,
            "rep_ham": rep_ham
        }, q, projectors, ham, comp_model, model_params)

        #if rep_tdse==0:

        for it_indx, it in enumerate(which_ham_dia):
            ham_dia[it_indx].append(
                ham.get_ham_dia(tid).get(it[0], it[1]).real)

        for it_indx, it in enumerate(which_d1ham_dia):
            d1ham_dia[it_indx].append(
                ham.get_d1ham_dia(it[0], tid).get(it[1], it[2]).real)

        for it_indx, it in enumerate(which_dc1_dia):
            dc1_dia[it_indx].append(
                ham.get_dc1_dia(it[0], tid).get(it[1], it[2]).real)

        #elif rep_tdse==1:

        for it_indx, it in enumerate(which_ham_adi):
            ham_adi[it_indx].append(
                ham.get_ham_adi(tid).get(it[0], it[1]).real)

        for it_indx, it in enumerate(which_d1ham_adi):
            d1ham_adi[it_indx].append(
                ham.get_d1ham_adi(it[0], tid).get(it[1], it[2]).real)

        for it_indx, it in enumerate(which_dc1_adi):
            dc1_adi[it_indx].append(
                ham.get_dc1_adi(it[0], tid).get(it[1], it[2]).real)

    plt.rc('axes', titlesize=38)  # fontsize of the axes title
    plt.rc('axes', labelsize=38)  # fontsize of the x and y labels
    plt.rc('legend', fontsize=36)  # legend fontsize
    plt.rc('xtick', labelsize=28)  # fontsize of the tick labels
    plt.rc('ytick', labelsize=28)  # fontsize of the tick labels

    plt.rc('figure.subplot', left=0.2)
    plt.rc('figure.subplot', right=0.95)
    plt.rc('figure.subplot', bottom=0.13)
    plt.rc('figure.subplot', top=0.88)

    #======== Now let's plot what we have computed ===========
    plt.figure(1, figsize=(36, 18))  # dpi=300, frameon=False)
    plt.subplot(1, 2, 1)
    plt.title('Diabatic energies')
    plt.xlabel('Coordinate, a.u.')
    plt.ylabel('Energy, a.u.')
    for it_indx, it in enumerate(which_ham_dia):
        plt.plot(grid,
                 ham_dia[it_indx],
                 label='$H_{%i, %i}$' % (it[0], it[1]),
                 linewidth=5,
                 color=colors[clrs_index[it_indx]])
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.title('Adiabatic energies')
    plt.xlabel('Coordinate, a.u.')
    plt.ylabel('Energy, a.u.')
    for it_indx, it in enumerate(which_ham_adi):
        plt.plot(grid,
                 ham_adi[it_indx],
                 label='$H_{%i, %i}$' % (it[0], it[1]),
                 linewidth=5,
                 color=colors[clrs_index[it_indx]])
    plt.legend()
    plt.show()
    plt.close()

    plt.figure(2, figsize=(36, 18))  # dpi=300, frameon=False)
    plt.subplot(1, 2, 1)
    plt.title('Derivatives of diabatic energies')
    plt.xlabel('Coordinate, a.u.')
    plt.ylabel('Derivative energy, a.u.')
    for it_indx, it in enumerate(which_d1ham_dia):
        plt.plot(grid,
                 d1ham_dia[it_indx],
                 label='$dH_{%i, %i} / dR_{%i}$' % (it[1], it[2], it[0]),
                 linewidth=5,
                 color=colors[clrs_index[it_indx]])
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.title('Derivatives of adiabatic energies')
    plt.xlabel('Coordinate, a.u.')
    plt.ylabel('Derivative energy, a.u.')
    for it_indx, it in enumerate(which_d1ham_adi):
        plt.plot(grid,
                 d1ham_adi[it_indx],
                 label='$dH_{%i, %i} / dR_{%i}$' % (it[1], it[2], it[0]),
                 linewidth=5,
                 color=colors[clrs_index[it_indx]])
    plt.legend()
    plt.show()
    plt.close()

    plt.figure(3, figsize=(36, 18))  # dpi=300, frameon=False)
    plt.subplot(1, 2, 1)
    plt.title('Derivative couplings, diabatic rep.')
    plt.xlabel('Coordinate, a.u.')
    plt.ylabel('Derivative coupling, a.u.')
    for it_indx, it in enumerate(which_dc1_dia):
        plt.plot(grid,
                 dc1_dia[it_indx],
                 label=r'$< \psi_{%i} | dR_{%i} | \psi_{%i} >$' %
                 (it[1], it[0], it[2]),
                 linewidth=5,
                 color=colors[clrs_index[it_indx]])
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.title('Derivative couplings, adiabatic rep.')
    plt.xlabel('Coordinate, a.u.')
    plt.ylabel('Derivative coupling, a.u.')
    for it_indx, it in enumerate(which_dc1_adi):
        plt.plot(grid,
                 dc1_adi[it_indx],
                 label=r'$< \psi_{%i} | dR_{%i} | \psi_{%i} >$' %
                 (it[1], it[0], it[2]),
                 linewidth=5,
                 color=colors[clrs_index[it_indx]])
    plt.legend()

    plt.show()
    plt.close()

    return scan_path
Example #18
0
def compute_mat_elt(X, a, b, params):
    """Computes the frequencies with which a given matrix element evolves in time

    Args:   
        X ( list of MATRIX ): time-series data
        a ( int ): is the row index of the matrix element to analyze
        b ( int ): is the column index of the matrix element to analyze
        params ( dictionary ): parameters of the simulation. Contains the following keys:
            * **params["filename"]** ( string ): the prefix of the filenames generated. Doesn't matter 
                if `do_output == False` [default: "influence_spectra_"]
            * **params["logname"]** ( string ): the name of the log-file. Doesn't matter 
                if `do_output == False` [default: "out.log"]             
            * **params["nfreqs"]** ( int ): the maximal number of frequencies we want to extract [default: 1]

            SeeAlso: recipe1(data, params) for the description of other parameters: 
            
                * dr
                * wspan
                * dw
                * do_output
                * acf_filename
                * spectrum_filename
                * do_center
                * acf_type
                * data_type

    Returns:
        tuple: ( freqs, T, norm_acf, raw_acf, W, J, J2 ), where

            freqs ( list of lists ), where

                * freqs[fr][0] - frequency of the mode fr [ units: cm^-1 ]
                * freqs[fr][1] - amplitude of the mode fr in the influence spectrum [arb. units]
                * freqs[fr][2] - normalized amplitude of the mode fr [arb.units]

            SeeAlso: The remaining 6 outputs are described in `recipe1`:
                * T
                * norm_acf
                * raw_acf
                * W
                * J
                * J2
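
    Example:
        A usage sketch (illustrative only; `X` is assumed to be a list of MATRIX objects
        with the time-series data, e.g. prepared from a vibronic Hamiltonian):

        >>> freqs, T, norm_acf, raw_acf, W, J, J2 = compute_mat_elt(X, 0, 1, {"nfreqs":3})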


    """

    # Set defaults and check critical parameters
    critical_params = [ ] 
    default_params = { "nfreqs":1, "logname":"out.log", "filename":"influence_spectra_", "do_output":0 }
    comn.check_input(params, default_params, critical_params)

    # Local variables and dimensions
    nfreqs = params["nfreqs"]
    do_output = params["do_output"] 
    filename = params["filename"]
    logname = params["logname"]


    #========= Collect info in a different format =======
    nsteps = len(X)    
    sz = X[0].num_of_rows
    data_ab = []
    for n in range(0,nsteps):
        xi = MATRIX(1,1)
        xi.set(0,0, X[n].get(a,b))
        data_ab.append(xi)   

  
    # ===== Compute the ACFs and their FTs
    params1 = dict(params)
    params1["acf_filename"] = filename+"_acf_"+str(a)+"_"+str(b)+".txt"
    params1["spectrum_filename"] = filename+"_spectrum_"+str(a)+"_"+str(b)+".txt"
    params1["verbose"] = 0


    T, norm_acf, raw_acf, W, J, J2 = recipe1(data_ab, params1)   # T is in fs, W is in cm^-1
 
    
    #===== Determine all frequencies (peaks) and sort them (in ascending order) ====
    out = data_stat.find_maxima(J2, params1)

    if do_output:
        lgfile = open(logname, "a")
        lgfile.write("Maximal peaks in the file "+params1["spectrum_filename"]+"\n")

    # Reduce the number of frequencies to the maximal number available
    if nfreqs > len(out):
        nfreqs = len(out)

    szo = len(out) - 1

    #==== Compute the intensities and normalized intensities, do the output =========
    freqs = []
    norm = 0.0
    for i in range(0,nfreqs):
        indx = out[szo-i][0]
        norm = norm + abs(J[indx])
       
    for i in range(0,nfreqs):
        indx = out[szo-i][0]
        freqs.append( [W[indx], J[indx], J[indx]/norm ] )

        if do_output:
            lgfile.write("index= %3i  frequency= %8.5f  amplitude= %8.5f normalized_amplitude= %8.5f \n" % (i, W[indx], J[indx], J[indx]/norm) )
    if do_output:
        lgfile.close()    
    
        lgfile = open(logname, "a")
        for imode, freq in enumerate(freqs):
            lgfile.write(" ========= Mode = %5i =========== \n" % (imode) )
            lgfile.write(" omega = E/hbar = %8.5f [cm^-1] \n" % (freq[0])   )
            lgfile.write(" Amplitude = %8.5f \n" % (freq[1]) )
            lgfile.write(" Normalized amplitude = %8.5f \n" % (freq[2]))
        lgfile.close()

   
    return  freqs, T, norm_acf, raw_acf, W, J, J2
Example #19
0
def Belyaev_Lebedev(Hvib, params):
    """
    Computes the Landau-Zener hopping probabilities based on the energy levels
    according to: 
    (1) Belyaev, A. K.; Lebedev, O. V. Phys. Rev. A, 2011, 84, 014701

    See also:
    (2) Xie, W.; Domcke, W. J. Chem. Phys. 2017, 147, 184114
    (3) Crespo-Otero, R.; Barbatti, M. Chem. Rev. 2018, 118, 7026 - section: 3.2.3

    Specifics:
    1) The estimation of d^2E_ij / dt^2 is based on the 3-point Lagrange interpolation
    2) This is done within the NBRA
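    3) For each local minimum of the gap dE_ij(t) along the trajectory, the hopping probability
       computed below is equivalent to (atomic units, hbar = 1):

           P_LZ(j->i) = exp( -(pi/2) * sqrt( dE_min^3 / (d^2 dE_ij / dt^2) ) )

       where the second time derivative of the gap is estimated by the 3-point finite difference
       ( dE(t-dt) - 2*dE(t) + dE(t+dt) ) / dt^2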

    Args:
        Hvib (list of CMATRIX(nstates,nstates) ):  vibronic Hamiltonians along the trajectory
        params ( dictionary ): control parameters

            * **params["dt"]** ( double ): time distance between the adjacent data points [ units: a.u., default: 41.0 ]
            * **params["T"]** ( double ): temperature of the nuclear sub-system [ units: K, default: 300.0 ]
            * **params["Boltz_opt"]** ( int ): option to select a probability of hopping acceptance [default: 3]
                Options:

                - 0 - all proposed hops are accepted - no rejection based on energies
                - 1 - proposed hops are accepted with exp(-E/kT) probability - the old (hence the default) approach
                - 2 - proposed hops are accepted with the probability derived from Maxwell-Boltzmann distribution - more rigorous
                - 3 - generalization of "1", but actually it should be changed in case there are many degenerate levels

     
    """

    # Control parameters
    critical_params = []
    default_params = {"T": 300.0, "Boltz_opt": 3, "dt": 41.0}
    comn.check_input(params, default_params, critical_params)

    boltz_opt = params["Boltz_opt"]
    T = params["T"]
    dt = params["dt"]

    # Data dimensions
    nsteps = len(Hvib)
    nstates = Hvib[0].num_of_cols

    # Pre-compute the energy gaps along the trajectory
    dE = decoherence_times.energy_gaps(Hvib)
    """
    Compute the probabilities based on the LZ formula
    P(i,j) - the convention is: the probability to go from j to i
    This will make the Markov state propagation more convenient
    """

    P = []
    P.append(MATRIX(nstates, nstates))
    for i in range(0, nstates):
        P[0].set(i, i, 1.0)

    for n in range(1, nsteps - 1):
        P.append(MATRIX(nstates, nstates))

        # Belyaev-Lebedev probabilities
        # Find the minima of the |E_i - E_j| for all pairs of i and j
        for i in range(0, nstates):  # target
            for j in range(i + 1, nstates):  # source

                # Interpolation is based on the 3-point Lagrange interpolant
                # http://mathworld.wolfram.com/LagrangeInterpolatingPolynomial.html
                p = 0.0
                if (dE[n - 1].get(i, j) > dE[n].get(i, j)
                        and dE[n].get(i, j) < dE[n + 1].get(i, j)):

                    denom = dE[n - 1].get(
                        i, j) - 2.0 * dE[n].get(i, j) + dE[n + 1].get(i, j)
                    if denom > 0.0:
                        argg = (dE[n].get(i, j)**3) / denom
                        p = math.exp(-0.5 * math.pi * dt * math.sqrt(argg))
                else:
                    p = 0.0  # no transition: the gap is not at a local minimum

                P[n].set(i, j, p)
                P[n].set(j, i, p)

        # Optionally, can correct transition probabilities to
        # account for Boltzmann factor
        for i in range(0, nstates):  # target
            for j in range(0, nstates):  # source

                if i != j:

                    E_new = Hvib[n].get(i, i).real  # target
                    E_old = Hvib[n].get(j, j).real  # source
                    bf = 1.0
                    if E_new > E_old:
                        bf = tsh.boltz_factor(E_new, E_old, T, boltz_opt)
                        if bf > 1.0:
                            print(
                                "Error: Boltzmann scaling factor cannot be larger than 1.0 = ",
                                bf)
                            #sys.exit(0)
                        P[n].scale(i, j, bf)

        # Compute the probability of staying on the same state j (source)
        # P(j,j) = 1 - sum_(i!=j) { P(i,j) }
        #
        # The convention is:
        # P(i,j) - the probability to go from j to i

        for j in range(0, nstates):  # for all source states

            tot = 0.0  # Total probability to leave state j
            for i in range(0, nstates):  # all target states
                if i != j:  # but j
                    tot += P[n].get(i, j)

            # Compute the probability to stay on state j
            P[n].set(j, j, 1.0 - tot)

    P.append(MATRIX(nstates, nstates))
    for i in range(0, nstates):
        P[nsteps - 1].set(i, i, 1.0)

    return P
Example #20
0
def compute_all(X, params):
    """Computes the frequencies with which all matrix elements evolve in time

    In particular, we are interested only in the frequencies of the real part of 
    diagonal elements and in frequencies of imaginary part of non-diagonal elements.
    This is a typical situation for the "vibronic" Hamiltonian data in the NA-MD

    Args:
        X ( list of CMATRIX ): time-series data of complex matrices, e.g. "vibronic" Hamiltonian
        params ( dictionary ): parameters controlling the execution of the :func:`compute_mat_elt` function
            .. seealso:: :func:`compute_mat_elt` for the full description of the required and allowed parameters and the default values

    Returns:
        tuple: ( freqs, T, norm_acf, raw_acf, W, J, J2 ), where freqs is a list[nstates][nstates][nfreqs][3] such that
        
            * freqs[a][b][fr][0] ( double ): frequency of the mode fr for the matrix element X_ab [ units: cm^-1 ] 
            * freqs[a][b][fr][1] ( double ): amplitude of the mode fr in the influence 
                spectrum for the matrix element X_ab  [arb. units]
            * freqs[a][b][fr][2] ( double ):  normalized amplitude of the mode fr for the matrix 
                element X_ab  [arb.units]
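
        The remaining outputs (T, norm_acf, raw_acf, W, J, J2) collect, for each matrix element (a, b),
        the corresponding outputs of `compute_mat_elt`.

    Example:
        A usage sketch (illustrative only; `Hvib` is assumed to be a list of CMATRIX objects,
        e.g. vibronic Hamiltonians along a trajectory):

        >>> freqs, T, norm_acf, raw_acf, W, J, J2 = compute_all(Hvib, {"nfreqs":3})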

    """

    critical_params = [ ] 
    default_params = { "filename":"influence_spectra_" }
    comn.check_input(params, default_params, critical_params)


    params_re = dict(params)
    params_re.update({"filename":params["filename"]+"_re_"})

    params_im = dict(params)
    params_im.update({"filename":params["filename"]+"_im_"})
    

    # Split the X array into two arrays - real and imaginary parts
    X_re, X_im = [], []
    nsteps = len(X)
    for step in range(0,nsteps):
        X_re.append(X[step].real())
        X_im.append(X[step].imag())

    # Do the calculations for each matrix element
    nstates = X[0].num_of_cols   

    freqs = [ [ [] for i in range(0,nstates)] for j in range(0,nstates)]
    T = [ [ [] for i in range(0,nstates)] for j in range(0,nstates)]
    norm_acf = [ [ [] for i in range(0,nstates)] for j in range(0,nstates)]
    raw_acf = [ [ [] for i in range(0,nstates)] for j in range(0,nstates)]
    W = [ [ [] for i in range(0,nstates)] for j in range(0,nstates)]
    J = [ [ [] for i in range(0,nstates)] for j in range(0,nstates)]
    J2 = [ [ [] for i in range(0,nstates)] for j in range(0,nstates)]

 
    for i in range(0,nstates):
        for j in range(0,nstates):
            if i == j:
                freqs[i][j], T[i][j],  norm_acf[i][j],  raw_acf[i][j],  W[i][j], J[i][j], J2[i][j] = compute_mat_elt(X_re, i, j, params_re)
            else:
                freqs[i][j], T[i][j],  norm_acf[i][j],  raw_acf[i][j],  W[i][j], J[i][j], J2[i][j] = compute_mat_elt(X_im, i, j, params_im)

    return freqs, T,  norm_acf,  raw_acf,  W,  J, J2
Example #21
0
def pdos_plot(plt, E, pDOSa, pDOSb, _params):
    """
    This function plots the pDOS for the requested projections and saves the resulting figure

    Args:

        plt ( matplotlib instance ): Matplotlib object for plotting

        E ( MATRIX(N, 1) ): the energy grid, N - the new number of energy grid points

        pDOSa ( MATRIX(N, Nproj) ): the pDOSa projections, Nproj - the total number of available projections

        pDOSb ( MATRIX(N, Nproj) ): the pDOSb projections, Nproj - the total number of available projections

        _params ( dict ): control parameters for plotting, includes the following keys:

          * **which_projections** ( list of ints): selects which of the available projections we want to plot [ default: [0] ]

          * **labels** ( list of strings ): selects the labels for each data line, should be of the same size as `which_projections` [ default: ["s"] ]
 
          * **colors** ( list of strings or hex codes): defines the colors of the lines to be plotted [ default: ["black"] ]

          * **title** ( string ) : The title of the produced figure [default: "No title"]

    Returns:
        None: but saves and displays the figure with the requested pDOS projections (e.g. pdosa.png)
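
    Example:
        A usage sketch (illustrative only; E, pDOSa, and pDOSb are assumed to be MATRIX objects
        prepared as described above, and plt is matplotlib.pyplot):

        >>> prms = { "which_projections":[0, 1], "labels":["s", "p"], "colors":["black", "red"], "title":"pDOS" }
        >>> pdos_plot(plt, E, pDOSa, pDOSb, prms)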

    """

    params = dict(_params)

    critical_params = []
    default_params = {
        "which_projections": [0],
        "labels": ["s"],
        "colors": ["black"],
        "title": "No title"
    }
    comn.check_input(params, default_params, critical_params)

    which_projections = params["which_projections"]
    labels = params["labels"]
    colors = params["colors"]
    title = params["title"]

    e = data_conv.MATRIX2nparray(E)
    pdosa = data_conv.MATRIX2nparray(pDOSa)
    pdosb = data_conv.MATRIX2nparray(pDOSb)

    nplots = len(which_projections)

    figure = plt.figure(num=None,
                        figsize=(3.21, 2.41),
                        dpi=600,
                        edgecolor='black',
                        frameon=True)

    # Plot the requested projections with the specified labels and colors
    for i in range(nplots):
        plt.plot(e[:, 0],
                 pdosa[:, which_projections[i]],
                 label=labels[i],
                 color=colors[i],
                 linewidth=2.0)

    plt.title(title, fontsize=12)
    plt.legend(fontsize=6.75, ncol=1, loc='upper center')
    plt.xlabel('$E - E_f, eV$', fontsize=12)
    plt.ylabel('$pDOS, eV^{-1}$', fontsize=12)

    plt.tight_layout()
    plt.savefig('pdosa.png', dpi=300)
    plt.show()
Example #22
0
def recipe1(data, params):
    """A recipe to compute ACF and its FT for data series
           
    Args:
        data ( list of MATRIX(ndof, 1) objects ): sequence of real-valued ndof-dimensional vectors
        params ( dictionary ): parameters controlling the calculation
     
            * **params["dt"]** ( double ): time distance between the adjacent data points [units: fs, default: 1.0]
            * **params["wspan"]** ( double ): window of frequencies for the Fourier transform [ units: cm^-1, default: 3000.0 ]
            * **params["dw"]** ( double ): grid points spacing in the frequency domain [ units: cm^-1, default: 1.0 ]
            * **params["do_output"]** ( Boolean ): whether to print out the results into files [ default: False ]
            * **params["acf_filename"]** ( string ): the name of the file where to print the ACF [ default: "acf.txt"]
            * **params["spectrum_filename"]** ( string ): the name of the file where to print the spectrum [ default: "spectrum.txt" ]
            * **params["do_center"]** ( Boolean ): a flag controlling whether to center data (=1) or not (=0)
                Centering means we subtract the average value (over all the data points) from all
                the data points - this way, we convert values into their fluctuations [default: True ]

            * **params["acf_type"]** ( int ): selector of the convention used to compute the ACF

                * 0 : the chemist convention,  (1/(N-h)) Sum_{t=1,N-h} (Y[t]*Y[t+h]) [ default ]
                * 1 : the statistician convention, (1/N) Sum_{t=1,N-h} (Y[t]*Y[t+h])

            * **params["data_type"]** ( int ): what is the format of the data?
 
                * 0 : list of MATRIX(ndof, 1) [ default ]
                * 1 : list of VECTOR

    Returns:
        tuple: (T, norm_acf, raw_acf, W, J, J2), where:

            * T ( list of double ): time axis [ units: fs ] 
            * norm_acf ( list of double ): normalized ACF
            * raw_acf ( list of double ): un-normalized ACF
            * W ( list of double ): frequencies axis [ units: cm^-1 ]
            * J ( list of double ): amplitudes of FT 
            * J2 ( list of double ): (1/2pi)*|J|^2 
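
    Example:
        A usage sketch (illustrative only; `data` is assumed to be a list of MATRIX(ndof, 1)
        objects, e.g. nuclear coordinates or velocities along a trajectory):

        >>> T, norm_acf, raw_acf, W, J, J2 = recipe1(data, {"dt":1.0, "wspan":4000.0, "dw":1.0})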

    """

    critical_params = [ ] 
    default_params = { "dt":1.0, "wspan":3000.0, "dw":1.0, "do_output":False, 
                       "acf_filename":"acf.txt", "spectrum_filename":"spectrum.txt",
                       "do_center":True, "acf_type":0, "data_type":0 }
    comn.check_input(params, default_params, critical_params)


    dt = params["dt"] * units.fs2au            # convert to  atomic units of time
    wspan = params["wspan"] * units.inv_cm2Ha  # convert to Ha (atomic units)
    dw = params["dw"] * units.inv_cm2Ha        # convert to Ha (atomic units)
    do_output = params["do_output"]
    acf_filename = params["acf_filename"]
    spectrum_filename = params["spectrum_filename"]
    do_center = params["do_center"]
    acf_type = params["acf_type"]
    data_type = params["data_type"]
    

    #========
    data_new = data
    if do_center:
        if data_type==0:
            data_new = data_stat.mat_center_data(data)
        elif data_type==1:
            data_new = data_stat.vec_center_data(data)

    #=========== ACFs ==============
    T, norm_acf, raw_acf = None, None, None
    if data_type==0:
        T, norm_acf, raw_acf = acf.acf_mat( data_new , dt, acf_type)
    elif data_type==1:
        T, norm_acf, raw_acf = acf.acf_vec( data_new , dt, acf_type)
    else:
        print("Error: data_type = ", data_type, " is not known\n")
        sys.exit(0)


    sz = len(T)
    for it in range(0,sz):
        T[it] = T[it]/units.fs2au  # convert to fs

    if do_output:
        f = open(acf_filename,"w")
        for it in range(0,sz):
            f.write("%8.5f  %8.5f  %8.5f  \n" % (T[it] , norm_acf[it], raw_acf[it]))
        f.close()


    #=========== FT =============
    W, J = ft.ft(norm_acf, wspan, dw, dt)
    sz = len(W)

    J2 = []
    for iw in range(0,sz):
        W[iw] = W[iw]/units.inv_cm2Ha
        J2.append( (1.0/(2.0*math.pi))*J[iw]*J[iw] )

    if do_output:
        f = open(spectrum_filename,"w")
        for iw in range(0,sz):
            f.write("%8.5f  %8.5f  %8.5f\n" % (W[iw], J[iw], J2[iw] ) )
        f.close()

    return T, norm_acf, raw_acf, W, J, J2
Example #23
0
def read_energies_from_gaussian_log_file( params ):
    """
    This function reads the energies from a Gaussian log file and returns the Kohn-Sham energies and the total energy.

    Args:

        params (dictionary): The dictionary containing the input parameters.

            logfile_name (string): The log file name.

            min_band (integer): The minimum state number.

            max_band (integer): The maximum state number.

            spin (integer): The spin component. 1 is for alpha and 2 is for beta spin.

    Returns:

        ks_energies (1D numpy array): The Kohn-Sham energies.

        total_energy (float): The total energy of the system.
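
    Example:
        A usage sketch (illustrative only; "x.log" is a placeholder for an actual Gaussian output file):

        >>> prms = { "logfile_name":"x.log", "min_band":1, "max_band":10, "spin":1 }
        >>> ks_energies, total_energy = read_energies_from_gaussian_log_file(prms)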

    """
    # Critical params
    critical_params = [ "logfile_name", "min_band", "max_band" ]
    # Default params
    default_params = { "spin": 1 }
    # Check inputs
    comn.check_input(params, default_params, critical_params)

    # Extracting the input parameters
    gaussian_log_file_name = params["logfile_name"]
    min_band = params["min_band"]
    max_band = params["max_band"]
    spin     = params["spin"]

    file = open(gaussian_log_file_name,'r')
    lines = file.readlines()
    file.close()

    # Initialize the occupied and unoccupied energies
    occupied_energies   = []
    unoccupied_energies = []
    if spin==1:
        # alpha spin
        spin_letter = 'alpha'
    elif spin==2:
        # beta spin
        spin_letter = 'beta'
    
    for i in range(len(lines)):
        # Find the eigenvalues of the occupied molecular orbitals
        if 'eigenvalues' in lines[i].lower() and 'occ' in lines[i].lower() and spin_letter in lines[i].lower():
            for j in range(len(lines[i].split())):
                try:
                    occupied_energies.append(float(lines[i].split()[j]))
                except ValueError:
                    pass
        # Find the eigenvalues of the unoccupied molecular orbitals
        if 'eigenvalues' in lines[i].lower() and 'virt' in lines[i].lower() and spin_letter in lines[i].lower():
            for j in range(len(lines[i].split())):
                try:
                    unoccupied_energies.append(float(lines[i].split()[j]))
                except ValueError:
                    pass
    # Turn them into numpy arrays
    occupied_energies = np.array(occupied_energies)
    unoccupied_energies = np.array(unoccupied_energies)
    # Concatenate the occupied and unoccupied energies so we can choose from min_band to max_band
    ks_energies = np.concatenate((occupied_energies,unoccupied_energies))

    # Now the total energy
    total_energy = 0
    for i in range(len(lines)):
        # Find the 'SCF Done' in the output, total energy is there
        if 'SCF Done:'.lower() in lines[i].lower():
            for j in range(len(lines[i].split())):
                try:
                    total_energy = float( lines[i].split()[j] )
                    break
                except ValueError:
                    pass
            break
    
    return ks_energies[min_band-1:max_band], total_energy
Example #24
0
def transform_data(X, params):
    """

    This is an auxiliary function to transform the original matrices X (e.g. H_vib) according to:     
    X(original) ->    ( X + shift1 ) (x) (scale) + shift2,  

    Here, (x) indicates the element-wise multiplication, and shift1, shift2, and scale are matrices

    Args: 
        X ( list of lists of CMATRIX(nstates, nstates) ): the original data stored as
            X[idata][step] - a CMATRIX(nstates, nstates) for the dataset `idata` and time 
            step `step`
        params ( dictionary ): parameters controlling the transformation

            * **params["shift1"]** ( CMATRIX(nstates,nstates) ): first shift corrections [units of X], [default: zero]
            * **params["shift2"]** ( CMATRIX(nstates,nstates) ): second shift corrections [units of X], [default: zero]
            * **params["scale"]** ( CMATRIX(nstates,nstates) ): scaling of the X [unitless], [default: 1.0 in all matrix elements]

    Returns:    
        None: but transforms X input directly, so changes the original input


    Example:
        Let's say we have a data set of 2x2 matrices and we want to increase the energy gap by 0.1 units and scale the
        couplings by a factor of 3. Then, the input is going to be like this:

        >>> scl = CMATRIX(2,2); 
        >>> scl.set(0,0, 1.0+0.0j);  scl.set(0,1, 3.0+0.0j);
        >>> scl.set(1,0, 3.0+0.0j);  scl.set(1,1, 1.0+0.0j);
        >>> shi = CMATRIX(2,2); 
        >>> shi.set(0,0, 0.0+0.0j);  shi.set(0,1, 0.0+0.0j);
        >>> shi.set(1,0, 0.0+0.0j);  shi.set(1,1, 0.1+0.0j);
        >>> transform_data(X, {"shift2":shi, "scale":scl })


    """

    ndata = len(X)
    nsteps = len(X[0])
    nstates = X[0][0].num_of_cols

    sh1 = CMATRIX(nstates, nstates)  # zero
    sh2 = CMATRIX(nstates, nstates)  # zero
    scl = CMATRIX(nstates, nstates)  # all elements are 1

    for i in range(0, nstates):
        for j in range(0, nstates):
            scl.set(i, j, 1.0 + 0.0j)

    critical_params = []
    default_params = {"shift1": sh1, "shift2": sh2, "scale": scl}
    comn.check_input(params, default_params, critical_params)

    for idata in range(0, ndata):
        for istep in range(0, nsteps):

            tmp = CMATRIX(X[idata][istep])
            tmp = tmp + params["shift1"]
            tmp.dot_product(tmp, params["scale"])
            tmp = tmp + params["shift2"]
            X[idata][istep] = CMATRIX(tmp)
Example #25
0
def plot_hdf5(plot_params, ax=plt, use_default_ax=True):
    """
    This function is meant to plot the results stored in the hdf files generated by the exact dynamics runs

    Args:

        plot_params ( dictionary ): plotting parameters; can contain the following keys:

            * **plot_params["prefix"]** ( string ): the name of the directory containing the input HDF5 file.
                This directory will also be used to output the generated picture files [ default: "out" ]
            * **plot_params["filename"]** ( string ): the name of the HDF5 file to read [ default: "data.hdf" ]
            * **plot_params["hdf5_output_level"]** ( int ): the level of info contained in the HDF5 file [ default: 3 ]
            * **plot_params["which_dofs"]** ( list of ints ): indices of the nuclear DOFs to plot in the phase-space panel [ default: [0] ]
            * **plot_params["which_adi_states"]** ( list of ints ): indices of the adiabatic states to plot [ default: [0] ]
            * **plot_params["which_dia_states"]** ( list of ints ): indices of the diabatic states to plot [ default: [0] ]
            * **plot_params["colors"]** ( dictionary ): the definition of the colors to use
            * **plot_params["clrs_index"]** ( list of strings ): the mapping of the color definitions onto integer indices
            * **plot_params["properties_to_save"]** ( list of strings ): the names of the datasets expected in the HDF5 file
                [ default: all the properties the exact dynamics calculations may save ]

        ax ( matplotlib.pyplot or a compatible object ): the object used for the plotting calls [ default: matplotlib.pyplot ]

        use_default_ax ( Boolean ): whether to apply the default rc styling to `ax` [ default: True ]
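
    Example:
        A usage sketch (illustrative only; assumes that a previous exact dynamics run has produced the file out/data.hdf):

        >>> plot_hdf5({ "prefix":"out", "filename":"data.hdf", "which_adi_states":[0, 1], "which_dia_states":[0, 1] })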
    

    """

    if use_default_ax:
        ax.rc('axes', titlesize=24)  # fontsize of the axes title
        ax.rc('axes', labelsize=20)  # fontsize of the x and y labels
        ax.rc('legend', fontsize=20)  # legend fontsize
        ax.rc('xtick', labelsize=16)  # fontsize of the tick labels
        ax.rc('ytick', labelsize=16)  # fontsize of the tick labels

        ax.rc('figure.subplot', left=0.2)
        ax.rc('figure.subplot', right=0.95)
        ax.rc('figure.subplot', bottom=0.13)
        ax.rc('figure.subplot', top=0.88)

    colors = {}

    colors.update({"11": "#8b1a0e"})  # red
    colors.update({"12": "#FF4500"})  # orangered
    colors.update({"13": "#B22222"})  # firebrick
    colors.update({"14": "#DC143C"})  # crimson

    colors.update({"21": "#5e9c36"})  # green
    colors.update({"22": "#006400"})  # darkgreen
    colors.update({"23": "#228B22"})  # forestgreen
    colors.update({"24": "#808000"})  # olive

    colors.update({"31": "#8A2BE2"})  # blueviolet
    colors.update({"32": "#00008B"})  # darkblue

    colors.update({"41": "#2F4F4F"})  # darkslategray

    clrs_index = [
        "11", "21", "31", "41", "12", "22", "32", "13", "23", "14", "24"
    ]

    # Parameters and dimensions
    critical_params = []
    default_params = {
        "prefix":
        "out",
        "filename":
        "data.hdf",
        "hdf5_output_level":
        3,
        "which_dofs": [0],
        "which_adi_states": [0],
        "which_dia_states": [0],
        "colors":
        colors,
        "clrs_index":
        clrs_index,
        "properties_to_save": [
            "timestep", "time", "Ekin_dia", "Ekin_adi", "Epot_dia", "Epot_adi",
            "Etot_dia", "Etot_adi", "norm_dia", "norm_adi", "pop_dia",
            "pop_adi", "q_dia", "q_adi", "q2_dia", "q2_adi", "p_dia", "p_adi",
            "p2_dia", "p2_adi", "denmat_dia", "denmat_adi", "PSI_dia",
            "PSI_adi", "reciPSI_dia", "reciPSI_adi"
        ]
    }
    comn.check_input(plot_params, default_params, critical_params)

    filename = plot_params["filename"]
    prefix = plot_params["prefix"]
    hdf5_output_level = plot_params["hdf5_output_level"]
    which_dofs = plot_params["which_dofs"]
    which_adi_states = plot_params["which_adi_states"]
    which_dia_states = plot_params["which_dia_states"]
    colors = plot_params["colors"]
    clrs_index = plot_params["clrs_index"]
    properties_to_save = plot_params["properties_to_save"]

    out_prefix = prefix

    with h5py.File(F"{prefix}/{filename}", 'r') as f:

        t = None
        if "time" in properties_to_save:
            t = list(f["time/data"][:])

        #=============== Populations ======================

        ax.figure(1, figsize=(36, 12))  # dpi=300, frameon=False)
        ax.subplot(1, 2, 1)
        ax.title('Adiabatic population dynamics')
        ax.xlabel('Time, a.u.')
        ax.ylabel('Population')

        if "pop_adi" in properties_to_save and t is not None:
            nstates = f["pop_adi/data"].shape[1]  #.attrs['dim'][1]
            for i in range(nstates):
                if i in which_adi_states:
                    Pi = list(f["pop_adi/data"][:, i, 0])
                    ax.plot(t,
                            Pi,
                            label='$P_%i$' % (i),
                            linewidth=10,
                            color=colors[clrs_index[i]])
                    ax.legend()

        ax.subplot(1, 2, 2)
        ax.title('Diabatic population dynamics')
        ax.xlabel('Time, a.u.')
        ax.ylabel('Population')

        if "pop_dia" in properties_to_save and t is not None:
            nstates = f["pop_dia/data"].shape[1]  #.attrs['dim'][1]
            for i in range(nstates):
                if i in which_dia_states:
                    Pi = list(f["pop_dia/data"][:, i, 0])
                    ax.plot(t,
                            Pi,
                            label='$P_%i$' % (i),
                            linewidth=10,
                            color=colors[clrs_index[i]])
                    ax.legend()

        ax.savefig("%s/Fig1.png" % (prefix), dpi=300)
        ax.savefig("%s/Fig1.pdf" % (prefix), dpi=300)

        #============= Energies =====================
        ax.figure(2, figsize=(36, 12))  # dpi=300, frameon=False)

        ax.subplot(1, 2, 1)
        ax.title('Energies')
        ax.xlabel('t, a.u.')
        ax.ylabel('Energy, a.u.')
        if "Ekin_dia" in properties_to_save \
           and "Epot_dia" in properties_to_save \
           and "Etot_dia" in properties_to_save \
           and t is not None:

            Ekin_dia = list(f["Ekin_dia/data"][:])
            Epot_dia = list(f["Epot_dia/data"][:])
            Etot_dia = list(f["Etot_dia/data"][:])

            ax.plot(t,
                    Etot_dia,
                    label='$Etot_{dia}$',
                    linewidth=10,
                    color=colors["11"])
            ax.plot(t,
                    Ekin_dia,
                    label='$Ekin_{dia}$',
                    linewidth=10,
                    color=colors["21"])
            ax.plot(t,
                    Epot_dia,
                    label='$Epot_{dia}$',
                    linewidth=10,
                    color=colors["31"])
            ax.legend()

        ax.subplot(1, 2, 2)
        ax.title('Energies')
        ax.xlabel('t, a.u.')
        ax.ylabel('Energy, a.u.')

        if "Ekin_adi" in properties_to_save \
           and "Epot_adi" in properties_to_save \
           and "Etot_adi" in properties_to_save \
           and t != None:

            Ekin_adi = list(f["Ekin_adi/data"][:])
            Epot_adi = list(f["Epot_adi/data"][:])
            Etot_adi = list(f["Etot_adi/data"][:])

            ax.plot(t,
                    Etot_adi,
                    label='$Etot_{adi}$',
                    linewidth=10,
                    color=colors["11"])
            ax.plot(t,
                    Ekin_adi,
                    label='$Ekin_{adi}$',
                    linewidth=10,
                    color=colors["21"])
            ax.plot(t,
                    Epot_adi,
                    label='$Epot_{adi}$',
                    linewidth=10,
                    color=colors["31"])
            ax.legend()

        ax.savefig("%s/Fig2.png" % (prefix), dpi=300)
        ax.savefig("%s/Fig2.pdf" % (prefix), dpi=300)

        #============= Phase spaces & Norms  =====================
        ax.figure(3, figsize=(36, 12))  # dpi=300, frameon=False)

        ax.subplot(1, 2, 1)
        ax.title('Phase space')
        ax.xlabel('Coordinate, a.u.')
        ax.ylabel('Momentum, a.u.')

        if "q_dia" in properties_to_save and "p_dia" in properties_to_save:
            ndof = f["q_dia/data"].shape[1]  #.attrs['dim'][1]

            for idof in range(ndof):
                if idof in which_dofs:
                    qi = list(f["q_dia/data"][:, idof, 0])
                    pi = list(f["p_dia/data"][:, idof, 0])

                    ax.plot(qi,
                            pi,
                            label='',
                            linewidth=10,
                            color=colors[clrs_index[idof]])
                    ax.legend()

        ax.subplot(1, 2, 2)
        ax.title('Norms')
        ax.xlabel('Time, a.u.')
        ax.ylabel('Norm')

        if "norm_dia" in properties_to_save and "norm_adi" in properties_to_save and t is not None:

            nrm_dia = list(f["norm_dia/data"][:])
            nrm_adi = list(f["norm_adi/data"][:])

            ax.plot(t,
                    nrm_dia,
                    label='Diabatic',
                    linewidth=10,
                    color=colors["11"])
            ax.plot(t,
                    nrm_adi,
                    label='Adiabatic',
                    linewidth=10,
                    color=colors["21"])
            ax.legend()

        ax.savefig("%s/Fig3.png" % (prefix), dpi=300)
        ax.savefig("%s/Fig3.pdf" % (prefix), dpi=300)

        ax.show()
        ax.close()
Example #26
0
def run_dynamics(dyn_params, Ham, rho_init):
    """
    This function integrates the HEOM for a given system Hamiltonian, initial conditions, and bath parameters

    Args:
        dyn_params ( dictionary )
            Parameters controlling the execution of the dynamics. Can contain:

            =============== Properties of the bath ================

            * **dyn_params["KK"]** ( int )
                Defines the number of Matsubara modes (KK+1) - one needs to
                achieve a convergence w.r.t. this parameter [ default: 0]

            * **dyn_params["LL"]** ( int )
                The hierarchy level - one needs to achieve a convergence w.r.t. this parameter [ default: 10 ]

            * **dyn_params["gamma"]** ( float )
                The system-bath interaction ("collision") frequency, related
                to the bath's "friction" on the system [ units: Ha,  default: 1.0/(0.1 * units.ps2au) ]

            * **dyn_params["eta"]** ( float )
                Reorganization energy of bath [ units: Ha, default: 2.0 * 50.0 * units.inv_cm2Ha ]

            * **dyn_params["temperature"]** ( float )
                Temperature of the bath [ units: K, default: 300 ]

            * **dyn_params["el_phon_couplings"]** ( list of CMATRIX; (nstates+1) x CMATRIX(nstates, nstates) )
                The matrices that describe how each phonon mode is coupled to the various electronic states.
                In the simplest picture, when a phonon k is coupled to electronic state m, the matrix el_phon_couplings[k]
                will contain 1.0 at the element (m,m) and zeroes everywhere else. You can, of course, define more general
                situations when one phonon is coupled to many states (and perhaps to their coherences).
                The convention is: dyn_params["el_phon_couplings"][0] is a placeholder (not used), and only the next element is the
                first actual coupling. [ default: see the simplest example described above ]


            =============== Parameters of the dynamics ================

            * **dyn_params["dt"]** ( float )
                Time-integration timestep [ units: a.u. of time, default: 0.1*units.fs2au ]

            * **dyn_params["nsteps"]** ( int )
                How many steps of the dynamics to perform [ default: 10 ]

            * **dyn_params["verbosity"]** ( int )
                The level of the run-time printout of any useful information [ default: -1 ]

            * **dyn_params["progress_frequency"]** ( float )
                The frequency (defined as the fraction of the entire simulation run)
                to print out the progress message. This number shall be in the interval [0, 1]
                [ default: 0.1 ]


            =============== Algorithmic parameters ================

            * **dyn_params["truncation_scheme"]** ( int )
                How to truncate the HEOM equations. Options are:

                - 0 : no truncation
                - 1 : according to Schulten with real part of Matsubara terms [ default ]
                - 2 : according to Schulten with full Matsubara term
                - 3 : according to Shi with real part of Matsubara terms
                - 4 : according to Shi with full Matsubara terms

            * **dyn_params["do_scale"]** ( int )
                Whether to use the scaled HEOM version. Options are:

                - 0 : don't use the scaled HEOM
                - 1 : use it according to JCP 130, 084105, 2009 [ default ]

            * **dyn_params["tolerance"]** ( float )
                The threshold for discarding the auxiliary density matrices.
                The larger it is, the fewer auxiliary density matrices survive in the HEOM
                and the faster the calculations become. [ default: 1e-6 ]

            * **dyn_params["filter_after_steps"]** ( int )
                Defines how often (after how many steps of the dynamics)
                the auxiliary density matrices will be checked and potentially discarded
                (based on the `tolerance` parameter set above) [ default: 10 ]

            * **dyn_params["num_threads"]** ( int )
                The number of OMP threads to use to parallelize the calculations [ default: 1 ]

            =============== Parameters for saving data ================

            * **dyn_params["prefix"]** ( string )
                The name of the folder which will contain the results computed.
                This folder will be created if it does not already exist [ default: "out" ]

            * **dyn_params["hdf5_output_level"]** ( int )
                The level of on-the-fly HDF5 file printout.
                The file is called "data.hdf" and is stored in the directory defined by the `prefix` variable.
                The larger it is, the more properties will be saved, and the larger the size of the generated files will be.
                In this cases, the files are being written as the calculations go, so it may be a quite slow process (the main
                bottleneck of the calculations!). However, if your calculations are stopped before they reach the end,
                you shall still have the data. [ default: 0 ]

            * **dyn_params["txt_output_level"]** ( int )
                The level of the text-based output of the computed results. This is not yet implemented. [ default: 0 ]

            * **dyn_params["mem_output_level"]** ( int )
                The level of the memory-based HDF5 file creation. This is a flag similar to the `hdf5_output_level`
                The file is called "mem_data.hdf" and is stored in the directory defined by the `prefix` variable.
                The larger it is, the more properties will be saved, and the larger the size of the generated files will be.
                Unlike with the "hdf5_output_level", the files are being written only when the calculations end. This way,
                the writing of the files is much much faster than with "hdf5_output_lelel", but if the calculations are stopped
                or crash the mid-way, you will have nothing stored. Also, in this case, the function also returns a tuple with
                all the variables stored, so ready to use in the Python program. Keep in mind that although this option
                is much faster than "hdf5_output_level", all the results are first stored in the OS memory before they are
                dumped into the HDF5 files, so for large systems/calculations you may need good amount of RAM. [ default: 3 ]

            * **dyn_params["properties_to_save"]** ( list of strings )
                The names of the datasets (data) to store to the HDF5 files.
                Note that one needs to satisfy both the *_output_level and list the dataset in this parameter
                in order to have it actually saved in the file.

                Available names:

                - **timestep** ( int )
                    The index of the timestep
                    *_output_level >= 1

                - **time** ( float )
                    The time of dynamics [ in a.u. of time]
                    *_output_level >= 1

                - **denmat** ( CMATRIX(nstates, nstates) )
                    The density matrix evolution
                    *_output_level >= 3

                [ default: [ "timestep", "time", "denmat" ] ]


            * **dyn_params["use_compression"]** ( int )
                Whether to use the data compression (via gzip) when storing data to HDF5 files.
                Options:

                - 0 : don't compress data [ default ]
                - 1 : compress data

                The experience shows that it is better not to use the compression. It gets slower.

            * **dyn_params["compression_level"]** ( 3 x int )
                The levels of compression for integers, floats, and complex numbers, respectively.

                Each must be a number between 0 and 9, inclusive.

                [ default: [0,0,0] ]



        Ham ( CMATRIX(nstates, nstates) )
            Defines the system's electronic Hamiltonian. The diagonal elements contain
            the site energies, the off-diagonal elements contain electronic couplings [ units: Ha ]

        rho_init ( CMATRIX(nstates, nstates) )
            The initial density matrix describing the quantum system.
            Its dimensions must be the same as those of the `Ham` variable.



    """

    params = dict(dyn_params)

    # Parameters and dimensions
    critical_params = []
    default_params = {
        "KK": 0,
        "LL": 10,
        "gamma": 1.0 / (0.1 * units.ps2au),
        "eta": 2.0 * 50.0 * units.inv_cm2Ha,
        "temperature": 300.0,
        "el_phon_couplings": initialize_el_phonon_couplings(Ham.num_of_cols),
        "dt": 0.1 * units.fs2au,
        "nsteps": 10,
        "verbosity": -1,
        "progress_frequency": 0.1,
        "truncation_scheme": 1,
        "do_scale": 0,
        "adm_tolerance": 1e-6,
        "adm_deriv_tolerance": 1e-12,
        "filter_after_steps": 1,
        "do_zeroing": 0,
        "num_threads": 1,
        "prefix": "out",
        "hdf5_output_level": 0,
        "txt_output_level": 0,
        "mem_output_level": 3,
        "properties_to_save": ["timestep", "time", "denmat"],
        "use_compression": 0,
        "compression_level": [0, 0, 0]
    }

    comn.check_input(params, default_params, critical_params)

    nsteps = params["nsteps"]
    print_freq = max(1, int(params["progress_frequency"] * nsteps))  # at least 1, to avoid modulo-by-zero below

    #============= System ======================
    params.update({"Ham": Ham})
    nquant = Ham.num_of_cols

    #============== HEOM topology ==============

    KK = dyn_params["KK"]
    LL = dyn_params["LL"]

    all_vectors = intList2()
    vec_plus = intList2()
    vec_minus = intList2()

    gen_hierarchy(nquant * (KK + 1), LL, params["verbosity"], all_vectors,
                  vec_plus, vec_minus)
    params.update({
        "nvec": all_vectors,
        "nvec_plus": vec_plus,
        "nvec_minus": vec_minus
    })

    nn_tot = len(all_vectors)

    all_indices = []
    init_nonzero = []
    for n in range(nn_tot):
        all_indices.append(n)
        init_nonzero.append(1)

    #============ Bath update =====================
    gamma_matsubara = doubleList()
    c_matsubara = complexList()

    setup_bath(KK, params["eta"], params["gamma"], params["temperature"],
               gamma_matsubara, c_matsubara)
    params.update({
        "gamma_matsubara": gamma_matsubara,
        "c_matsubara": c_matsubara
    })

    if params["verbosity"] >= 1:
        for k in range(KK + 1):
            print(
                F" k = {k} gamma_matsubara[{k}] = {gamma_matsubara[k]}  c_matsubara[{k}] = {c_matsubara[k]}"
            )

    #============= Initialization ============

    rho = CMATRIX(nn_tot * nquant, nquant)         # all the ADMs stacked on top of each other
    rho_scaled = CMATRIX(nn_tot * nquant, nquant)  # the scaled ADMs, stacked the same way
    #drho = CMATRIX((nn_tot)*nquant, nquant)  # all rho matrices stacked on top of each other

    aux_memory = {
        "rho_unpacked": CMATRIXList(),
        "rho_unpacked_scaled": CMATRIXList(),
        "drho_unpacked": CMATRIXList(),
        "drho_unpacked_scaled": CMATRIXList()
    }
    for n in range(nn_tot):
        aux_memory["rho_unpacked"].append(CMATRIX(nquant, nquant))
        aux_memory["rho_unpacked_scaled"].append(CMATRIX(nquant, nquant))
        aux_memory["drho_unpacked"].append(CMATRIX(nquant, nquant))
        aux_memory["drho_unpacked_scaled"].append(CMATRIX(nquant, nquant))

    # Initial conditions
    x_ = Py2Cpp_int(list(range(nquant)))
    y_ = Py2Cpp_int(list(range(nquant)))
    push_submatrix(rho, rho_init, x_, y_)

    #unpack_mtx(aux_memory["rho_unpacked"], rho)

    #========== Scale working ADMs ====================
    if params["verbosity"] >= 2 and params["do_scale"] == 1:
        print("Scaling factors")
        for n in range(nn_tot):
            for m in range(nquant):
                for k in range(KK + 1):
                    n_mk = all_vectors[n][m * (KK + 1) + k]
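                    # scaling factor of the scaled HEOM (cf. JCP 130, 084105, 2009 referenced above):
                    # 1/sqrt( n_mk! * |c_k|^n_mk )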
                    scl = 1.0 / math.sqrt(
                        FACTORIAL(n_mk) * FAST_POW(abs(c_matsubara[k]), n_mk))
                    print(F" n={n} m={m} k={k}  scaling_factor={scl}")

    # raw -> scaled
    transform_adm(rho, rho_scaled, aux_memory, params, 1)

    #========== Filter scaled ADMs ======================
    params.update({
        "nonzero": Py2Cpp_int(init_nonzero),
        "adm_list": Py2Cpp_int(all_indices)
    })
    update_filters(rho_scaled, params, aux_memory)

    if params["verbosity"] >= 2:
        print("nonzero = ", Cpp2Py(params["nonzero"]))
        print("adm_list = ", Cpp2Py(params["adm_list"]))

        if params["verbosity"] >= 4:
            print("ADMs")
            aux_print_matrices(0, aux_memory["rho_unpacked"])
            print("Scaled ADMs")
            aux_print_matrices(0, aux_memory["rho_unpacked_scaled"])

    # Initialize savers
    _savers = save.init_heom_savers(params, nquant)

    #============== Propagation =============

    start = time.time()
    for step in range(params["nsteps"]):

        #================ Saving and printout ===================
        # scaled -> raw
        transform_adm(rho, rho_scaled, aux_memory, params, -1)

        # Save the variables
        save.save_heom_data(_savers, step, print_freq, params,
                            aux_memory["rho_unpacked"])

        if step % print_freq == 0:
            print(F" step= {step}")

            if params["verbosity"] >= 3:
                print("nonzero = ", Cpp2Py(params["nonzero"]))
                print("adm_list = ", Cpp2Py(params["adm_list"]))

            if params["verbosity"] >= 4:
                print("ADMs")
                aux_print_matrices(0, aux_memory["rho_unpacked"])
                print("Scaled ADMs")
                aux_print_matrices(0, aux_memory["rho_unpacked_scaled"])

        #============== Update the list of active equations = Filtering  ============
        if step % params["filter_after_steps"] == 0:

            # To assess which equations to discard, let's estimate the time-derivatives of rho
            # for all the matrices
            params["adm_list"] = Py2Cpp_int(all_indices)

            update_filters(rho_scaled, params, aux_memory)

        #================= Propagation for one timestep ==================================
        rho_scaled = RK4(rho_scaled, params["dt"], compute_heom_derivatives,
                         params)

    end = time.time()
    print(F"Calculations took {end - start} seconds")

    # For the mem_saver - store all the results into HDF5 format only at the end of the simulation
    if _savers["mem_saver"] != None:
        prefix = params["prefix"]
        _savers["mem_saver"].save_data(F"{prefix}/mem_data.hdf",
                                       params["properties_to_save"], "w")
        return _savers["mem_saver"]
Example #27
0
def run(H_vib, params):
    """
    
    The main procedure to run NA-MD calculations within the NBRA workflow

    Args: 
        H_vib ( list of lists of CMATRIX objects ): the vibronic Hamiltonian for all data sets and all time-points
            H_vib[idata][istep].get(i,j) - i,j matrix element for the data set `idata` and step in that data set `istep`
    
        params ( dictionary ): the parameters that control the execution of the NA-MD-NBRA calculations

            * **params["nsteps"]** ( int ): the length of the NA-MD trajectory. This parameter is not 
                necessarily the same as len(H_vib[0]), so it needs to be provided [Required!] 

            * **params["T"]** ( double ): temperature of nuclear/electronic dynamics [in K, default: 300.0]
            * **params["ntraj"]** ( int ): the number of stochastic surface hopping trajectories [default: 1]
            * **params["tdse_Ham"]** ( int ): option to select either the regular (input) or Boltzmann-corrected
                Hamiltonian:

                - 0 - regular [ default ]
                - 1 - Boltzmann-corrected 

            * **params["Hvib_type"]** ( int ): option to select if the Hvib is a diabatic or an adiabatic
                Hamiltonian:

                - 0 - diabatic
                - 1 - adiabatic  [ default ]

            * **params["sh_method"]** ( int ): selects the algorithm to compute surface hopping probabilities 
                Options:

                - 0 - MSSH
                - 1 - FSSH [ default ]

            * **params["decoherence_constants"]** ( int ): selects whether to compute decoherence parameters
                on the fly or to use provided parameters:

                - 0 - pre-compute the parameters from the trajectory data before NA-MD run [ default ]
                - 1 - use the provided parameters (see also ```params["decoherence_times"]``` and ```params["decoherence_rates"]```)
                - 20 - use the time-dependent decoherence times as in DISH paper. This is different from
                    option 0 in that these numbers depend on the state amplitudes. Dephasing times are computed as in 0.
                - 21 - use the time-dependent decoherence times as in DISH paper. This is different from
                    option 0 in that these numbers depend on the state amplitudes. Dephasing times are computed as in 1.


            * **params["decoherence_times"]** ( MATRIX(nstates,nstates) ): decoherence times for all 
                pairs of states. This should be provided if ``` params["decoherence_constants"] == 1``` the dimensions should be
                consistent with those of the input Hvib data. [ units: a.u. of time ]
             

            * **params["decoherence_method"]** ( int ): selects the decoherence method 
                Options:

                - 0 - no decoherence [ default ]
                - 1 - ID-A
                - 2 - MSDM
                - 3 - DISH

            * **params["dt"]** ( double ): nuclear dynamics integration time step [in a.u. of time, default: 41.0]
            * **params["Boltz_opt"]** ( int ): option to select a probability of hopping acceptance [default: 3]
                Options:

                - 0 - all proposed hops are accepted - no rejection based on energies
                - 1 - proposed hops are accepted with exp(-E/kT) probability - the old approach (previously the default)
                - 2 - proposed hops are accepted with the probability derived from Maxwell-Boltzmann distribution - more rigorous
                - 3 - generalization of "1", but actually it should be changed in case there are many degenerate levels

            * **params["istate"]** ( int ): index of the initial state [default: 0]
            * **params["init_times"]** ( list of ints ): indices of the starting point in the provided data arrays [default: [0]]
            * **params["outfile"]** ( string ): the name of the file where to print populations
                and energies of states [default: "_out.txt"]    

    Returns: 
        MATRIX(nsteps, 3*nstates+5): the trajectory (and initial-condition)-averaged observables for every timestep,
            organized as follows: 

            time, first-state info, ..., last-state info, all-states-related data

            time, E(0), P_SE(0), P_SH(0), ...,   E(nst-1), P_SE(nst-1), P_SH(nst-1), <E*P_SE>, <E*P_SH>, sum{P_SE}, sum{P_SH}
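
            Based on this layout, the SH population of state j at step i would be accessed as
            (an illustration of the format above):

            >>> P_SH_j = res.get(i, 3*j + 3)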

     
    """

    
    critical_params = [ "nsteps" ] 
    default_params = { "T":300.0, "ntraj":1,
                       "tdse_Ham":0, "sh_method":1, "decoherence_constants": 0, "decoherence_method":0, "dt":41.0, "Boltz_opt":3,
                       "Hvib_type":1,
                       "istate":0, "init_times":[0], "outfile":"_out.txt" }
    comn.check_input(params, default_params, critical_params)


    rnd = Random()

    ndata = len(H_vib)
    nsteps = params["nsteps"]
    nstates = H_vib[0][0].num_of_cols  # number of states

    ntraj = params["ntraj"]
    nitimes = len(params["init_times"])
    Ntraj = ndata * nitimes * ntraj

    T = params["T"]
    bolt_opt = params["Boltz_opt"]
    dt = params["dt"]
    tdse_Ham = params["tdse_Ham"]

    res = MATRIX(nsteps, 3*nstates+5)


    #========== Compute PARAMETERS  ===============
    # Decoherence times
    # these are actually the dephasing rates!
    tau, dephasing_rates = None, None

    if params["decoherence_constants"] == 0 or params["decoherence_constants"]==20:
        tau, dephasing_rates = dectim.decoherence_times_ave(H_vib, params["init_times"], nsteps, 1) 

    elif params["decoherence_constants"] == 1 or params["decoherence_constants"]==21:
        if params["decoherence_times"].num_of_cols != nstates:
            print("Error: dimensions of the input decoherence times matrix are not consistent with \
                   the dimensions of the Hamiltonian matrices (the number of states). Exiting...\n")
            sys.exit(0)
        else:
            tau = MATRIX(params["decoherence_times"])
            dephasing_rates = dectim.decoherence_times2rates(tau)



    #========== Initialize the DYNAMICAL VARIABLES  ===============
    # TD-SE coefficients and active state indices
    Coeff, istate = [], []

    # Coherence times and coherence intervals for DISH
    t_m, tau_m = [], []

    for tr in range(0,Ntraj):
        istate.append(params["istate"])
        Coeff.append(CMATRIX(nstates, 1)); 
        Coeff[tr].set(params["istate"], 1.0, 0.0)
        t_m.append(MATRIX(nstates,1))
        tau_m.append(MATRIX(nstates,1))  

           
    # Prepare the output file
    f = open(params["outfile"],"w"); f.close()

    #=============== Entering the DYNAMICS ========================
    for i in range(0,nsteps):  # over all evolution times


        #============== Analysis of the Dynamics  =================
        # Compute the averages
        res_i = traj_statistics(i, Coeff, istate, H_vib, params["init_times"])

        # Print out into a file
        printout(i*dt, res_i, params["outfile"])

        # Update the overall results matrix
        res.set(i,0, i*dt)
        push_submatrix(res, res_i, Py2Cpp_int([i]), Py2Cpp_int( list(range(1,3*nstates+5)) ) )


        #=============== Propagation ==============================
        for idata in range(0,ndata):   # over all MD trajectories (data sets)

            for it_indx in range(0,nitimes): # over all initial times

                it = params["init_times"][it_indx]

                for tr in range(0,ntraj):  # over all stochastic trajectories

                    Tr = idata*(nitimes*ntraj) + it_indx*(ntraj) + tr

                    #============== Propagation: TD-SE and surface hopping ==========
                    # Coherent evolution amplitudes
                    Heff = None 

                    if tdse_Ham==0:
                        Heff = H_vib[idata][it+i]
                    elif tdse_Ham==1:
                        Heff = tsh.Boltz_corr_Ham(H_vib[idata][it+i], Coeff[Tr], params["T"], params["Hvib_type"])

                    propagate_electronic(dt, Coeff[Tr], Heff)   # propagate the electronic DOFs

        
                    # Surface hopping 
                    ksi  = rnd.uniform(0.0, 1.0)
                    ksi2 = rnd.uniform(0.0, 1.0)
        
                    if params["decoherence_method"] in [0, 1, 2]:
                    
                        do_collapse = None
                        if params["decoherence_method"]==0:    # No decoherence
                            do_collapse = 0
                        elif params["decoherence_method"]==1:  # ID-A, taken care of in the tsh.hopping
                            do_collapse = 1
                        elif params["decoherence_method"]==2:  # MSDM
                            do_collapse = 0                            
                            Coeff[Tr] = msdm(Coeff[Tr], dt, istate[Tr], dephasing_rates)
                    
                        istate[Tr], Coeff[Tr] = tsh.hopping(Coeff[Tr], Heff, istate[Tr], params["sh_method"], do_collapse, ksi, ksi2, dt, T, bolt_opt)
                    
                    elif params["decoherence_method"] in [3]:  # DISH
                    
                        tau_m[Tr] = coherence_intervals(Coeff[Tr], dephasing_rates)
                        istate[Tr] = tsh.dish_py(Coeff[Tr], istate[Tr], t_m[Tr], tau_m[Tr], Heff, bolt_opt, T, ksi, ksi2)
                        t_m[Tr] += dt

        
    return res
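

# A hedged usage sketch (added for illustration; not part of the original source). It builds a toy
# vibronic Hamiltonian time series - one data set, two states, constant matrix elements chosen
# arbitrarily - and feeds it to the run() function defined above. `CMATRIX` is assumed to be
# available from the module-level imports, as run() itself relies on it.
def _example_run_namd_nbra():
    nst, ntimes = 2, 200

    hvib = CMATRIX(nst, nst)
    hvib.set(0, 0, 0.00 + 0.0j);      hvib.set(0, 1, 0.001 - 0.0005j)
    hvib.set(1, 0, 0.001 + 0.0005j);  hvib.set(1, 1, 0.01 + 0.0j)

    # H_vib[idata][istep] - here a single data set with `ntimes` identical snapshots
    H_vib = [[CMATRIX(hvib) for _ in range(ntimes)]]

    prms = {"nsteps": 100, "ntraj": 10, "istate": 1, "outfile": "_example_out.txt"}
    return run(H_vib, prms)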
Example #28
0
def model1(q, params, full_id=None):
    """

    The first of the two model potentials described by L. Wang, C.C. Martens, 
    and Y. Zheng, "Entangled trajectory molecular dynamics in 
    multidimensional systems: Two-dimensional quantum tunneling through 
    the Eckart barrier" J. Chem. Phys. 137, 34113 (2012)

    This potential has separable DOFs

    Args:
        q ( MATRIX(2,1) ): coordinates of the particle, ndof = 2 
        params ( dictionary ): model parameters

            * **params["Va"]** ( double ): barrier height [ default: 0.00625, units: Ha]
            * **params["Vb"]** ( double ): harmonic potential term [ default: 0.0106, units: Ha/Bohr^2]

    Returns:       
        PyObject: obj, with the members:

            * obj.ham_dia ( CMATRIX(1,1) ): diabatic Hamiltonian 
            * obj.ovlp_dia ( CMATRIX(1,1) ): overlap of the basis (diabatic) states [ identity ]
            * obj.d1ham_dia ( list of 2 CMATRIX(1,1) objects ): 
                derivatives of the diabatic Hamiltonian w.r.t. the nuclear coordinate
            * obj.dc1_dia ( list of 2 CMATRIX(1,1) objects ): derivative coupling in the diabatic basis [ zero ]

    """

    # Define potential specific constants
    critical_params = [ ] 
    default_params = {"Va":0.00625, "Vb":0.0106 }
    comn.check_input(params, default_params, critical_params)


    Va = params["Va"]
    Vb = params["Vb"]


    # Hdia and Sdia are ndia x ndia in dimension
    Hdia = CMATRIX(1,1)
    Sdia = CMATRIX(1,1)

    # d1ham and dc1_dia are ndia x ndia in dimension, but we have nnucl of them
    d1ham_dia = CMATRIXList();
    dc1_dia = CMATRIXList();

    for i in range(0,2):
        d1ham_dia.append( CMATRIX(1,1) )
        dc1_dia.append( CMATRIX(1,1) )


    indx = 0
    if full_id is not None:
        Id = Cpp2Py(full_id)
        indx = Id[-1]

    x = q.col(indx).get(0)
    y = q.col(indx).get(1)

    x2 = x*x
    y2 = y*y

    # z = sech(2x)
    # z2 = sech^2(2x)    
    z = (2.0 * math.cosh(2.0*x))/(1.0 + math.cosh(4.0*x))
    z2 = z*z

    Hdia.set(0,0,( Va*z2 + 0.5*Vb*y*y )*(1.0+0.0j))
    Sdia.set(0,0,1.0+0.0j)

    #  d Hdia / dR_0
    d1ham_dia[0].set(0,0, ( ( -4.0*Va*math.tanh(2.0*x)*z2 ) )*(1.0+0.0j))
    d1ham_dia[1].set(0,0, ( Vb*y )*(1.0+0.0j))

    #  <dia| d/dR_0| dia >
    dc1_dia[0].set(0,0, 0.0+0.0j)
    dc1_dia[1].set(0,0, 0.0+0.0j)

    obj = tmp()
    obj.ham_dia = Hdia
    obj.ovlp_dia = Sdia
    obj.d1ham_dia = d1ham_dia
    obj.dc1_dia = dc1_dia

    return obj
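

# A hedged usage sketch (added for illustration; not part of the original source): evaluate the
# potential defined above at (x, y) = (0, 0), the top of the sech^2 barrier, where the energy
# should equal Va = 0.00625 Ha. `MATRIX` is assumed to be available from the module-level imports.
def _example_eval_model1():
    q = MATRIX(2, 1)
    q.set(0, 0, 0.0)   # x
    q.set(1, 0, 0.0)   # y

    obj = model1(q, {})
    print(F"V(0,0) = {obj.ham_dia.get(0,0).real} Ha (expected: Va = 0.00625 Ha)")
    return obj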
Example #29
0
def find_maxima(data, params):
    """

    This function finds all the maxima of the data set and sorts them according to the data values.
    The maxima are defined as data[i-1] < data[i] > data[i+1]

    Args: 
        data ( list of doubles ): data to be analyzed
        params ( Python dictionary ): parameters controlling the execution

            * **params["do_output"]** ( Boolean ): whether to output the data to a file [ default: False ]
            * **params["logname"]** ( string ): the name of the output file [ default: "data_maxima.txt" ]
            * **params["verbose"]** ( int ): the amount of the debug/descriptive info to print out [ default: 0 ]

    Returns:
        list of 2-element lists: out[i][v], where:

            out[i][0] - the index of the i-th maximum in the data
            out[i][1] - the corresponding data value

    Examples:

        >>> res = find_maxima( [0.01, 1.0, 0.1, 0.25, 0.5, 0.75, -1.0 ], {} )
        >>> print(res)
        >>> [ [1, 1.0], [5, 0.75] ]

        >>> res = find_maxima( [0.01, 0.75, 0.1, 0.25, 0.5, 1.75, -1.0 ], {} )
        >>> print(res)
        >>> [ [5, 1.75], [1, 0.75] ]

    """

    critical_params = []
    default_params = {
        "do_output": False,
        "logname": "data_maxima.txt",
        "verbose": 0
    }
    comn.check_input(params, default_params, critical_params)

    do_output = params["do_output"]
    logname = params["logname"]
    verbose = params["verbose"]

    max_indxs = []
    sz = len(data)
    for i in range(1, sz - 1):
        if data[i] > data[i - 1] and data[i] > data[i + 1]:
            max_indxs.append(i)

    inp = []
    sz = len(max_indxs)
    for i in range(0, sz):
        inp.append([max_indxs[i], data[max_indxs[i]]])

    out = merge_sort(inp)  # largest in the end

    if do_output:
        lgfile = open(logname, "a")
        lgfile.write("Found maxima of the data:\n")
        for i in range(0, sz):
            lgfile.write(
                "maximum index = %3i  index of the datapoint = %8.5f  data value = %8.5f \n"
                % (i, out[sz - 1 - i][0], out[sz - 1 - i][1]))
        lgfile.close()

    return out
Example #30
0
def run_tsh(_q, _p, _iM, _Cdia, _Cadi, states, model_params, dyn_params,
            compute_model, rnd):
    """

    Args: 
        _q ( MATRIX(nnucl, ntraj) ): coordinates of the "classical" particles [units: Bohr]
        _p ( MATRIX(nnucl, ntraj) ): momenta of the "classical" particles [units: a.u. of momenta]
        _iM ( MATRIX(nnucl, 1) ): masses of classical particles [units: a.u.^-1]
        _Cdia ( CMATRIX(ndia, ntraj) ): amplitudes of the diabatic basis states
        _Cadi ( CMATRIX(nadi, ntraj) ): amplitudes of the adiabatic basis states
        states ( intList, or list of ntraj ints ): the quantum state of each trajectory
        model_params ( dictionary ): contains the selection of a model and the parameters 
            for that model Hamiltonian
        dyn_params ( dictionary ): parameters controlling the execution of the dynamics
            Can contain:
      
            * **dyn_params["rep"]** ( int ): selects the representation in which nuclear/electronic (Ehrenfest core)
                dynamics is executed

                - 0: diabatic representation
                - 1: adiabatic representation [default: 1]

            * **dyn_params["rep_sh"]** ( int ): selects the representation which is 
                used to perform surface hopping

                - 0: diabatic representation
                - 1: adiabatic representation [default: 1]

            * **dyn_params["nsteps"]** ( int ): the number of NA-MD steps to do [ default: 1 ]

            * **dyn_params["dt"]** ( double ): the nuclear and electronic integration
                timestep [ units: a.u. of time, default: 1.0 ]
 
            * **dyn_params["BATH_params"]** ( Dictionary ): 

        compute_model ( PyObject ): the pointer to the Python function that performs the Hamiltonian calculations
        rnd ( Random ): random numbers generator object

    Returns:
        tuple: ( obs_T, obs_q, obs_p, obs_Ekin, obs_Epot, obs_Etot, obs_dEkin, obs_dEpot, obs_dEtot, obs_Cadi, obs_Cdia, obs_dm_adi, obs_dm_dia, obs_pop, obs_states ), where

            * obs_T ( list of `nsteps` doubles ): time [units: a.u.]
            * obs_q ( list of `nsteps` MATRIX(nnucl, ntraj) ): coordinates of all trajectories [ units: Bohr ]
            * obs_p ( list of `nsteps` MATRIX(nnucl, ntraj) ): momenta of all trajectories [ units: a.u. ]
            * obs_Ekin ( list of `nsteps` doubles ): average kinetic energy of an ensemble of trajectories [units: a.u.]
            * obs_Epot ( list of `nsteps` doubles ): average potential energy of an ensemble of trajectories [units: a.u.]
            * obs_Etot ( list of `nsteps` doubles ): average total energy of an ensemble of trajectories [units: a.u.]
            * obs_dEkin ( list of `nsteps` doubles ): standard deviation of kinetic energy of an ensemble of trajectories [units: a.u.]
            * obs_dEpot ( list of `nsteps` doubles ): standard deviation of potential energy of an ensemble of trajectories [units: a.u.]
            * obs_dEtot ( list of `nsteps` doubles ): standard deviation of total energy of an ensemble of trajectories [units: a.u.]
            * obs_Cadi ( list of `nsteps` CMATRIX(nadi, ntraj) ): amplitudes of adiabatic electronic states of all trajectories 
            * obs_Cdia ( list of `nsteps` CMATRIX(ndia, ntraj) ): amplitudes of diabatic electronic states of all trajectories 
            * obs_dm_adi ( list of `nsteps` CMATRIX(nadi, nadi) ): ensemble-averaged density matrix in adiabatic basis
            * obs_dm_dia ( list of `nsteps` CMATRIX(ndia, ndia) ): ensemble-averaged density matrix in diabatic basis
            * obs_pop ( list of `nsteps` MATRIX(nadi, 1) ): ensemble-averaged TSH populations of adiabatic states
            * obs_states ( list of `nsteps` lists of `ntraj` ints ): indices of the quantum states of each trajectory
              
    """

    obs_T = []  # time
    obs_q = []  # coordinates of all trajectories
    obs_p = []  # momenta of all trajectories
    obs_Ekin = []  # average kinetic energy
    obs_Epot = []  # average potential energy
    obs_Etot = []  # average total energy
    obs_dEkin = []  # kinetic energy fluctuation
    obs_dEpot = []  # potential energy fluctuation
    obs_dEtot = []  # total energy fluctuation
    obs_Cadi = []  # average TD-SE amplitudes in the adiabatic basis
    obs_Cdia = []  # average TD-SE amplitudes in the diabatic basis
    obs_dm_adi = []  # average SE-based density matrix in adiabatic basis
    obs_dm_dia = []  # average SE-based density matrix in diabatic basis
    obs_pop = []  # average SH-based populations adiabatic basis
    obs_states = []  # indices of the quantum states of each trajectory
    #obs_ind = []  # ??

    # Create copies of the input dynamical variables, so we can run several run_tsh
    # calls with the same input variables without worrying that they will be altered
    # inside of run_tsh

    q = MATRIX(_q)
    p = MATRIX(_p)
    iM = MATRIX(_iM)
    Cdia = CMATRIX(_Cdia)
    Cadi = CMATRIX(_Cadi)

    # Parameters and dimensions
    critical_params = []
    default_params = {
        "rep": 1,
        "nsteps": 1,
        "dt": 1.0 * units.fs2au,
        "do_phase_correction": 1,
        "state_tracking_algo": 2,
        "MK_alpha": 0.0,
        "MK_verbosity": 0,
        "tsh_version": 1
    }
    comn.check_input(dyn_params, default_params, critical_params)

    rep = dyn_params["rep"]
    nsteps = dyn_params["nsteps"]
    dt = dyn_params["dt"]
    tsh_version = dyn_params["tsh_version"]
    BATH_params = dyn_params["BATH_params"]

    ndia = Cdia.num_of_rows
    nadi = Cadi.num_of_rows
    nnucl = q.num_of_rows
    ntraj = q.num_of_cols

    # ======= Hierarchy of Hamiltonians =======
    ham = nHamiltonian(ndia, nadi, nnucl)
    ham.init_all(2)

    ham1 = []
    for tr in range(0, ntraj):
        ham1.append(nHamiltonian(ndia, nadi, nnucl))
        ham1[tr].init_all(2)
        ham.add_child(ham1[tr])

    # Initial calculations
    ham.compute_diabatic(compute_model, q, model_params, 1)
    ham.compute_adiabatic(1, 1)
    ham.ampl_adi2dia(Cdia, Cadi, 0, 1)

    if rep == 0:
        ham.compute_nac_dia(p, iM, 0, 1)
        ham.compute_hvib_dia(1)
    elif rep == 1:
        ham.compute_nac_adi(p, iM, 0, 1)
        ham.compute_hvib_adi(1)

    Ekin, Epot, Etot, dEkin, dEpot, dEtot = tsh_stat.compute_etot_tsh(
        ham, p, Cdia, Cadi, states, iM, rep)

    # Thermostats
    therms = []

    for tr in range(0, ntraj):
        therms.append(Thermostat(BATH_params))
        therms[tr].set_Nf_t(nnucl)
        therms[tr].set_Nf_r(0)
        therms[tr].set_Nf_b(0)
        therms[tr].init_nhc()

    # Do the propagation
    for i in range(0, nsteps):

        for tr in range(0, ntraj):
            # Rescale momenta
            scl = therms[tr].vel_scale(0.25 * dt)
            p.scale(-1, tr, scl)

            # Update thermostat variables
            ekin = 0.0
            for dof in range(0, nnucl):
                ekin = ekin + 0.5 * iM.get(dof, 0) * p.get(dof, tr)**2

            therms[tr].propagate_nhc(0.5 * dt, ekin, 0.0, 0.0)

            # Rescale momenta
            scl = therms[tr].vel_scale(0.25 * dt)
            p.scale(-1, tr, scl)

        if rep == 0:
            if tsh_version == 1:
                tsh1(q, p, iM, Cdia, states, ham, compute_model, model_params,
                     dyn_params, rnd)
            elif tsh_version == 2:
                tsh1b(q, p, iM, Cdia, states, ham, compute_model, model_params,
                      dyn_params, rnd)
        elif rep == 1:
            if tsh_version == 1:
                tsh1(q, p, iM, Cadi, states, ham, compute_model, model_params,
                     dyn_params, rnd)
            elif tsh_version == 2:
                tsh1b(q, p, iM, Cadi, states, ham, compute_model, model_params,
                      dyn_params, rnd)

        for tr in range(0, ntraj):
            # Rescale momenta
            scl = therms[tr].vel_scale(0.25 * dt)
            p.scale(-1, tr, scl)

            # Update thermostat variables
            ekin = 0.0
            for dof in range(0, nnucl):
                ekin = ekin + 0.5 * iM.get(dof, 0) * p.get(dof, tr)**2

            therms[tr].propagate_nhc(0.5 * dt, ekin, 0.0, 0.0)

            # Rescale momenta
            scl = therms[tr].vel_scale(0.25 * dt)
            p.scale(-1, tr, scl)

        #=========== Properties ==========
        if rep == 0:
            ham.ampl_dia2adi(Cdia, Cadi, 0, 1)
        elif rep == 1:
            ham.ampl_adi2dia(Cdia, Cadi, 0, 1)

        dm_dia, dm_adi = tsh_stat.compute_dm(ham, Cdia, Cadi, rep, 1)
        Ekin, Epot, Etot, dEkin, dEpot, dEtot = tsh_stat.compute_etot_tsh(
            ham, p, Cdia, Cadi, states, iM, rep)
        pops = tsh_stat.compute_sh_statistics(nadi, states)

        ind = 0.0
        for tr in range(0, ntraj):
            ind = ind + ham1[tr].get_ordering_adi()[0]
        ind = ind / float(ntraj)

        obs_T.append(i * dt)
        obs_q.append(MATRIX(q))
        obs_p.append(MATRIX(p))
        obs_Ekin.append(Ekin)
        obs_Epot.append(Epot)
        obs_Etot.append(Etot)
        obs_dEkin.append(dEkin)
        obs_dEpot.append(dEpot)
        obs_dEtot.append(dEtot)
        obs_Cadi.append(CMATRIX(Cadi))
        obs_Cdia.append(CMATRIX(Cdia))
        obs_dm_adi.append(CMATRIX(dm_adi))
        obs_dm_dia.append(CMATRIX(dm_dia))
        obs_pop.append(MATRIX(pops))
        obs_states.append(list(states))
        #obs_ind.append(ind)

    return obs_T, obs_q, obs_p, obs_Ekin, obs_Epot, obs_Etot, obs_dEkin, obs_dEpot, obs_dEtot, obs_Cadi, obs_Cdia, obs_dm_adi, obs_dm_dia, obs_pop, obs_states
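

# A hedged sketch (added for illustration; not part of the original source) of how the inputs to
# run_tsh() could be assembled for nnucl = 1 nuclear DOF, ntraj = 5 trajectories, and
# ndia = nadi = 2 electronic states. The thermostat settings passed in via `bath_params` and the
# `compute_model` function (not shown) are placeholders - substitute the actual Hamiltonian model
# and bath parameters for your system. `MATRIX`, `CMATRIX`, and `Py2Cpp_int` are assumed to be
# available from the module-level imports.
def _example_run_tsh_inputs(bath_params):
    nnucl, ntraj, nst = 1, 5, 2

    q = MATRIX(nnucl, ntraj)                 # coordinates
    p = MATRIX(nnucl, ntraj)                 # zero initial momenta
    iM = MATRIX(nnucl, 1)
    iM.set(0, 0, 1.0 / 2000.0)               # inverse mass [ a.u.^-1 ]

    Cdia = CMATRIX(nst, ntraj)
    Cadi = CMATRIX(nst, ntraj)
    for tr in range(ntraj):
        q.set(0, tr, -1.0)                   # all trajectories start at x = -1 Bohr
        Cadi.set(1, tr, 1.0 + 0.0j)          # start on the upper adiabatic state

    states = Py2Cpp_int([1] * ntraj)         # initial quantum state of each trajectory

    dyn_params = {"rep": 1, "rep_sh": 1, "nsteps": 100, "dt": 10.0,
                  "BATH_params": dict(bath_params)}

    return q, p, iM, Cdia, Cadi, states, dyn_params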