Example #1
    def calc_data(self):

        #split data into chunks and calculate P(p) for each chunk
        Nchnk = 5
        Nbins = 100
        histo = np.zeros([Nbins, Nchnk + 1])
        split_p_array = np.split(self.p_array, Nchnk)

        for i in range(Nchnk):

            if (i == 0):
                #calculate midpoint of each bin
                bin_edges = np.histogram(split_p_array[i],
                                         bins=Nbins,
                                         range=(self.p_array.min(),
                                                self.p_array.max()),
                                         density=True)[1]
                for j in range(Nbins):
                    histo[j, 0] = (bin_edges[j] + bin_edges[j + 1]) / 2.0

            #calculate normalized histogram for each chunk, note that all histograms have the same range
            histo[:, i + 1] = np.histogram(split_p_array[i],
                                           bins=Nbins,
                                           range=(self.p_array.min(),
                                                  self.p_array.max()),
                                           density=True)[0]

        #calculate average and error over all instances of P(p)
        avg_histo = np.zeros([Nbins, 3])
        avg_histo[:, 0] = np.copy(histo[:, 0])
        avg_histo[:, 1] = np.mean(histo[:, 1:], axis=1)
        avg_histo[:, 2] = np.std(histo[:, 1:], axis=1) / np.sqrt(Nchnk - 1)

        #print out P(p)
        utils.printarray(avg_histo, 'prob_p.dat')
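
For reference, a minimal stand-alone sketch of the same chunk-and-histogram averaging, using a synthetic `samples` array in place of `self.p_array` (the array contents, chunk count, and bin count here are illustrative assumptions, not values from the code above):

import numpy as np

#illustrative data and parameters (placeholders, not from the class above)
samples = np.random.default_rng(0).normal(size=10000)
Nchnk, Nbins = 5, 100
rng = (samples.min(), samples.max())

#one normalized histogram per chunk, all on the same range
chunks = np.split(samples, Nchnk)
hists = np.array([np.histogram(c, bins=Nbins, range=rng, density=True)[0]
                  for c in chunks])

avg_P = hists.mean(axis=0)                      #average P(p) over chunks
err_P = hists.std(axis=0) / np.sqrt(Nchnk - 1)  #standard error across chunks
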
Example #2
    def kernel(self):

        print('******* RUNNING RPMD CALCULATION ********')
        print('Running ', self.Ntraj, 'trajectories, each for ', self.Nstep,
              'steps')
        print('*****************************************')
        print()

        #equilibrate system in NVT ensemble
        self.equilibrate()

        #loop over number of trajectories to calculate correlation function
        for itraj in range(self.Ntraj):

            #re-sample velocities at beginning of each trajectory
            self.sample_vel(self.beta_n)

            #save initial position com for correlation function
            initxcom = self.get_xcom()

            #MD loop for a given trajectory
            for step in range(self.Nstep):

                currtime = step * self.delt

                #Calculate and print data of interest
                if (np.mod(step, self.Nprint) == 0):
                    print('Writing data at step ', step, 'and time', currtime,
                          'for trajectory ', itraj)
                    self.calc_data(itraj, step, currtime, initxcom)

                #integrate eom
                self.integrate()

            #Calculate and print data of interest at the final time-step of each trajectory
            step = self.Nstep
            currtime = step * self.delt
            print('Writing data at step ', step, 'and time', currtime,
                  'for trajectory ', itraj)
            self.calc_data(itraj, step, currtime, initxcom)

        #Finish calculation of correlation function
        self.corrfcn[:, 1] = self.corrfcn[:, 1] / self.Ntraj
        utils.printarray(self.corrfcn, 'corrfcn.dat')
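
The loop above accumulates the correlation function inside calc_data and only divides by self.Ntraj at the end; calc_data itself is not shown, so the x(0)*x(t) update in the following sketch is an assumption used purely to illustrate the accumulate-then-average pattern (all names and numbers are placeholders):

import numpy as np

Ntraj, Nstep, delt = 4, 100, 0.1
corrfcn = np.zeros([Nstep + 1, 2])
corrfcn[:, 0] = np.arange(Nstep + 1) * delt      #time axis

rng = np.random.default_rng(1)
for itraj in range(Ntraj):
    x = x0 = rng.normal()                        #stand-in for initxcom = self.get_xcom()
    for step in range(Nstep + 1):
        corrfcn[step, 1] += x0 * x               #accumulate x(0)x(t) for this trajectory
        x += delt * rng.normal()                 #stand-in for self.integrate()

corrfcn[:, 1] /= Ntraj                           #average over trajectories, as above
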
Example #3
    def calc_gr( self, N, density ):

        #subroutine to calculate final radial distribution function

        for i in range(self.Nhis):

            #distance for bin i
            self.gr_array[i,0] = self.delg * (i+0.5)

            #volume of the spherical shell between bins i and i+1
            vb = (4.0/3.0) * np.pi * ( (i+1)**3 - i**3 ) * self.delg**3

            #number of ideal gas particles in vb
            nid = vb * density

            #normalize g(r)
            self.gr_array[i,1] = self.gr_array[i,1] / ( self.ngr * N * nid )

        #Print g(r)
        utils.printarray( self.gr_array, 'radial_dist_fcn.dat', True )
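
The same normalization can be written as a stand-alone helper: each bin of the raw pair-distance histogram is divided by ngr*N times the expected ideal-gas count in the corresponding spherical shell. The function below is a hypothetical repackaging of the loop above, not part of the original class:

import numpy as np

def normalize_gr(hist, delg, ngr, N, density):
    #hist: raw pair counts per bin, accumulated over ngr configurations of N particles
    i = np.arange(hist.size)
    r = delg * (i + 0.5)                                       #bin-center distances
    vb = (4.0 / 3.0) * np.pi * ((i + 1)**3 - i**3) * delg**3   #spherical-shell volumes
    nid = vb * density                                         #ideal-gas count per shell
    return np.column_stack((r, hist / (ngr * N * nid)))
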
Example #4
xmax = 5.0
delx = (xmax - xmin) / (N - 1)  #grid spacing

#Calculate DVR Hamiltonian
Hdvr = np.zeros([N, N])
prob_x = np.zeros([N, 2])
for i in range(N):
    xi = xmin + i * delx
    prob_x[i, 0] = xi
    for j in range(i, N):
        xj = xmin + j * delx

        fctr = (-1)**(i - j) / (2.0 * m * delx**2)

        if (i == j):
            Hdvr[i, j] = 0.5 * k * (xi - R0)**2  #potential term
            Hdvr[i, j] += fctr * np.pi**2 / 3.0  #kinetic term
        else:
            Hdvr[i, j] = fctr * 2.0 / (i - j)**2  #kinetic term

Hdvr = Hdvr + np.transpose(np.triu(Hdvr, 1))

#Calculate probability distribution along x
prob_x[:, 1] = np.diag(expm(-beta * Hdvr))
#factor of delx needed since the partition function is obtained by numerically integrating over the grid
Q = np.sum(prob_x[:, 1]) * delx
prob_x[:, 1] = prob_x[:, 1] / Q

utils.printarray(prob_x, 'prob_x.dat', True)
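
The kinetic-energy factors above match the Colbert-Miller sinc-DVR matrix elements on an evenly spaced grid (with hbar = 1): the diagonal term is pi^2/(6*m*delx^2) and the off-diagonal term is (-1)^(i-j)/(m*delx^2*(i-j)^2). Below is a quick self-check of that construction for a harmonic oscillator; the parameter values are illustrative assumptions, not taken from the script:

import numpy as np

#illustrative parameters (not from the script above)
m, k, R0 = 1.0, 1.0, 0.0
N, xmin, xmax = 201, -5.0, 5.0
delx = (xmax - xmin) / (N - 1)
x = xmin + delx * np.arange(N)

i = np.arange(N)
dij = i[:, None] - i[None, :]
with np.errstate(divide='ignore'):
    T = (-1.0)**dij / (2.0 * m * delx**2) * 2.0 / dij.astype(float)**2
np.fill_diagonal(T, np.pi**2 / (3.0 * 2.0 * m * delx**2))          #diagonal kinetic term
H = T + np.diag(0.5 * k * (x - R0)**2)                             #harmonic potential

E0 = np.linalg.eigvalsh(H)[0]
print(E0, 0.5 * np.sqrt(k / m))   #ground-state energy should approach hbar*omega/2 = 0.5
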
Example #5
                    ):
                        #Kinetic contribution from the nuclei: off-diagonal in the nuclear grid index, diagonal in electronic state
                        Hdvr[indx1, indx2] = fctr * 2.0 / (i - j)**2
                    elif (i == j):
                        #Off-diagonal coupling of the electronic states: off-diagonal in state, diagonal in the nuclear grid index
                        Hdvr[indx1, indx2] = Vel[state1, state2]

Hdvr = Hdvr + np.transpose(np.triu(Hdvr, 1))

densmat = expm(-beta * Hdvr)
Q = np.trace(densmat) * delx
densmat = densmat / Q

#State-dependent probabilities
px_1 = np.zeros([N, 2])
px_1[:, 0] = np.copy(xpos)
px_1[:, 1] = np.diag(densmat)[:N]

px_2 = np.zeros([N, 2])
px_2[:, 0] = np.copy(xpos)
px_2[:, 1] = np.diag(densmat)[N:]

#Joint probability
px = np.zeros([N, 2])
px[:, 0] = np.copy(xpos)
px[:, 1] = px_1[:, 1] + px_2[:, 1]

utils.printarray(px_1, 'prob_x_1.dat', True)
utils.printarray(px_2, 'prob_x_2.dat', True)
utils.printarray(px, 'prob_x.dat', True)
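
The [:N] and [N:] slices imply a state-major basis ordering, with the grid index running fastest within each electronic state. A toy illustration of that layout with a placeholder diagonal Hamiltonian, purely to show how the density-matrix diagonal splits into the two state-resolved distributions; none of the numbers come from the script above:

import numpy as np
from scipy.linalg import expm

N, beta, delx = 3, 1.0, 0.1
H = np.diag(np.arange(2 * N, dtype=float))   #placeholder two-state vibronic Hamiltonian
dens = expm(-beta * H)
dens = dens / (np.trace(dens) * delx)        #Q = Tr[exp(-beta*H)] * delx, as above

p1 = np.diag(dens)[:N]                       #distribution on electronic state 1
p2 = np.diag(dens)[N:]                       #distribution on electronic state 2
p_tot = p1 + p2                              #state-summed (joint) distribution along x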