Example no. 1
    def qlmQlm(self, l, ppp, AreaR):
        """ BOO of the l-fold symmetry as a 2l + 1 vector

            AreaR = 0: calculate the traditional qlm and Qlm
            AreaR = 1: calculate the Voronoi polyhedron face-area weighted qlm and Qlm
        """

        fneighbor = open(self.Neighborfile, 'r')
        if AreaR == 1: ffacearea = open(self.faceareafile, 'r')

        smallqlm = []
        largeQlm = []
        for n in range(self.SnapshotNumber):
            hmatrixinv = np.linalg.inv(self.hmatrix[n])
            if AreaR == 0:   #calculate traditional qlm and Qlm
                Neighborlist     = Voropp(fneighbor, self.ParticleNumber)  #neighbor list [number, list....]
                Particlesmallqlm = np.zeros((self.ParticleNumber, 2 * l + 1), dtype = np.complex128)
                for i in range(self.ParticleNumber):
                    RIJ = self.Positions[n][Neighborlist[i, 1: (Neighborlist[i, 0] + 1)]] - self.Positions[n][i]
                    #periodic = np.where(np.abs(RIJ / self.Boxlength[np.newaxis, :]) > 0.50, np.sign(RIJ), 0).astype(np.int)
                    #RIJ -= self.Boxlength * periodic * ppp    #remove Periodic boundary conditions
                    matrixij = np.dot(RIJ, hmatrixinv)
                    RIJ      = np.dot(matrixij - np.rint(matrixij) * ppp, self.hmatrix[n]) #remove PBC
                    theta = np.arccos(RIJ[:, 2] / np.sqrt(np.square(RIJ).sum(axis = 1)))
                    phi   = np.arctan2(RIJ[:, 1], RIJ[:, 0])
                    for j in range(Neighborlist[i, 0]):
                        Particlesmallqlm[i] += SPfunction(l, theta[j], phi[j]) #-l ... 0 ... l
                Particlesmallqlm = Particlesmallqlm / (Neighborlist[:, 0])[:, np.newaxis]
                smallqlm.append(Particlesmallqlm)

            elif AreaR == 1: #calculate voronoi polyhedron facearea weighted qlm and Qlm 
                Neighborlist = Voropp(fneighbor, self.ParticleNumber)  #neighbor list [number, list....]
                facearealist = Voropp(ffacearea, self.ParticleNumber)  #facearea list [number, list....]
                facearealist[:, 1:] = np.where(facearealist[:, 1:] != 0, facearealist[:, 1:] + 1, facearealist[:, 1:]) #because -1 has been added in Voropp()
                faceareafrac = facearealist[:, 1:] / facearealist[:, 1:].sum(axis = 1)[:, np.newaxis] #facearea fraction
                Particlesmallqlm = np.zeros((self.ParticleNumber, 2 * l + 1), dtype = np.complex128)
                for i in range(self.ParticleNumber):
                    RIJ = self.Positions[n][Neighborlist[i, 1: (Neighborlist[i, 0] + 1)]] - self.Positions[n][i]
                    #periodic = np.where(np.abs(RIJ / self.Boxlength[np.newaxis, :]) > 0.50, np.sign(RIJ), 0).astype(np.int)
                    #RIJ -= self.Boxlength * periodic * ppp    #remove PBC
                    matrixij = np.dot(RIJ, hmatrixinv)
                    RIJ      = np.dot(matrixij - np.rint(matrixij) * ppp, self.hmatrix[n]) #remove PBC
                    theta = np.arccos(RIJ[:, 2] / np.sqrt(np.square(RIJ).sum(axis = 1)))
                    phi   = np.arctan2(RIJ[:, 1], RIJ[:, 0])
                    for j in range(Neighborlist[i, 0]):
                        Particlesmallqlm[i] += np.array(SPfunction(l, theta[j], phi[j])) * faceareafrac[i, j] #-l ... 0 ... l
                smallqlm.append(Particlesmallqlm)


            ParticlelargeQlm = np.copy(Particlesmallqlm)  #must use np.copy, otherwise ParticlelargeQlm would only be another reference to the same array
            for i in range(self.ParticleNumber):
                for j in range(Neighborlist[i, 0]):
                    ParticlelargeQlm[i] += Particlesmallqlm[Neighborlist[i, j+1]]
            ParticlelargeQlm = ParticlelargeQlm / (1 + Neighborlist[:, 0])[:, np.newaxis]
            largeQlm.append(ParticlelargeQlm)
        
        fneighbor.close()
        if AreaR == 1: ffacearea.close()
        return (smallqlm, largeQlm)  #complex number 
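A minimal, self-contained sketch (NumPy only, with made-up toy values) of the h-matrix minimum-image step used above to remove periodic boundary conditions, followed by the conversion of the separation vectors to the spherical angles fed into the spherical harmonics:

import numpy as np

# hypothetical toy box and positions, not taken from the class above
hmatrix = np.diag([10.0, 10.0, 10.0])       # orthogonal box written as an h-matrix
hmatrixinv = np.linalg.inv(hmatrix)
ppp = np.array([1, 1, 1])                   # periodic in all three directions

pos_i = np.array([0.5, 0.5, 0.5])
pos_neighbors = np.array([[9.8, 0.6, 0.4],  # neighbor sitting across the boundary
                          [1.2, 0.3, 0.7]])

RIJ = pos_neighbors - pos_i                 # raw separation vectors
matrixij = np.dot(RIJ, hmatrixinv)          # fractional coordinates
RIJ = np.dot(matrixij - np.rint(matrixij) * ppp, hmatrix)  # minimum image

# spherical angles used to evaluate Y_lm(theta, phi) for each bond
theta = np.arccos(RIJ[:, 2] / np.linalg.norm(RIJ, axis=1))
phi = np.arctan2(RIJ[:, 1], RIJ[:, 0])
print(RIJ, theta, phi)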
Example no. 2
    def lthorder(self, l=6, ppp=[1, 1]):
        """ Calculate l-th order in 2D, such as hexatic order

            l is the order ranging from 4 to 8 normally in 2D
            ppp is periodic boundary conditions. 1 for yes and 0 for no
        """

        fneighbor = open(self.Neighborfile, 'r')
        results = np.zeros((self.SnapshotNumber, self.ParticleNumber),
                           dtype=np.complex128)
        for n in range(self.SnapshotNumber):
            hmatrixinv = np.linalg.inv(self.hmatrix[n])
            Neighborlist = Voropp(
                fneighbor,
                self.ParticleNumber)  #neighbor list [number, list...]
            for i in range(self.ParticleNumber):
                RIJ = self.Positions[n, Neighborlist[i, 1:Neighborlist[i, 0] +
                                                     1]] - self.Positions[n, i]
                #periodic = np.where(np.abs(RIJ / self.Boxlength[np.newaxis, :]) > 0.5, np.sign(RIJ), 0).astype(np.int)
                #RIJ -= self.Boxlength * periodic * ppp #remove PBC
                matrixij = np.dot(RIJ, hmatrixinv)
                RIJ = np.dot(matrixij - np.rint(matrixij) * ppp,
                             self.hmatrix[n])  #remove PBC
                theta = np.arctan2(RIJ[:, 1], RIJ[:, 0])
                results[n, i] = (np.exp(1j * l * theta)).mean()

        return results  #complex number in array
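A minimal sketch of the per-particle Psi_l computed above, assuming only NumPy and a made-up set of 2D neighbor separation vectors (roughly a hexagonal cage):

import numpy as np

l = 6
# hypothetical 2D separation vectors from one particle to its neighbors
RIJ = np.array([[1.0, 0.0], [0.5, 0.87], [-0.5, 0.87],
                [-1.0, 0.0], [-0.5, -0.87], [0.5, -0.87]])

theta = np.arctan2(RIJ[:, 1], RIJ[:, 0])    # bond angles
psi_l = np.exp(1j * l * theta).mean()       # complex l-fold order parameter
print(abs(psi_l))                           # close to 1 for a near-hexagonal cage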
Example no. 3
def CG(ordering, neighborfile, outputfile):
    """Coarse Graining over of ordering over cetain neighbor list
    
    ordering: input array of the atomic property to be coarse-grained
    it should have the shape of [num_of_atom, num_of_snapshot]
    """

    orderingCG = np.zeros_like(ordering) #initialization
    fneighbor  = open(neighborfile)

    for n in range(ordering.shape[1]):
        dataneigh = Voropp(fneighbor, ordering.shape[0])
        for i in range(ordering.shape[0]):
            indices = dataneigh[i, 1:1+dataneigh[i, 0]].tolist()
            indices.append(i)
            orderingCG[i, n] = ordering[indices, n].mean()

    if outputfile:
        results = np.column_stack((np.arange(ordering.shape[0])+1, orderingCG))
        fmt = '%d ' + '%.10f ' * orderingCG.shape[1]
        np.savetxt(outputfile, results, fmt = fmt, header = 'id order_Coarsegrained', comments = '')

    fneighbor.close()
    print ('-----------Coarse-Graining Done---------')
    return orderingCG
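A toy illustration of the same coarse-graining step (average over an atom and its neighbors) for a single snapshot; the array and neighbor list below are invented:

import numpy as np

# hypothetical per-atom order parameter and neighbor list for one snapshot
ordering = np.array([0.2, 0.8, 0.5, 0.9])
neighbors = {0: [1, 2], 1: [0, 3], 2: [0], 3: [1, 2]}   # atom id -> neighbor ids

orderingCG = np.zeros_like(ordering)
for i, nlist in neighbors.items():
    # average over the atom itself plus its neighbors, as CG() does above
    orderingCG[i] = ordering[nlist + [i]].mean()
print(orderingCG)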
Example no. 4
def Rorder(filename,
           num_patch=12,
           ndim=3,
           ppp=[1, 1, 1],
           neighborfile='',
           outputfile='',
           outputfileij=''):
    """calculate local orientational ordering"""

    print('--------calculate patchy alignment--------')
    from ParticleNeighbors import Voropp
    pos_all, SnapshotNumber, num_atom, hmatrix = cal_vector(
        filename, num_patch, ndim, ppp)

    fout = open(outputfileij, 'w')
    fneighbor = open(neighborfile, 'r')  #get neighbor list
    results = np.zeros((num_atom[0], SnapshotNumber))
    for n in range(SnapshotNumber):
        positions = pos_all[n]
        vectors = []
        for i in range(num_atom[n]):
            medium = positions[i, 1:] - positions[i, 0][np.newaxis, :]
            medium = medium / np.linalg.norm(
                medium, axis=1)[:, np.newaxis]  #unit vector
            vectors.append(medium)  #particle to patch vectors

        fout.write('id cn Rorder_list num_atom = %d\n' % num_atom[n])
        hmatrixinv = np.linalg.inv(hmatrix[n])
        Neighborlist = Voropp(fneighbor, num_atom[n])
        for i in range(num_atom[n]):
            cnlist = Neighborlist[i, 1:1 +
                                  Neighborlist[i, 0]]  #num, list (id-1...)
            RIJ = positions[cnlist, 0] - positions[i, 0]
            matrixij = np.dot(RIJ, hmatrixinv)
            RIJ = np.dot(matrixij - np.rint(matrixij) * ppp, hmatrix[n])
            RIJ = RIJ / np.linalg.norm(RIJ, axis=1)[:,
                                                    np.newaxis]  #unit vector
            fout.write('%d %d ' % (i + 1, Neighborlist[i, 0]))
            for j in range(Neighborlist[i, 0]):
                patch_i = (vectors[i] * RIJ[j]).sum(axis=1).argmax()
                patch_j = (vectors[cnlist[j]] * RIJ[j]).sum(axis=1).argmin()
                UIJ = (vectors[i][patch_i] *
                       vectors[cnlist[j]][patch_j]).sum()  #U_i * U_j
                results[i, n] += UIJ
                fout.write('%.6f ' % UIJ)
            fout.write('\n')
            results[i, n] = results[i, n] / Neighborlist[i, 0]

    fout.close()
    fneighbor.close()

    results = np.column_stack((np.arange(num_atom[0]) + 1, results))
    names = 'id Psi'
    fmt = '%d ' + '%.6f ' * (results.shape[1] - 1)
    np.savetxt(outputfile, results, fmt=fmt, header=names, comments='')

    print('--------calculate patchy alignment done--------')
    return results, names
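A minimal sketch of how one bond's patch alignment U_i * U_j is obtained above: pick the patch of i best aligned with the bond vector and the patch of j best anti-aligned with it (toy unit vectors, NumPy only):

import numpy as np

# hypothetical unit patch vectors of two particles and their bond unit vector
patches_i = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
patches_j = np.array([[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]])
rij = np.array([1.0, 0.0, 0.0])              # unit vector from particle i to j

patch_i = (patches_i * rij).sum(axis=1).argmax()   # patch of i pointing toward j
patch_j = (patches_j * rij).sum(axis=1).argmin()   # patch of j pointing back at i
UIJ = (patches_i[patch_i] * patches_j[patch_j]).sum()  # U_i * U_j
print(UIJ)                                   # -1.0 here: the patches face each other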
Example no. 5
    def lthorder(self, l = 6, ppp = [1, 1]):
        """ Calculate l-th order in 2D, such as hexatic order

            l is the order ranging from 4 to 8 normally in 2D
            ppp is periodic boundary conditions. 1 for yes and 0 for no
        """

        fneighbor = open(self.Neighborfile, 'r')
        if self.edgelengthfile: 
            fbondlength = open(self.edgelengthfile)
        
        results = np.zeros((self.SnapshotNumber, self.ParticleNumber), dtype = np.complex128)
        for n in range(self.SnapshotNumber):
            hmatrixinv   = np.linalg.inv(self.hmatrix[n])
            Neighborlist = Voropp(fneighbor, self.ParticleNumber) #neighbor list [number, list...]
            if not self.edgelengthfile:
                for i in range(self.ParticleNumber):
                    RIJ = self.Positions[n, Neighborlist[i, 1: Neighborlist[i, 0] + 1]] - self.Positions[n, i]
                    matrixij = np.dot(RIJ, hmatrixinv)
                    RIJ      = np.dot(matrixij - np.rint(matrixij) * ppp, self.hmatrix[n]) #remove PBC
                    theta    = np.arctan2(RIJ[:, 1], RIJ[:, 0])
                    results[n, i] = (np.exp(1j * l * theta)).mean()
            else:
                bondlengthlist = Voropp(fbondlength, self.ParticleNumber)
                for i in range(self.ParticleNumber):
                    RIJ = self.Positions[n, Neighborlist[i, 1: Neighborlist[i, 0] + 1]] - self.Positions[n, i]
                    matrixij = np.dot(RIJ, hmatrixinv)
                    RIJ      = np.dot(matrixij - np.rint(matrixij) * ppp, self.hmatrix[n]) #remove PBC
                    theta    = np.arctan2(RIJ[:, 1], RIJ[:, 0])
                    weights  = bondlengthlist[i, 1:Neighborlist[i, 0] + 1] + 1.0
                    weights /= weights.sum()
                    results[n, i] = (weights*np.exp(1j * l * theta)).sum()

        fneighbor.close()
        if self.edgelengthfile:
            fbondlength.close()
        return results #complex number in array
Example no. 6
def RorderIJ(filename,
             ndim=3,
             UIJ=0.9,
             neighborfile='',
             outputfile='',
             outputfileij=''):
    """rotational order parameter to characterize the structure

    alignment of the center particle with its neighbors by orientation
    """

    print('-------calculate orientational alignment-------')
    from ParticleNeighbors import Voropp
    d = readangular(filename, ndim)
    d.read_onefile()

    #-----get the unit vector-----
    velocity = [
        u / np.linalg.norm(u, axis=1)[:, np.newaxis] for u in d.velocity
    ]

    fneighbor = open(neighborfile, 'r')
    results = np.zeros((d.ParticleNumber[0], d.SnapshotNumber))
    if outputfileij: fij = open(outputfileij, 'w')
    for n in range(d.SnapshotNumber):
        Neighborlist = Voropp(
            fneighbor, d.ParticleNumber[n])  ##neighbor list [number, list....]
        if outputfileij: fij.write('id cn UIJ_list\n')
        for i in range(d.ParticleNumber[n]):
            CII = velocity[n][i] * velocity[n][Neighborlist[
                i, 1:1 + Neighborlist[i, 0]]]
            #psi = np.linalg.norm(CII, axis = 1)
            psi = np.abs(CII.sum(axis=1))
            results[i, n] = (psi > UIJ).sum()
            if outputfileij:
                fij.write('%d %d ' % (i + 1, Neighborlist[i, 0]))
                for j in range(Neighborlist[i, 0]):
                    fij.write('%.6f ' % psi[j])
                fij.write('\n')

    fneighbor.close()
    results = np.column_stack((np.arange(d.ParticleNumber[0]) + 1, results))
    if outputfile:
        names = 'id UIJ'
        np.savetxt(outputfile, results, fmt='%d', header=names, comments='')

    if outputfileij: fij.close()
    print('-------calculate orientational alignment over-------')
    return results
Example no. 7
    def logtotal(self, qmax, a = 1.0, dt = 0.002, outputfile = ''):
        """ Compute self-intermediate scattering functions ISF, dynamic susceptibility ISFX4 based on ISF
            Overlap function Qt and its corresponding dynamic susceptibility QtX4
            Mean-square displacements msd; non-Gaussian parameter alpha2

            qmax is the wavenumber corresponding to the first peak of the structure factor
            a is the cutoff for the overlap function, default is 1.0 (EAM) or 0.3 (LJ) (0.3<d>)
            dt is the timestep of MD simulations

            ********CONSIDER CAGE RELATIVE DISPLACEMENTS********
            the trajectory is in log scale, and only the first configuration is considered
            as the reference
        """
        print ('-----------------Compute Overall log Cage Relative Dynamics--------------------')

        results = np.zeros(((self.SnapshotNumber - 1), 5))
        names  = 't  ISF   Qt   msd  alpha2'

        results[:, 0] = (np.array(self.TimeStepall[1:]) - self.TimeStepall[0]) * dt

        RII = self.Positions[1:] - self.Positions[0]
        if self.PBC:
            hmatrixinv = np.linalg.inv(self.hmatrix[0])
            for ii in range(RII.shape[0]):
               matrixij = np.dot(RII[ii], hmatrixinv)
               RII[ii]  = np.dot(matrixij - np.rint(matrixij) * self.ppp, self.hmatrix[0]) #remove PBC

        RII_relative = RII.copy()
        fneighbor    = open(self.Neighborfile, 'r')  #consider neighbors' displacements
        Neighborlist = Voropp(fneighbor, self.ParticleNumber) #neighbor list [number, list...]
        fneighbor.close()
        for m in range(RII.shape[0]):
            for i in range(self.ParticleNumber):
                RII_relative[m, i] = RII[m,i]-RII[m, Neighborlist[i, 1: Neighborlist[i, 0] + 1]].mean(axis = 0) #cage relative displacements            
                #keep RII of each atom unchanged during subtraction

        results[:, 1] = (np.cos(RII_relative * qmax).mean(axis = 2)).mean(axis = 1)
        distance      = np.square(RII_relative).sum(axis = 2)
        results[:, 2] = (np.sqrt(distance) <= a).sum(axis = 1) / self.ParticleNumber
        results[:, 3] = distance.mean(axis = 1)
        distance2     = np.square(distance).mean(axis = 1)
        results[:, 4] = alpha2factor(self.ndim) * distance2 / np.square(results[:, 3]) - 1.0

        if outputfile:
            np.savetxt(outputfile, results, fmt='%.6f', header = names, comments = '')

        print ('-----------------Compute Overall log Cage Relative Dynamics Over--------------------')
        return results, names
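A toy example of the cage-relative displacement used throughout these routines: subtract the mean displacement of an atom's Voronoi neighbors from its own displacement (invented arrays, NumPy only):

import numpy as np

# hypothetical absolute displacements of 4 atoms between two snapshots
RII = np.array([[0.5, 0.0, 0.0],
                [0.4, 0.1, 0.0],
                [0.6, -0.1, 0.0],
                [0.5, 0.0, 0.1]])
neighbors = {0: [1, 2, 3], 1: [0, 2], 2: [0, 1], 3: [0]}

RII_relative = RII.copy()
for i, nlist in neighbors.items():
    # subtract the mean displacement of the cage (the neighbors) from atom i
    RII_relative[i] = RII[i] - RII[nlist].mean(axis=0)
print(RII_relative)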
Example no. 8
    def sijlargeQl(self, l, ppp = [1,1,1], AreaR = 0, c = 0.7, outputQl = '', outputsij = '', results_path = '../../analysis/BOO'):
        """ Calculate Crystal Nuclei Criterion s(i, j) based on Qlm  

            AreaR = 0 indicates calculate s(i, j) based on traditional Qlm
            AreaR = 1 indicates calculate s(i, j) based on voronoi polyhedron face area weighted Qlm
            c is a cutoff determining whether a bond is crystalline or not
            Give a name to outputQl and outputsij to store the results
        """
        print ('---- Calculate Crystal Nuclei Criterion s(i, j) based on Ql ----')
        if not os.path.exists(results_path):
            os.makedirs(results_path)

        MaxNeighbor = 50 #the considered maximum number of neighbors
        (smallqlm, largeQlm) = self.qlmQlm(l, ppp, AreaR)
        fneighbor = open(self.Neighborfile, 'r')
        results = np.zeros((1, 3))
        resultssij = np.zeros((1,  MaxNeighbor + 1))
        for n in range(self.SnapshotNumber):
            Neighborlist = Voropp(fneighbor, self.ParticleNumber)  #neighbor list [number, list....]
            sij = np.zeros((self.ParticleNumber, MaxNeighbor))
            sijresults = np.zeros((self.ParticleNumber, 3))
            if (Neighborlist[:, 0] > MaxNeighbor).any():
                raise ValueError('********Too Many Neighbors*********')
            for i in range(self.ParticleNumber):
                for j in range(Neighborlist[i, 0]):
                    sijup = (largeQlm[n][i] * np.conj(largeQlm[n][Neighborlist[i, j+1]])).sum()
                    sijdown = np.sqrt(np.square(np.abs(largeQlm[n][i])).sum()) * np.sqrt(np.square(np.abs(largeQlm[n][Neighborlist[i, j+1]])).sum())
                    sij[i, j] = np.real(sijup / sijdown)
            sijresults[:, 0] = np.arange(self.ParticleNumber) + 1 #particle id
            sijresults[:, 1] = (np.where(sij > c, 1, 0)).sum(axis = 1)  #bond number 
            sijresults[:, 2] = np.where(sijresults[:, 1] > Neighborlist[:, 0] / 2, 1, 0) #crystalline
            results = np.vstack((results, sijresults))
            resultssij = np.vstack((resultssij, np.column_stack((sijresults[:, 0] ,sij))))

        if outputQl:
            names = 'id  sijcrystalbondnum  crystalline.l=' + str(l)
            np.savetxt(results_path + outputQl, results[1:], fmt='%d', header = names, comments = '')
        if outputsij:
            names = 'id  s(i, j)  l=' + str(l)
            formatsij = '%d ' + '%.6f ' * MaxNeighbor
            np.savetxt(results_path + outputsij, resultssij[1:], fmt=formatsij, header = names, comments = '')

        fneighbor.close()
        print ('-------------Calculate s(i, j) based on Ql over-----------')
        return resultssij[1:] #individual value of sij
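A minimal sketch of the bond correlation s(i, j): the normalized real part of the inner product of two neighboring particles' complex Qlm vectors (random toy vectors, NumPy only):

import numpy as np

l = 6
rng = np.random.default_rng(0)
# hypothetical (2l+1)-component complex Qlm vectors of two neighboring particles
Qlm_i = rng.normal(size=2 * l + 1) + 1j * rng.normal(size=2 * l + 1)
Qlm_j = rng.normal(size=2 * l + 1) + 1j * rng.normal(size=2 * l + 1)

sij_up = (Qlm_i * np.conj(Qlm_j)).sum()
sij_down = np.sqrt(np.square(np.abs(Qlm_i)).sum()) * np.sqrt(np.square(np.abs(Qlm_j)).sum())
sij = np.real(sij_up / sij_down)     # the bond counts as crystalline when sij > c
print(sij)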
Example no. 9
def Rorder(filename, ndim=3, neighborfile='', outputfile='', use_abs=True):
    """rotational order parameter to characterize the structure

    local rotational symmetry over the nearest neighbors
    """

    print('-------calculate local rotational ordering-------')
    from ParticleNeighbors import Voropp
    d = readangular(filename, ndim)
    d.read_onefile()

    #-----get the unit vector-----
    velocity = [
        u / np.linalg.norm(u, axis=1)[:, np.newaxis] for u in d.velocity
    ]

    fneighbor = open(neighborfile, 'r')
    results = np.zeros((d.ParticleNumber[0], d.SnapshotNumber))
    for n in range(d.SnapshotNumber):
        Neighborlist = Voropp(
            fneighbor, d.ParticleNumber[n])  ##neighbor list [number, list....]
        for i in range(d.ParticleNumber[n]):
            CII = velocity[n][i] * velocity[n][Neighborlist[
                i, 1:1 + Neighborlist[i, 0]]]
            if use_abs:
                results[i, n] = np.abs(CII.sum(axis=1)).mean()
            else:
                results[i, n] = (CII.sum(axis=1)).mean()

    fneighbor.close()
    results = np.column_stack((np.arange(d.ParticleNumber[0]) + 1, results))
    if outputfile:
        names = 'id Psi'
        numformat = '%d ' + '%.6f ' * (results.shape[1] - 1)
        np.savetxt(outputfile,
                   results,
                   fmt=numformat,
                   header=names,
                   comments='')

    print('-------calculate local rotational ordering over-------')
    return results
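A toy version of the local rotational order computed above: the mean of |u_i . u_j| over an atom's neighbors, using invented unit orientation vectors:

import numpy as np

# hypothetical unit orientation vectors: one central particle and its neighbors
u_i = np.array([0.0, 0.0, 1.0])
u_neighbors = np.array([[0.0, 0.1, 0.99],
                        [0.1, 0.0, 0.99],
                        [0.0, 0.0, -1.0]])
u_neighbors /= np.linalg.norm(u_neighbors, axis=1)[:, np.newaxis]

CII = u_i * u_neighbors                      # element-wise products u_i * u_j
psi = np.abs(CII.sum(axis=1)).mean()         # mean |u_i . u_j| over the neighbors
print(psi)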
Example no. 10
    def InstantMSD(self, everyn=1, outputfile=''):
        """ compute the cage-relative instant MSD with the previous configuration as reference

            everyn: how many configurations back to use as the reference; 1 means the immediately preceding one
        """
        print('-----------------Compute Instant Cage Relative MSD--------------------')

        results = np.zeros(((self.SnapshotNumber - everyn), 2))
        names = 'n  imsd_CR'

        results[:, 0] = np.arange(results.shape[0]) + 1

        fneighbor = open(self.Neighborfile, 'r')
        for n in range(self.SnapshotNumber-everyn):
            RII = self.Positions[n+everyn] - self.Positions[n]
            if self.PBC:
                hmatrixinv = np.linalg.inv(self.hmatrix[n])    
                matrixij = np.dot(RII, hmatrixinv)
                RII = np.dot(matrixij - np.rint(matrixij) * self.ppp, self.hmatrix[n])  # remove PBC

            RII_relative = RII.copy()
            #consider neighbors' displacements; neighbor list [number, list...]
            Neighborlist = Voropp(fneighbor, self.ParticleNumber)
            for i in range(self.ParticleNumber):
                # cage relative displacements
                RII_relative[i] = RII[i]-RII[Neighborlist[i, 1: Neighborlist[i, 0] + 1]].mean(axis=0)
                #keep RII of each atom unchanged during subtraction

            results[n, 1] = np.square(RII_relative).sum(axis=1).mean()
        fneighbor.close()

        if outputfile:
            unitstep = self.TimeStepall[everyn] - self.TimeStepall[0]
            np.savetxt(outputfile, results, fmt='%d %.6f', header=names, comments='TimeStep interval:%d\n'%unitstep)

        print('-----------------Compute Instant Cage Relative MSD Over--------------------')
        return results, names
Example no. 11
    def fastS4(self, a = 1.0, dt = 0.002, X4timeset = 0, qrange = 10, outputfile = ''):
        """ Compute four-point dynamic structure factor at peak timescale of dynamic susceptibility

            Based on overlap function Qt and its corresponding dynamic susceptibility QtX4     
            a is the cutoff for the overlap function, default is 1.0 (EAM) or 0.3 (LJ) (0.3<d>)
            dt is the timestep of MD simulations
            X4timeset is the peak timescale of X4; if 0, the calculated one is used
            Dynamics (Qt and X4) should be calculated before computing S4
            Only the fast particles are considered

            ********CONSIDER CAGE RELATIVE DISPLACEMENTS********
        """
        print ('-----------------Compute Cage Relative dynamic S4(q) of fast particles --------------')

        #-----------calculate overall dynamics first----------------
        results = np.zeros(((self.SnapshotNumber - 1), 3))
        names  = 't  Qt  QtX4'
        
        cal_Qt   = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
        deltat   = np.zeros(((self.SnapshotNumber - 1), 2), dtype = int) #deltat, deltatcounts
        fneighbor = open(self.Neighborfile, 'r')  #consider neighbors' displacements
        for n in range(self.SnapshotNumber - 1):  #time interval
            RII = self.Positions[n + 1:] - self.Positions[n]
            if self.PBC:
                hmatrixinv = np.linalg.inv(self.hmatrix[n])
                for ii in range(len(RII)):
                   matrixij = np.dot(RII[ii], hmatrixinv)
                   RII[ii]  = np.dot(matrixij - np.rint(matrixij) * self.ppp, self.hmatrix[n]) #remove PBC

            RII_relative = RII.copy()
            Neighborlist = Voropp(fneighbor, self.ParticleNumber) #neighbor list [number, list...]
            for m in range(RII.shape[0]):
                for i in range(self.ParticleNumber):
                    RII_relative[m, i] = RII[m,i]-RII[m, Neighborlist[i, 1: Neighborlist[i, 0] + 1]].mean(axis = 0) #cage relative displacements            
                    #keep RII of each atom unchanged during subtraction

            distance  = np.square(RII_relative).sum(axis = 2)
            RII_Qt    = (np.sqrt(distance) >= a).sum(axis = 1)
            cal_Qt    = pd.concat([cal_Qt, pd.DataFrame(RII_Qt[np.newaxis, :])])
        
        cal_Qt       = cal_Qt.iloc[1:]
        deltat[:, 0] = np.array(cal_Qt.columns) + 1 #Timeinterval
        deltat[:, 1] = np.array(cal_Qt.count())     #Timeinterval frequency

        results[:, 0] = deltat[:, 0] * self.TimeStep * dt 
        results[:, 1] = cal_Qt.mean() / self.ParticleNumber
        results[:, 2] = ((cal_Qt**2).mean() - (cal_Qt.mean())**2) / self.ParticleNumber
        if outputfile:
            np.savetxt('CageDynamics.' + outputfile, results, fmt='%.6f', header = names, comments = '')
        fneighbor.close()

        #-----------calculate S4(q) of fast particles----------------
        twopidl = 2 * pi / self.Boxlength
        Numofq = int(qrange / twopidl.max())
        wavevector = choosewavevector(Numofq, self.ndim)[:, 1:] #only S4(q) in the low wavenumber range is of interest
        wavevector = wavevector.astype(np.float64) * twopidl[np.newaxis, :]
        wavenumber = np.linalg.norm(wavevector, axis=1)

        sqresults = np.zeros((wavevector.shape[0], 2))
        sqresults[:, 0] = wavenumber

        if X4timeset:
            X4time = int(X4timeset / dt / self.TimeStep)
        else:
            X4time = deltat[results[:, 2].argmax(), 0] 

        fneighbor = open(self.Neighborfile, 'r')  #consider neighbors' displacements
        for n in range(self.SnapshotNumber - X4time):
            RII = self.Positions[n + X4time] - self.Positions[n]  #absolute displacements
            if self.PBC:
                hmatrixinv = np.linalg.inv(self.hmatrix[n])
                matrixij = np.dot(RII, hmatrixinv)
                RII  = np.dot(matrixij - np.rint(matrixij) * self.ppp, self.hmatrix[n]) #remove PBC

            RII_relative = RII.copy()
            Neighborlist = Voropp(fneighbor, self.ParticleNumber) #neighbor list [number, list...]
            for i in range(self.ParticleNumber):
                RII_relative[i] = RII[i] - RII[Neighborlist[i, 1: Neighborlist[i, 0] + 1]].mean(axis = 0) #cage relative displacements            
                #keep RII of each atom unchanged during subtraction         

            RII = np.linalg.norm(RII_relative, axis=1) #np.sqrt(np.square(RII_relative).sum(axis = 1))
            RII = np.where(RII >= a, 1, 0)
            
            sqtotal = np.zeros_like(sqresults)
            for i in range(self.ParticleNumber):
                if RII[i]:
                    thetas = (self.Positions[n][i][np.newaxis, :] * wavevector).sum(axis=1)
                    sqtotal[:, 0] += np.sin(thetas)
                    sqtotal[:, 1] += np.cos(thetas)
            
            sqresults[:, 1] += np.square(sqtotal).sum(axis=1) / self.ParticleNumber
        sqresults[:, 1] /= (self.SnapshotNumber - X4time)
        
        sqresults = pd.DataFrame(sqresults).round(6)
        results = sqresults.groupby(sqresults[0]).mean().reset_index().values
        
        names = 'q  S4'
        if outputfile:
            np.savetxt(outputfile, results, fmt='%.6f', header = names, comments = '')
        fneighbor.close()
        
        print ('--------- Compute Cage Relative S4(q) of fast particles over ------')
        return results, names
Example no. 12
    def total(self, outputfile, qmax, a = 1.0, dt = 0.002, results_path = '../../analysis/cagedynamics'):
        """ Compute self-intermediate scattering functions ISF, dynamic susceptibility ISFX4 based on ISF
            Overlap function Qt and its corresponding dynamic susceptibility QtX4
            Mean-square displacements msd; non-Gaussian parameter alpha2

            qmax is the wavenumber corresponding to the first peak of the structure factor
            a is the cutoff for the overlap function, default is 1.0 (EAM) or 0.3 (LJ) (0.3<d>)
            dt is the timestep of MD simulations

            ********CONSIDER CAGE RELATIVE DISPLACEMENTS********
        """
        print ('-----------------Compute Overall Cage Relative Dynamics--------------------')

        if not os.path.exists(results_path):
            os.makedirs(results_path)

        results = np.zeros(((self.SnapshotNumber - 1), 7))
        names  = 't  ISF  ISFX4  Qt  QtX4  msd  alpha2'
        
        cal_isf  = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
        cal_Qt   = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
        cal_msd  = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
        cal_alp  = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
        deltat   = np.zeros(((self.SnapshotNumber - 1), 2), dtype = int) #deltat, deltatcounts
        fneighbor = open(self.Neighborfile, 'r')  #consider neighbors' displacements
        for n in range(self.SnapshotNumber - 1):  #time interval
            RII = self.Positions[n + 1:] - self.Positions[n]
            Neighborlist = Voropp(fneighbor, self.ParticleNumber) #neighbor list [number, list...]
            for i in range(self.ParticleNumber):
                RII[:, i] -= RII[:, Neighborlist[i, 1: Neighborlist[i, 0] + 1]].mean(axis = 1) #cage relative displacements            

            RII_isf   = (np.cos(RII * qmax).sum(axis = 2) / 3.0).sum(axis = 1) #index is timeinterval -1
            cal_isf   = pd.concat([cal_isf, pd.DataFrame(RII_isf[np.newaxis, :])])
            distance  = np.square(RII).sum(axis = 2)
            RII_Qt    = (np.sqrt(distance) <= a).sum(axis = 1)
            cal_Qt    = pd.concat([cal_Qt, pd.DataFrame(RII_Qt[np.newaxis, :])])
            cal_msd   = pd.concat([cal_msd, pd.DataFrame(distance.sum(axis = 1)[np.newaxis, :])])
            distance2 = np.square(distance).sum(axis = 1)
            cal_alp   = pd.concat([cal_alp, pd.DataFrame(distance2[np.newaxis, :])])

        
        cal_isf      = cal_isf.iloc[1:]
        cal_Qt       = cal_Qt.iloc[1:]
        cal_msd      = cal_msd.iloc[1:]
        cal_alp      = cal_alp.iloc[1:]
        deltat[:, 0] = np.array(cal_isf.columns) + 1 #Timeinterval
        deltat[:, 1] = np.array(cal_isf.count())     #Timeinterval frequency

        results[:, 0] = deltat[:, 0] * self.TimeStep * dt 
        results[:, 1] = cal_isf.mean() / self.ParticleNumber
        results[:, 2] = ((cal_isf**2).mean() - (cal_isf.mean())**2) / self.ParticleNumber
        results[:, 3] = cal_Qt.mean() / self.ParticleNumber
        results[:, 4] = ((cal_Qt**2).mean() - (cal_Qt.mean())**2) / self.ParticleNumber
        results[:, 5] = cal_msd.mean() / self.ParticleNumber
        results[:, 6] = cal_alp.mean() / self.ParticleNumber
        results[:, 6] = 3.0 * results[:, 6] / np.square(results[:, 5]) / 5.0 - 1.0

        np.savetxt(results_path + outputfile, results, fmt='%.6f', header = names, comments = '')
        print ('-----------------Compute Overall Cage Relative Dynamics Over--------------------')

        return results
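A small self-contained sketch of the overlap function Qt and its susceptibility QtX4 as estimated above (fluctuations of the overlap count across time origins, divided by N); the displacement data are random toy values:

import numpy as np

a = 1.0
rng = np.random.default_rng(1)
# hypothetical per-atom squared displacements for several time origins
# of the same time interval: shape (number_of_origins, number_of_atoms)
distance = rng.uniform(0.0, 2.0, size=(5, 100))

Qt_origin = (np.sqrt(distance) <= a).sum(axis=1)   # overlap count per time origin
N = distance.shape[1]
Qt = Qt_origin.mean() / N                          # overlap function
QtX4 = (np.square(Qt_origin).mean() - Qt_origin.mean()**2) / N  # susceptibility
print(Qt, QtX4)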
Example no. 13
def Vonmises(inputfile,
             Neighborfile,
             ndim,
             strainrate,
             outputfile,
             ppp=[1, 1, 1],
             dt=0.002,
             results_path='../../analysis/Strain/'):
    """ Calculate Non-Affine Von-Mises Strain (local shear invariant)
        
        With the first snapshot of inputfile as reference
        The unit of strainrate should be consistent with the intrinsic time unit (i.e. with dt)
        The code accounts for both orthogonal and triclinic boxes
    """
    if not os.path.exists(results_path):
        os.makedirs(results_path)

    d = readdump(inputfile, ndim)
    d.read_onefile()
    positions = d.Positions
    particlenumber = d.ParticleNumber[0]
    snapshotnumber = d.SnapshotNumber
    boxlength = d.Boxlength
    hmatrix = d.hmatrix
    timestep = d.TimeStep[1] - d.TimeStep[0]
    PI = np.eye(ndim, dtype=int)  #identity matrix along diag
    results = np.zeros((particlenumber, snapshotnumber - 1))

    fneighbor = open(Neighborfile, 'r')
    Neighborlist = Voropp(fneighbor,
                          particlenumber)  #neighbor list [number, list...]
    fneighbor.close()
    for i in range(particlenumber):
        neighbors = Neighborlist[i, 1:Neighborlist[i, 0] + 1]
        RIJ0 = positions[0][neighbors] - positions[0][
            i]  #reference snapshot: the initial one
        #periodic  = np.where(np.abs(RIJ0 / boxlength[0]) > 0.50, np.sign(RIJ0), 0)
        #RIJ0     -= boxlength[0] * periodic * ppp #remove periodic boundary conditions
        matrixij = np.dot(RIJ0, np.linalg.inv(hmatrix[0]))
        RIJ0 = np.dot(matrixij - np.rint(matrixij) * ppp, hmatrix[0])
        for j in range(snapshotnumber - 1):
            RIJ1 = positions[j + 1][neighbors] - positions[j + 1][
                i]  #deformed snapshot
            matrixij = np.dot(RIJ1, np.linalg.inv(hmatrix[j + 1]))
            RIJ1 = np.dot(matrixij - np.rint(matrixij) * ppp, hmatrix[j + 1])
            PJ = np.dot(np.linalg.inv(np.dot(RIJ0.T, RIJ0)),
                        np.dot(RIJ0.T, RIJ1))
            etaij = 0.5 * (np.dot(PJ, PJ.T) - PI)
            results[i, j] = np.sqrt(0.5 * np.trace(
                np.linalg.matrix_power(
                    etaij - (1 / ndim) * np.trace(etaij) * PI, 2)))

    results = np.column_stack((np.arange(particlenumber) + 1, results))
    strain = np.arange(snapshotnumber) * timestep * dt * strainrate
    results = np.vstack((strain, results))
    names = 'id   The_first_row_is_the_strain.0isNAN'
    fformat = '%d ' + '%.6f ' * (snapshotnumber - 1)
    np.savetxt(results_path + outputfile,
               results,
               fmt=fformat,
               header=names,
               comments='')
    print('------ Calculate Von Mises Strain Over -------')
    return results
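A minimal sketch of the non-affine von Mises strain for a single atom: fit the best affine transformation mapping the reference neighbor vectors onto the deformed ones, then take the deviatoric part of the resulting strain tensor (toy vectors, NumPy only):

import numpy as np

ndim = 3
PI = np.eye(ndim)
rng = np.random.default_rng(2)

# hypothetical neighbor separation vectors in the reference and deformed snapshots
RIJ0 = rng.normal(size=(8, ndim))                        # reference configuration
shear = np.array([[1.0, 0.05, 0.0],
                  [0.0, 1.0, 0.0],
                  [0.0, 0.0, 1.0]])
RIJ1 = np.dot(RIJ0, shear) + 0.01 * rng.normal(size=(8, ndim))  # deformed + noise

# least-squares affine transformation mapping RIJ0 onto RIJ1
PJ = np.dot(np.linalg.inv(np.dot(RIJ0.T, RIJ0)), np.dot(RIJ0.T, RIJ1))
etaij = 0.5 * (np.dot(PJ, PJ.T) - PI)                    # Lagrangian strain tensor
deviatoric = etaij - np.trace(etaij) / ndim * PI
vonmises = np.sqrt(0.5 * np.trace(np.linalg.matrix_power(deviatoric, 2)))
print(vonmises)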
Example no. 14
def neighbortypes(inputfile,
                  ndim,
                  neighborfile,
                  filetype='lammps',
                  moltypes='',
                  outputfile=''):
    """Analysis the fractions of atom A in the first neighbor of atom B
        The keyword filetype is used for different MD engines
        It has four choices:
        'lammps' (default)

        'lammpscenter' (lammps molecular dump with known atom type of each molecule center)
        moltypes is a dict mapping center atomic type to molecular type
        moltypes is also used to select the center atom 
        such as moltypes = {3: 1, 5: 2}

        'gsd' (HOOMD-blue standard output for static properties)
    
        'gsd_dcd' (HOOMD-blue outputs for static and dynamic properties)
    """

    #get the coordinate information
    d = readdump(inputfile, ndim, filetype, moltypes)
    d.read_onefile()
    #get the neighbor list from voronoi analysis
    fneighbor = open(neighborfile, 'r')

    results = np.zeros((d.SnapshotNumber, 3))  #for binary system 11 12/21 22
    for i in range(d.SnapshotNumber):
        neighborlist = Voropp(
            fneighbor, d.ParticleNumber[i])  #neighbor list [number, list....]
        neighbortype = d.ParticleType[i]

        medium = np.zeros(6)
        for j in range(d.ParticleNumber[i]):
            neighborsij = neighborlist[j, 1:(neighborlist[j, 0] + 1)]
            data11 = (neighbortype[j] + neighbortype[neighborsij] == 2).sum()
            if data11 > 0:
                medium[0] += neighborlist[j, 0]
                medium[1] += data11

            data12 = (neighbortype[j] + neighbortype[neighborsij] == 3).sum()
            if data12 > 0:
                medium[2] += neighborlist[j, 0]
                medium[3] += data12

            data22 = (neighbortype[j] + neighbortype[neighborsij] == 4).sum()
            if data22 > 0:
                medium[4] += neighborlist[j, 0]
                medium[5] += data22

        results[i, 0] = medium[1] / medium[0]
        results[i, 1] = medium[3] / medium[2]
        results[i, 2] = medium[5] / medium[4]

    fneighbor.close()
    if outputfile:
        names = '11  12/21  22'
        np.savetxt(outputfile,
                   results,
                   fmt=3 * ' %.6f',
                   header=names,
                   comments='')

    print('-------demix checking over------')
    return results, names
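A toy version of the neighbor-type statistics above for a binary system: count 1-1, 1-2/2-1 and 2-2 pairs in each atom's first shell and normalize by the coordination numbers involved (invented types and neighbor list):

import numpy as np

# hypothetical binary system: particle types and a toy neighbor list
particletype = np.array([1, 1, 2, 2])
neighbors = {0: [1, 2], 1: [0, 3], 2: [0, 3], 3: [1, 2]}

medium = np.zeros(6)      # (coordination, pair count) for 1-1, 1-2/2-1 and 2-2
for j, nlist in neighbors.items():
    pairsum = particletype[j] + particletype[nlist]
    for k, target in enumerate((2, 3, 4)):               # 1+1, 1+2, 2+2
        data = (pairsum == target).sum()
        if data > 0:
            medium[2 * k] += len(nlist)
            medium[2 * k + 1] += data
fractions = medium[1::2] / medium[0::2]
print(fractions)          # fractions of 1-1, 1-2/2-1 and 2-2 first-shell bonds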
Example no. 15
    def cal_multiple(self, l, ppp = [1,1,1], AreaR = 0, c = 0.7, outpath = './', cqlQl = 0, csijsmallql = 0, csijlargeQl = 0, csmallwcap = 0, clargeWcap = 0):
        """Calculate multiple order parameters at the same time"""

        if not os.path.exists(outpath): os.makedirs(outpath)
        namestr = outpath + '.'.join(os.path.basename(self.dumpfile).split('.')[:-1])
        print ('----calculate multiple order parameters together------')
        print ('the common namestr of output filenames is %s'%namestr)
        (smallqlm, largeQlm) = self.qlmQlm(l, ppp, AreaR)

        if cqlQl:
            smallql  = np.sqrt(4 * pi / (2 * l + 1) * np.square(np.abs(smallqlm)).sum(axis = 2))
            smallql  = np.column_stack((np.arange(self.ParticleNumber) + 1, smallql.T))
            outputql = namestr + '.smallq_l%d.dat'%l
            names    = 'id  ql  l=' + str(l)
            numformat = '%d ' + '%.6f ' * (len(smallql[0]) - 1) 
            np.savetxt(outputql, smallql, fmt=numformat, header = names, comments = '')

            largeQl = np.sqrt(4 * pi / (2 * l + 1) * np.square(np.abs(largeQlm)).sum(axis = 2))
            largeQl = np.column_stack((np.arange(self.ParticleNumber) + 1, largeQl.T))
            outputQl = namestr + '.largeQ_l%d.dat'%l
            names = 'id  Ql  l=' + str(l)
            numformat = '%d ' + '%.6f ' * (len(largeQl[0]) - 1) 
            np.savetxt(outputQl, largeQl, fmt=numformat, header = names, comments = '')
            
            print ('-------------Calculate ql and Ql over-----------')

        if csijsmallql:
            MaxNeighbor = 100 #the considered maximum number of neighbors
            fneighbor = open(self.Neighborfile, 'r')
            results = np.zeros((1, 3))
            for n in range(self.SnapshotNumber):
                Neighborlist = Voropp(fneighbor, self.ParticleNumber)  #neighbor list [number, list....]
                sij = np.zeros((self.ParticleNumber, MaxNeighbor))
                sijresults = np.zeros((self.ParticleNumber, 3))
                if (Neighborlist[:, 0] > MaxNeighbor).any(): print ('********Warning: Too Many Neighbors*********')
                for i in range(self.ParticleNumber):
                    for j in range(Neighborlist[i, 0]):
                        sijup = (smallqlm[n][i] * np.conj(smallqlm[n][Neighborlist[i, j+1]])).sum()
                        sijdown = np.sqrt(np.square(np.abs(smallqlm[n][i])).sum()) * np.sqrt(np.square(np.abs(smallqlm[n][Neighborlist[i, j+1]])).sum())
                        sij[i, j] = np.real(sijup / sijdown)
                sijresults[:, 0] = np.arange(self.ParticleNumber) + 1 #particle id
                sijresults[:, 1] = (np.where(sij > c, 1, 0)).sum(axis = 1)  #bond number 
                sijresults[:, 2] = np.where(sijresults[:, 1] > Neighborlist[:, 0] / 2, 1, 0) #crystalline
                results = np.vstack((results, sijresults))

            outputql = namestr + '.sij.smallq_l%d.dat'%l
            names = 'id  sijcrystalbondnum  crystalline.l=' + str(l)
            np.savetxt(outputql, results[1:], fmt='%d', header = names, comments = '')
            fneighbor.close()
            print ('-------------Calculate s(i, j) based on ql over-----------')

        if csijlargeQl:
            MaxNeighbor = 100 #the considered maximum number of neighbors
            fneighbor = open(self.Neighborfile, 'r')
            results = np.zeros((1, 3))
            for n in range(self.SnapshotNumber):
                Neighborlist = Voropp(fneighbor, self.ParticleNumber)  #neighbor list [number, list....]
                sij = np.zeros((self.ParticleNumber, MaxNeighbor))
                sijresults = np.zeros((self.ParticleNumber, 3))
                if (Neighborlist[:, 0] > MaxNeighbor).any(): print ('********Warning: Too Many Neighbors*********')
                for i in range(self.ParticleNumber):
                    for j in range(Neighborlist[i, 0]):
                        sijup = (largeQlm[n][i] * np.conj(largeQlm[n][Neighborlist[i, j+1]])).sum()
                        sijdown = np.sqrt(np.square(np.abs(largeQlm[n][i])).sum()) * np.sqrt(np.square(np.abs(largeQlm[n][Neighborlist[i, j+1]])).sum())
                        sij[i, j] = np.real(sijup / sijdown)
                sijresults[:, 0] = np.arange(self.ParticleNumber) + 1 #particle id
                sijresults[:, 1] = (np.where(sij > c, 1, 0)).sum(axis = 1)  #bond number 
                sijresults[:, 2] = np.where(sijresults[:, 1] > Neighborlist[:, 0] / 2, 1, 0) #crystalline
                results = np.vstack((results, sijresults))

            outputQl = namestr + '.sij.largeQ_l%d.dat'%l
            names = 'id  sijcrystalbondnum  crystalline.l=' + str(l)
            np.savetxt(outputQl, results[1:], fmt='%d', header = names, comments = '')
            fneighbor.close()
            print ('-------------Calculate s(i, j) based on Ql over-----------')

        if csmallwcap:
            smallqlm = np.array(smallqlm)
            smallw = np.zeros((self.SnapshotNumber, self.ParticleNumber))
            Windex = Wignerindex(l)
            w3j    = Windex[:, 3]
            Windex = Windex[:, :3].astype(int) + l
            for n in range(self.SnapshotNumber):
                for i in range(self.ParticleNumber):
                    smallw[n, i] = (np.real(np.prod(smallqlm[n, i, Windex], axis = 1)) * w3j).sum()
           
            smallw = np.column_stack((np.arange(self.ParticleNumber) + 1, smallw.T))
            outputw = namestr + '.smallw_l%d.dat'%l
            names = 'id  wl  l=' + str(l)
            numformat = '%d ' + '%.10f ' * (len(smallw[0]) - 1) 
            np.savetxt(outputw, smallw, fmt=numformat, header = names, comments = '')
       
            smallwcap = np.power(np.square(np.abs(np.array(smallqlm))).sum(axis = 2), -3 / 2).T * smallw[:, 1:]
            smallwcap = np.column_stack((np.arange(self.ParticleNumber) + 1, smallwcap))
            outputwcap = namestr + '.smallwcap_l%d.dat'%l
            names = 'id  wlcap  l=' + str(l)
            numformat = '%d ' + '%.8f ' * (len(smallwcap[0]) - 1) 
            np.savetxt(outputwcap, smallwcap, fmt=numformat, header = names, comments = '')
            print ('------------- Calculate BOO w and normalized (cap) one over ----------------')

        if clargeWcap:
            largeQlm = np.array(largeQlm)
            largew = np.zeros((self.SnapshotNumber, self.ParticleNumber))
            Windex = Wignerindex(l)
            w3j    = Windex[:, 3]
            Windex = Windex[:, :3].astype(int) + l
            for n in range(self.SnapshotNumber):
                for i in range(self.ParticleNumber):
                    largew[n, i] = (np.real(np.prod(largeQlm[n, i, Windex], axis = 1)) * w3j).sum()
           
            largew = np.column_stack((np.arange(self.ParticleNumber) + 1, np.real(largew.T)))
            outputW = namestr + '.largeW_l%d.dat'%l
            names = 'id  Wl  l=' + str(l)
            numformat = '%d ' + '%.10f ' * (len(largew[0]) - 1) 
            np.savetxt(outputW, largew, fmt=numformat, header = names, comments = '')
       
            largewcap = np.power(np.square(np.abs(np.array(largeQlm))).sum(axis = 2), -3 / 2).T * largew[:, 1:]
            largewcap = np.column_stack((np.arange(self.ParticleNumber) + 1, largewcap))
            outputWcap = namestr + '.largeWcap_l%d.dat'%l
            names = 'id  Wlcap  l=' + str(l)
            numformat = '%d ' + '%.8f ' * (len(largewcap[0]) - 1) 
            np.savetxt(outputWcap, largewcap, fmt=numformat, header = names, comments = '')        
            print ('------------- Calculate BOO W and normalized (cap) one over ----------------')

        print ('----calculate multiple order parameters together DONE------')
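A minimal sketch of the rotational invariant q_l obtained from a particle's qlm vector, i.e. the 4*pi/(2l+1) normalization used above (random toy qlm, NumPy only):

import numpy as np
from math import pi

l = 6
rng = np.random.default_rng(3)
# hypothetical (2l+1)-component complex qlm vector of a single particle
qlm = rng.normal(size=2 * l + 1) + 1j * rng.normal(size=2 * l + 1)

# rotationally invariant bond-orientational order parameter
ql = np.sqrt(4 * pi / (2 * l + 1) * np.square(np.abs(qlm)).sum())
print(ql)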
Example no. 16
    def partial(self, qmax, a = 1.0, dt = 0.002, atomtype = False, outputfile = ''):
        """ Compute self-intermediate scattering functions ISF, dynamic susceptibility ISFX4 based on ISF
            Overlap function Qt and its corresponding dynamic susceptibility QtX4
            Mean-square displacements msd; non-Gaussian parameter alpha2

            qmax is the wavenumber corresponding to the first peak of the structure factor
            qmax accounts for each particle type, so it is a list covering all particle types
            a is the cutoff for the overlap function, default is 1.0 (EAM) or 0.3 (LJ) (0.3<d>)
            dt is the timestep of MD simulations

            ********CONSIDER CAGE RELATIVE DISPLACEMENTS********
        """
        print ('-----------------Compute Cage Relative Partial Dynamics--------------------')

        partialresults = [] #a list containing results of all particle types

        if not atomtype: atomtype = self.Type
        for i in atomtype:  #loop over different particle types
            TYPESET = np.where(np.array(self.ParticleType) == i, 1, 0)

            results = np.zeros(((self.SnapshotNumber - 1), 7))
            names  = 't  ISF  ISFX4  Qt  QtX4  msd  alpha2'
            
            cal_isf  = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
            cal_Qt   = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
            cal_msd  = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
            cal_alp  = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
            deltat   = np.zeros(((self.SnapshotNumber - 1), 2), dtype = int) #deltat, deltatcounts
            fneighbor = open(self.Neighborfile, 'r')  #consider neighbors' displacements

            for n in range(self.SnapshotNumber - 1):  #loop over time intervals
                RII    = self.Positions[n + 1:] - self.Positions[n]
                if self.PBC:
                    hmatrixinv = np.linalg.inv(self.hmatrix[n])
                    for ii in range(len(RII)):
                       matrixij = np.dot(RII[ii], hmatrixinv)
                       RII[ii]  = np.dot(matrixij - np.rint(matrixij) * self.ppp, self.hmatrix[n]) #remove PBC
                
                RII_relative = RII.copy()
                Neighborlist = Voropp(fneighbor, self.ParticleNumber) #neighbor list [number, list...]
                for m in range(RII.shape[0]):
                    for ii in range(self.ParticleNumber):
                        RII_relative[m, ii] = RII[m,ii]-RII[m, Neighborlist[ii, 1: Neighborlist[ii, 0] + 1]].mean(axis = 0) #cage relative displacements            
                        #keep RII of each atom unchanged during subtraction


                RII_isf   = ((np.cos(RII_relative * qmax[i - 1]).mean(axis = 2)) * TYPESET[n + 1:]).sum(axis = 1) #index is timeinterval -1
                cal_isf   = pd.concat([cal_isf, pd.DataFrame(RII_isf[np.newaxis, :])])
                distance  = np.square(RII_relative).sum(axis = 2)
                RII_Qt    = ((np.sqrt(distance) <= a) * TYPESET[n + 1:]).sum(axis = 1)
                cal_Qt    = pd.concat([cal_Qt, pd.DataFrame(RII_Qt[np.newaxis, :])])
                cal_msd   = pd.concat([cal_msd, pd.DataFrame((distance * TYPESET[n + 1:]).sum(axis = 1)[np.newaxis, :])])
                distance2 = (np.square(distance) * TYPESET[n + 1:]).sum(axis = 1)
                cal_alp   = pd.concat([cal_alp, pd.DataFrame(distance2[np.newaxis, :])])

            
            cal_isf      = cal_isf.iloc[1:]
            cal_Qt       = cal_Qt.iloc[1:]
            cal_msd      = cal_msd.iloc[1:]
            cal_alp      = cal_alp.iloc[1:]
            deltat[:, 0] = np.array(cal_isf.columns) + 1 #Timeinterval
            deltat[:, 1] = np.array(cal_isf.count())     #Timeinterval frequency

            results[:, 0] = deltat[:, 0] * self.TimeStep * dt 
            results[:, 1] = cal_isf.mean() / self.TypeNumber[i - 1]
            results[:, 2] = ((cal_isf**2).mean() - (cal_isf.mean())**2) / self.TypeNumber[i - 1]
            results[:, 3] = cal_Qt.mean() / self.TypeNumber[i - 1]
            results[:, 4] = ((cal_Qt**2).mean() - (cal_Qt.mean())**2) / self.TypeNumber[i - 1]
            results[:, 5] = cal_msd.mean() / self.TypeNumber[i - 1]
            results[:, 6] = cal_alp.mean() / self.TypeNumber[i - 1]
            results[:, 6] = alpha2factor(self.ndim) * results[:, 6] / np.square(results[:, 5]) - 1.0

            if outputfile:
                np.savetxt('Type' + str(i) + '.' + outputfile, results, fmt='%.6f', header = names, comments = '')
            
            partialresults.append(results)
            fneighbor.close()
        
        print ('-----------------Compute Cage Relative Partial Dynamics Over--------------------')
        return partialresults, names
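A toy illustration of the per-type masking used above (TYPESET) to turn overall averages into partial, type-resolved ones; the displacements and types are invented:

import numpy as np

# hypothetical per-atom squared displacements and particle types (binary system)
distance = np.array([0.10, 0.02, 0.30, 0.05])    # |dr|^2 of each atom
particletype = np.array([1, 2, 1, 2])

for itype in (1, 2):
    TYPESET = np.where(particletype == itype, 1, 0)       # mask for this type
    msd_itype = (distance * TYPESET).sum() / TYPESET.sum()
    print(itype, msd_itype)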
Example no. 17
    def fastS4(self, outputfile, a = 1.0, dt = 0.002, X4timeset = 0, results_path = '../../analysis/cagedynamics'):
        """ Compute four-point dynamic structure factor at peak timescale of dynamic susceptibility

            Based on overlap function Qt and its corresponding dynamic susceptibility QtX4     
            a is the cutoff for the overlap function, default is 1.0 (EAM) or 0.3 (LJ) (0.3<d>)
            dt is the timestep of MD simulations
            X4timeset is the peak timescale of X4; if 0, the calculated one is used
            Dynamics (Qt and X4) should be calculated before computing S4
            Only the fast particles are considered

            ********CONSIDER CAGE RELATIVE DISPLACEMENTS********
        """
        print ('-----------------Compute Cage Relative dynamic S4(q) of fast particles --------------')

        if not os.path.exists(results_path):
            os.makedirs(results_path)

        #-----------calculate overall dynamics first----------------
        results = np.zeros(((self.SnapshotNumber - 1), 3))
        names  = 't  Qt  QtX4'
        
        cal_Qt   = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
        deltat   = np.zeros(((self.SnapshotNumber - 1), 2), dtype = int) #deltat, deltatcounts
        fneighbor = open(self.Neighborfile, 'r')  #consider neighbors' displacements
        for n in range(self.SnapshotNumber - 1):  #time interval
            RII = self.Positions[n + 1:] - self.Positions[n]
            Neighborlist = Voropp(fneighbor, self.ParticleNumber) #neighbor list [number, list...]
            for i in range(self.ParticleNumber):
                RII[:, i] -= RII[:, Neighborlist[i, 1: Neighborlist[i, 0] + 1]].mean(axis = 1) #cage relative displacements            

            distance  = np.square(RII).sum(axis = 2)
            RII_Qt    = (np.sqrt(distance) >= a).sum(axis = 1)
            cal_Qt    = pd.concat([cal_Qt, pd.DataFrame(RII_Qt[np.newaxis, :])])
        
        cal_Qt       = cal_Qt.iloc[1:]
        deltat[:, 0] = np.array(cal_Qt.columns) + 1 #Timeinterval
        deltat[:, 1] = np.array(cal_Qt.count())     #Timeinterval frequency

        results[:, 0] = deltat[:, 0] * self.TimeStep * dt 
        results[:, 1] = cal_Qt.mean() / self.ParticleNumber
        results[:, 2] = ((cal_Qt**2).mean() - (cal_Qt.mean())**2) / self.ParticleNumber
        np.savetxt(results_path + 'CageDynamics.' + outputfile, results, fmt='%.6f', header = names, comments = '')

        #-----------calculate S4(q) of fast particles----------------
        twopidl = 2 * pi / self.Boxlength[0]
        if self.Boxlength[0] <= 40.0:
            Numofq = int(self.Boxlength[0] / twopidl)
        else:
            Numofq = int(self.Boxlength[0] / 2 / twopidl)

        wavevector = wavevector2d(Numofq) #only S4(q) in the low wavenumber range is of interest
        qvalue, qcount = np.unique(wavevector[:, 0], return_counts = True)
        sqresults = np.zeros((len(wavevector[:, 0]), 3)) #the first column holds the wavenumber

        if X4timeset:
            X4time = int(X4timeset / dt / self.TimeStep)
        else:
            X4time = deltat[results[:, 2].argmax(), 0] 

        fneighbor = open(self.Neighborfile, 'r')  #consider neighbors' displacements
        for n in range(self.SnapshotNumber - X4time):
            Neighborlist = Voropp(fneighbor, self.ParticleNumber) #neighbor list [number, list...]
            RII = self.Positions[n + X4time] - self.Positions[n]  #absolute displacements
            for i in range(self.ParticleNumber):
                RII[i] -= RII[Neighborlist[i, 1: Neighborlist[i, 0] + 1]].mean(axis = 0) #cage relative displacements            

            RII = np.sqrt(np.square(RII).sum(axis = 1))
            RII = np.where(RII >= a, 1, 0)
            sqtotal = np.zeros((len(wavevector[:, 0]), 2))
            for i in range(self.ParticleNumber):
                medium   = twopidl * (self.Positions[n][i] * wavevector[:, 1:]).sum(axis = 1)
                sqtotal[:, 0] += np.sin(medium) * RII[i]
                sqtotal[:, 1] += np.cos(medium) * RII[i]
            
            sqresults[:, 1] += np.square(sqtotal).sum(axis = 1) / self.ParticleNumber
            sqresults[:, 2] += sqtotal[:, 1]

        sqresults[:, 0]  = wavevector[:, 0]
        sqresults[:, 1]  = sqresults[:, 1] / (self.SnapshotNumber - X4time)
        sqresults[:, 2]  = np.square(sqresults[:, 2] / (self.SnapshotNumber - X4time)) / self.ParticleNumber

        sqresults = pd.DataFrame(sqresults)
        results   = np.array(sqresults.groupby(sqresults[0]).mean())
        results[:, 1] = results[:, 0] - results[:, 1] / qcount

        qvalue    = twopidl * np.sqrt(qvalue)
        results   = np.column_stack((qvalue, results))
        names = 'q  S4a(q)  S4b(q)'
        np.savetxt(results_path + outputfile, results, fmt='%.6f', header = names, comments = '')
        print ('--------- Compute Cage Relative S4(q) of fast particles over ------')

        return results
Example no. 18
    def slowS4(self, X4time, dt = 0.002, a = 1.0, qrange = 10, outputfile = ''):
        """ Compute four-point dynamic structure factor at peak timescale of dynamic susceptibility

            Based on overlap function Qt and its corresponding dynamic susceptibility QtX4     
            a is the cutoff for the overlap function, default is 1.0 (EAM) or 0.3 (LJ) (0.3<d>)
            X4time is the peak timescale of X4
            dt is the timestep in MD simulations
            Dynamics should be calculated before computing S4
            Only the slow particles are considered

            ********CONSIDER CAGE RELATIVE DISPLACEMENTS********
        """
        print ('-----------------Compute dynamic S4(q) of slow particles --------------')

        X4time = int(X4time / dt / self.TimeStep)

        twopidl = 2 * pi / self.Boxlength
        Numofq = int(qrange / twopidl.max())
        wavevector = choosewavevector(Numofq, self.ndim)[:, 1:] #only S4(q) in the low wavenumber range is of interest
        wavevector = wavevector.astype(np.float64) * twopidl[np.newaxis, :]
        wavenumber = np.linalg.norm(wavevector, axis=1)

        sqresults = np.zeros((wavevector.shape[0], 2))
        sqresults[:, 0] = wavenumber

        fneighbor = open(self.Neighborfile, 'r')  #consider neighbors' displacements
        for n in range(self.SnapshotNumber - X4time):
            RII = self.Positions[n + X4time] - self.Positions[n]  #absolute displacements
            if self.PBC:
                hmatrixinv = np.linalg.inv(self.hmatrix[n])
                matrixij = np.dot(RII, hmatrixinv)
                RII  = np.dot(matrixij - np.rint(matrixij) * self.ppp, self.hmatrix[n]) #remove PBC

            RII_relative = RII.copy()
            Neighborlist = Voropp(fneighbor, self.ParticleNumber) #neighbor list [number, list...]
            for i in range(self.ParticleNumber):
                RII_relative[i] = RII[i] - RII[Neighborlist[i, 1: Neighborlist[i, 0] + 1]].mean(axis = 0) #cage relative displacements            
                #keep RII of each atom unchanged during subtraction

            RII = np.linalg.norm(RII_relative, axis=1)#np.sqrt(np.square(RII_relative).sum(axis = 1))
            RII = np.where(RII <= a, 1, 0)
        
            sqtotal = np.zeros_like(sqresults)
            for i in range(self.ParticleNumber):
                if RII[i]:
                    thetas = (self.Positions[n][i][np.newaxis, :] * wavevector).sum(axis=1)
                    sqtotal[:, 0] += np.sin(thetas)
                    sqtotal[:, 1] += np.cos(thetas)
            
            sqresults[:, 1] += np.square(sqtotal).sum(axis=1) / self.ParticleNumber
        sqresults[:, 1] /= (self.SnapshotNumber - X4time)
        
        sqresults = pd.DataFrame(sqresults).round(6)
        results = sqresults.groupby(sqresults[0]).mean().reset_index().values
        
        names = 'q  S4'
        if outputfile:
            np.savetxt(outputfile, results, fmt='%.6f', header = names, comments = '')
        fneighbor.close()

        print ('--------- Compute Cage Relative S4(q) of slow particles over ------')
        return results, names
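A minimal sketch of the S4(q) accumulation above for a single snapshot: sum sin and cos of q.r over the selected (here "slow") particles and take the squared modulus (toy positions and wavevectors, NumPy only):

import numpy as np

rng = np.random.default_rng(4)
# hypothetical configuration: particle positions, a slow/fast mask, a few wavevectors
positions = rng.uniform(0.0, 10.0, size=(50, 3))
slowmask = rng.integers(0, 2, size=50)              # 1 if the particle is slow
wavevector = 2 * np.pi / 10.0 * np.array([[1, 0, 0],
                                          [0, 1, 0],
                                          [1, 1, 0]], dtype=np.float64)

sqtotal = np.zeros((wavevector.shape[0], 2))
for i in range(positions.shape[0]):
    if slowmask[i]:
        thetas = (positions[i][np.newaxis, :] * wavevector).sum(axis=1)
        sqtotal[:, 0] += np.sin(thetas)
        sqtotal[:, 1] += np.cos(thetas)

S4 = np.square(sqtotal).sum(axis=1) / positions.shape[0]   # one snapshot's S4(q)
print(S4)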
Example no. 19
def Vonmises(inputfile,
             Neighborfile,
             ndim,
             strainrate,
             ppp=[1, 1, 1],
             dt=0.002,
             filetype='lammps',
             moltypes='',
             outputfile=''):
    """ Calculate Non-Affine Von-Mises Strain (local shear invariant)
        
        With the first snapshot of inputfile as reference
        The unit of strainrate should be consistent with the intrinsic time unit (i.e. with dt)
        The code accounts for both orthogonal and triclinic boxes

        The keyword filetype is used for different MD engines
        It has four choices:
        'lammps' (default)

        'lammpscenter' (lammps molecular dump with known atom type of each molecule center)
        moltypes is a dict mapping center atomic type to molecular type
        moltypes is also used to select the center atom 
        such as moltypes = {3: 1, 5: 2}

        'gsd' (HOOMD-blue standard output for static properties)
    
        'gsd_dcd' (HOOMD-blue outputs for static and dynamic properties)
    """

    d = readdump(inputfile, ndim, filetype, moltypes)
    d.read_onefile()
    positions = d.Positions
    particlenumber = d.ParticleNumber[0]
    snapshotnumber = d.SnapshotNumber
    boxlength = d.Boxlength
    hmatrix = d.hmatrix
    timestep = d.TimeStep[1] - d.TimeStep[0]
    PI = np.eye(ndim, dtype=int)  #identity matrix along diag
    results = np.zeros((particlenumber, snapshotnumber - 1))

    fneighbor = open(Neighborfile, 'r')
    Neighborlist = Voropp(fneighbor,
                          particlenumber)  #neighbor list [number, list...]
    fneighbor.close()
    for i in range(particlenumber):
        neighbors = Neighborlist[i, 1:Neighborlist[i, 0] + 1]
        RIJ0 = positions[0][neighbors] - positions[0][
            i]  #reference snapshot: the initial one
        #periodic  = np.where(np.abs(RIJ0 / boxlength[0]) > 0.50, np.sign(RIJ0), 0)
        #RIJ0     -= boxlength[0] * periodic * ppp #remove periodic boundary conditions
        matrixij = np.dot(RIJ0, np.linalg.inv(hmatrix[0]))
        RIJ0 = np.dot(matrixij - np.rint(matrixij) * ppp, hmatrix[0])
        for j in range(snapshotnumber - 1):
            RIJ1 = positions[j + 1][neighbors] - positions[j + 1][
                i]  #deformed snapshot
            matrixij = np.dot(RIJ1, np.linalg.inv(hmatrix[j + 1]))
            RIJ1 = np.dot(matrixij - np.rint(matrixij) * ppp, hmatrix[j + 1])
            PJ = np.dot(np.linalg.inv(np.dot(RIJ0.T, RIJ0)),
                        np.dot(RIJ0.T, RIJ1))
            etaij = 0.5 * (np.dot(PJ, PJ.T) - PI)
            results[i, j] = np.sqrt(0.5 * np.trace(
                np.linalg.matrix_power(
                    etaij - (1 / ndim) * np.trace(etaij) * PI, 2)))

    results = np.column_stack((np.arange(particlenumber) + 1, results))
    strain = np.arange(snapshotnumber) * timestep * dt * strainrate
    results = np.vstack((strain, results))
    names = 'id   The_first_row_is_the_strain.0isNAN'
    fformat = '%d ' + '%.6f ' * (snapshotnumber - 1)
    if outputfile:
        np.savetxt(outputfile, results, fmt=fformat, header=names, comments='')

    print('------ Calculate Von Mises Strain Over -------')
    return results, names
Example no. 20
    def slowS4(self, outputfile, X4time, dt = 0.002, a = 1.0, results_path = '../../analysis/cagedynamics'):
        """ Compute four-point dynamic structure factor at peak timescale of dynamic susceptibility

            Based on overlap function Qt and its corresponding dynamic susceptibility QtX4     
            a is the cutoff for the overlap function, default is 1.0 (EAM) or 0.3 (LJ) (0.3<d>)
            X4time is the peak timescale of X4
            dt is the timestep in MD simulations
            Dynamics should be calculated before computing S4
            Only the slow particles are considered

            ********CONSIDER CAGE RELATIVE DISPLACEMENTS********
        """
        print ('-----------------Compute dynamic S4(q) of slow particles --------------')

        if not os.path.exists(results_path):
            os.makedirs(results_path)

        X4time = int(X4time / dt / self.TimeStep)
        twopidl = 2 * pi / self.Boxlength[0]
        if self.Boxlength[0] <= 40.0:
            Numofq = int(self.Boxlength[0] / twopidl)
        else:
            Numofq = int(self.Boxlength[0] / 2 / twopidl)

        wavevector = wavevector2d(Numofq) #only S4(q) in the low wavenumber range is of interest
        qvalue, qcount = np.unique(wavevector[:, 0], return_counts = True)
        sqresults = np.zeros((len(wavevector[:, 0]), 3)) #the first column holds the wavenumber

        fneighbor = open(self.Neighborfile, 'r')  #consider neighbors' displacements
        for n in range(self.SnapshotNumber - X4time):
            Neighborlist = Voropp(fneighbor, self.ParticleNumber) #neighbor list [number, list...]
            RII = self.Positions[n + X4time] - self.Positions[n]  #absolute displacements
            for i in range(self.ParticleNumber):
                RII[i] -= RII[Neighborlist[i, 1: Neighborlist[i, 0] + 1]].mean(axis = 0) #cage relative displacements            

            RII = np.sqrt(np.square(RII).sum(axis = 1))
            RII = np.where(RII <= a, 1, 0)
            sqtotal = np.zeros((len(wavevector[:, 0]), 2))
            for i in range(self.ParticleNumber):
                medium   = twopidl * (self.Positions[n][i] * wavevector[:, 1:]).sum(axis = 1)
                sqtotal[:, 0] += np.sin(medium) * RII[i]
                sqtotal[:, 1] += np.cos(medium) * RII[i]
            
            sqresults[:, 1] += np.square(sqtotal).sum(axis = 1) / self.ParticleNumber
            sqresults[:, 2] += sqtotal[:, 1]

        sqresults[:, 0]  = wavevector[:, 0]
        sqresults[:, 1]  = sqresults[:, 1] / (self.SnapshotNumber - X4time)
        sqresults[:, 2]  = np.square(sqresults[:, 2] / (self.SnapshotNumber - X4time)) / self.ParticleNumber

        sqresults = pd.DataFrame(sqresults)
        results   = np.array(sqresults.groupby(sqresults[0]).mean())
        results[:, 1] = results[:, 0] - results[:, 1] / qcount

        qvalue    = twopidl * np.sqrt(qvalue)
        results   = np.column_stack((qvalue, results))
        names = 'q  S4a(q)  S4b(q)'
        np.savetxt(results_path + outputfile, results, fmt='%.6f', header = names, comments = '')
        print ('--------- Compute Cage Relative S4(q) of slow particles over ------')

        return results