Example #1
def bondOrientation(atoms, basis, l, neighbs=None, rcut=1, debug=False):

    if neighbs is None:
        bounds = [[0, basis[0][0]], [0, basis[1][1]], [0, basis[2][2]]]
        if rcut <= 1:
            rcut = generateRCut(atoms, basis, debug=debug)
            print "Automatically generating r-cutoff=", rcut
            neighbs = neighbors(atoms, bounds, rcut)
        elif rcut == 2:
            rcut = generateRCut(atoms, basis, debug=debug)
            print "Automatically generating r-cutoff=", rcut
            neighbs = neighbors(atoms, bounds, rcut)
            neighbs = secondShell(neighbs)
        else:
            neighbs = neighbors(atoms, bounds, rcut)

    #sum the spherical harmonics over every neighbor pair
    Qlms = [
        sum([
            pairSphereHarms(atoms[i], minImageAtom(atoms[i], atoms[j], basis),
                            l) for j in ineighbs
        ]) / len(ineighbs) for i, ineighbs in enumerate(neighbs)
    ]
    Ql = [(((Qlm.conjugate() * Qlm * 4 * np.pi / (2 * l + 1.))).real)**0.5
          for Qlm in Qlms]

    return Ql, rcut
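
A minimal usage sketch for the function above. Everything here is illustrative: the coordinates are fabricated test data, the cell is assumed orthorhombic (which the bounds construction above already requires), and the helpers (generateRCut, neighbors, pairSphereHarms, minImageAtom) are assumed to be importable from the same package.

import numpy as np

# Hypothetical call; atoms/basis values are made-up test data.
atoms = (np.random.rand(100, 3) * 10.0).tolist()     # 100 atoms in a 10x10x10 box
basis = [[10.0, 0, 0], [0, 10.0, 0], [0, 0, 10.0]]   # orthorhombic cell matrix
Ql, rcut = bondOrientation(atoms, basis, l=6)        # l=6 is a common choice
print(rcut, sum(Ql) / len(Ql))
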
def computing_frequencies_with_mismatches_and_reverse_complements(text, k, d):
    frequency_array = [0] * 4**k
    for i in range(len(text) - k + 1):
        pattern = text[i:i + k]
        neighborhood = neighbors(pattern, d) + neighbors(
            reverse_complement(pattern), d)
        for approximate_pattern in neighborhood:
            j = pattern_to_number(approximate_pattern)
            frequency_array[j] = frequency_array[j] + 1
    return frequency_array
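
The example above assumes pattern_to_number and reverse_complement helpers that are not shown. A hedged sketch under one common convention (A=0, C=1, G=2, T=3, base-4 encoding) follows; the original definitions may differ.

def pattern_to_number(pattern):
    # Base-4 encoding of a DNA k-mer under the assumed A/C/G/T = 0/1/2/3 convention.
    index = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    number = 0
    for base in pattern:
        number = number * 4 + index[base]
    return number


def reverse_complement(pattern):
    # Watson-Crick complement, read in reverse.
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    return ''.join(complement[base] for base in reversed(pattern))
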
Example #3
def bondOrientation2sh(atoms,basis,l,neighbs=None,rcut=None,debug=False):
    atoms = array(atoms)
    basis = array(basis)    
    atoms = rectify(atoms,basis)

    if neighbs is None:
        bounds=[[0,basis[0][0]],[0,basis[1][1]],[0,basis[2][2]]]

        if rcut is None:
            rcut = generateRCut(atoms,basis,debug=debug)
            #print "Automatically generating r-cutoff=",rcut

        neighbs = secondShell( neighbors(atoms,bounds,rcut) )

    #sum the spherical harmonics over every neighbor pair
    a = 4*np.pi / (2*l+1.)
    Ql=list()
    for i,ineighbs in enumerate(neighbs):
        n=len(ineighbs)

        shij = np.zeros(2*l+1, dtype=complex) #spherical harmonic for bond i-j
        for j in ineighbs:
            shij += pairSphereHarms(atoms[i],minImageAtom(atoms[i],atoms[j],basis),l)/n
        shi = a * np.sum(np.real(shij * np.conj(shij)))
        Ql.append(shi**0.5)
    
    return Ql,rcut
def frequent_words_with_mismatches(text, k, d):
    '''
    Finds most frequent k-mers within the text with at most d mismatches. Does so by sliding a
    k-sized window down the text to find a pattern, generates the likely d-neighborhood for that
    pattern, and stores the frequency of those neighbors in a map. Then, the patterns with the
    max frequency are returned. Runs in O(n * k^2) time, where n is the length of the text and
    k is the length of the pattern.

    Parameters:
    text (str): Sequence in which k-mers are being searched for
    k (int): size of k-mer
    d (int): maximum Hamming Distance between k-mer and neighbors

    Returns:
    patterns (list(str)): all k-mers with frequency == max
    '''
    patterns = []
    frequency_map = {}
    for i in range(len(text) + 1 - k):
        pattern = text[i:i + k]
        neighborhood = neighbors(pattern, d)
        for neighbor in neighborhood:
            if neighbor in frequency_map:
                frequency_map[neighbor] += 1
            else:
                frequency_map[neighbor] = 1
    max_val = max(frequency_map.values())
    for pattern in frequency_map:
        if frequency_map[pattern] == max_val:
            patterns.append(pattern)
    return patterns
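
The docstring above relies on a neighbors(pattern, d) helper that generates the d-neighborhood of a k-mer. A self-contained sketch of the standard recursive construction is shown below; the original helper is not part of this listing and may differ in detail.

def hamming_distance(p, q):
    # Number of mismatched positions between two equal-length strings.
    return sum(a != b for a, b in zip(p, q))


def neighbors(pattern, d):
    # All strings whose Hamming distance from pattern is at most d.
    if d == 0:
        return {pattern}
    if len(pattern) == 1:
        return {'A', 'C', 'G', 'T'}
    neighborhood = set()
    for suffix in neighbors(pattern[1:], d):
        if hamming_distance(pattern[1:], suffix) < d:
            for base in 'ACGT':
                neighborhood.add(base + suffix)
        else:
            neighborhood.add(pattern[0] + suffix)
    return neighborhood
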
Example #5
def count_sequences(start_position, num_hops):
    if num_hops == 0:
        return 1
    
    num_sequences = 0
    for position in neighbors(start_position):
        num_sequences += count_sequences(position, num_hops - 1)
    return num_sequences
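
The keypad-hop examples (this one and the later count/yield variants) all assume a neighbors(position) helper. One common formulation of this exercise is a chess knight moving on a phone keypad; a sketch of that mapping follows, offered as an assumption rather than the original definition.

# Keys a chess knight can reach from each digit on a 0-9 phone keypad (assumed mapping).
KEYPAD_KNIGHT_MOVES = {
    0: (4, 6),
    1: (6, 8),
    2: (7, 9),
    3: (4, 8),
    4: (0, 3, 9),
    5: (),          # no knight move starts from 5
    6: (0, 1, 7),
    7: (2, 6),
    8: (1, 3),
    9: (2, 4),
}


def neighbors(position):
    return KEYPAD_KNIGHT_MOVES[position]
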
Example #6
def motifs_enumeration(sequences, k, d):
    """
    Check whether a motif of length k appears in every sequence with at most d mismatches
    :param sequences: the array of sequences
    :param k: the length of the motif
    :param d: the maximum number of mismatches
    :return: the (k, d)-motifs found in the sequences, as a set
    """
    motifs = set()
    for kmer in kmers(sequences, k):
        neighborhood = neighbors(kmer, d)
        for neighbor in neighborhood:
            neighborhood2 = neighbors(neighbor, d)
            if all(
                    any(neighbor2 in seq for neighbor2 in neighborhood2)
                    for seq in sequences):
                motifs.add(neighbor)
    return motifs
def count_sequences(start_pos, num_hops):
    """Recursion solution."""
    if num_hops == 0:
        return 1

    num_sequences = 0
    for pos in neighbors(start_pos):
        num_sequences += count_sequences(pos, num_hops - 1)
    return num_sequences
Example #8
def count_sequences(start, num_hops):
    global function_calls
    function_calls['count_sequence_calls'] += 1
    if num_hops == 0:
        return 1

    num_sequences = 0
    for position in neighbors(start):
        num_sequences += count_sequences(position, num_hops - 1)
    return num_sequences
Example #9
def yield_sequences(starting_position, num_hops, sequence=None):
    if sequence is None:
        sequence = [starting_position]

    if num_hops == 0:
        yield sequence
        return

    for neighbor in neighbors(starting_position):
        yield from yield_sequences(neighbor, num_hops - 1,
                                   sequence + [neighbor])
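
A short usage sketch, assuming a keypad-knight neighbors() mapping like the one sketched under Example #5; the call below is illustrative only.

# Enumerate all 2-hop sequences starting from key 1 and cross-check the count.
sequences = list(yield_sequences(1, 2))
print(len(sequences))                    # should equal count_sequences(1, 2)
print(sequences[0])                      # e.g. [1, 6, 0] with the assumed mapping
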
Example #10
def bondOrientation(atoms,basis,l,neighbs=None,rcut=1,debug=False):

    if neighbs is None:
        bounds=[[0,basis[0][0]],[0,basis[1][1]],[0,basis[2][2]]]
        if rcut<=1:
            rcut = generateRCut(atoms,basis,debug=debug)
            print "Automatically generating r-cutoff=",rcut
            neighbs = neighbors(atoms,bounds,rcut)
        elif rcut==2:
            rcut = generateRCut(atoms,basis,debug=debug)
            print "Automatically generating r-cutoff=",rcut
            neighbs = neighbors(atoms,bounds,rcut)
            neighbs = secondShell(neighbs)
        else:
            neighbs = neighbors(atoms,bounds,rcut)

    #sum the spherical harmonics over every neighbor pair
    Qlms = [sum( [ pairSphereHarms(atoms[i],minImageAtom(atoms[i],atoms[j],basis),l) for j in ineighbs ] ) / len(ineighbs) for i,ineighbs in enumerate(neighbs) ] 
    Ql = [ (((Qlm.conjugate()*Qlm *4*np.pi / (2*l+1.))).real)**0.5 for Qlm in Qlms] 

    return Ql,rcut
def yield_sequences(starting_pos, num_hops, sequence=None):
    """Yield all the sequence values of hops done."""
    if sequence is None:
        sequence = [starting_pos]

    if num_hops == 0:
        yield sequence
        return

    for neighbor in neighbors(starting_pos):
        yield from yield_sequences(neighbor, num_hops - 1,
                                   sequence + [neighbor])
Example #12
def radangDistribution(atoms,basis,l=None,neighbs=None,rcut=None,debug=False):
    #l: not used
    if neighbs is None:
        if rcut is None:
            rcut = generateRCut(atoms,basis,debug=debug)
            print("Using RDF to generate r-cutoff=",rcut)
        else:
            print("Using r-cutoff=",rcut)

        bounds=[[0,basis[0][0]],[0,basis[1][1]],[0,basis[2][2]]]
        neighbs = neighbors(atoms,bounds,rcut,style="full")
    return rdf_by_adf(atoms,neighbs,basis,rcut=rcut)
    def helper(position, num_hops):
        if (position, num_hops) in table:
            return table[(position, num_hops)]

        if num_hops == 0:
            return 1

        else:
            num_sequences = 0
            for neighbor in neighbors(position):
                num_sequences += helper(neighbor, num_hops - 1)
            table[(position, num_hops)] = num_sequences
            return num_sequences
    def helper(position, num_hops):
        if (position, num_hops) in cache:
            return cache[(position, num_hops)]

        if num_hops == 0:
            return 1

        else:
            num_sequences = 0
            for neighbor in neighbors(position):
                num_sequences += helper(neighbor, num_hops - 1)
            cache[(position, num_hops)] = num_sequences
            return num_sequences
Example #15
def angleDistribution(atoms,basis,l=None,neighbs=None,rcut=None,debug=False):
    if rcut is None:
        rcut = generateRCut(atoms,basis,debug=debug)
        print("Using RDF to generate r-cutoff=",rcut)
    else:
        print("Using r-cutoff=",rcut)

    if neighbs is None:
        bounds = [[0,basis[0][0]],[0,basis[1][1]],[0,basis[2][2]]]
        #neighbs = voronoiNeighbors(atoms,basis,[1]*len(atoms),style="full")
        neighbs = neighbors(atoms,bounds,rcut)

    return adf(atoms,neighbs,basis,rcut,nbins=360)
Example #16
def yield_sequences(starting_position, num_hops, sequence=None):
    if sequence is None:
        sequence = [starting_position]

    print('start: {}\tsequence: {}\tnum_hops: {}'.format(
        starting_position, sequence, num_hops))

    if num_hops == 0:
        yield sequence
        return

    for neighbor in neighbors(starting_position):
        print('Found neighbor {}'.format(neighbor))
        yield from yield_sequences(neighbor, num_hops - 1, sequence + [neighbor])
Example #17
    def helper(position, num_hops):
        global function_calls
        function_calls['count_with_cache_calls'] += 1
        if (position, num_hops) in cache:
            return cache[(position, num_hops)]

        if num_hops == 0:
            return 1

        num_sequences = 0
        for neighbor in neighbors(position):
            num_sequences += helper(neighbor, num_hops - 1)
        cache[(position, num_hops)] = num_sequences
        return num_sequences
Example #18
def coordinationNumber(atoms,basis,l=None,neighbs=None,rcut=None,debug=False):
    #l: not used
    if neighbs is None:
        if rcut is None:
            rcut = generateRCut(atoms,basis,debug=debug)
            print("Using RDF to generate r-cutoff=",rcut)
        else:
            print("Using r-cutoff=",rcut)

        bounds=[[0,basis[0][0]],[0,basis[1][1]],[0,basis[2][2]]]
        neighbs = neighbors(atoms,bounds,rcut,style="full")
        #neighbs = voronoiNeighbors(atoms,basis,[1]*len(atoms),style="full")
    cns = list(map(len,neighbs))
        
    return cns,rcut
Example #19
def tick(g):

    gp = g

    j = gp > 0
    L = np.argwhere(j)
    D = np.argwhere(np.invert(j))

    K_live = kernel(L, space=gp.shape[0])
    K_dead = kernel(D, space=gp.shape[0])

    N_live = neighbors(gp, K_live, L)
    N_dead = neighbors(gp, K_dead, D)

    S_live = np.array([np.sum(n) for n in N_live])
    S_dead = np.array([np.sum(n) for n in N_dead])

    rip = L[np.any([S_live < 2, S_live > 3], axis=0)]
    gp[rip[:, 0], rip[:, 1]] = 0

    baby = D[S_dead == 3]
    gp[baby[:, 0], baby[:, 1]] = 1

    return gp
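
The tick() step above applies Conway's birth/survival rule: a dead cell with exactly 3 live neighbours is born, and a live cell survives only with 2 or 3. For reference, a self-contained version of the same rule using a 2D convolution is sketched below. It is a comparison sketch only: it does not use the kernel()/neighbors() helpers assumed above, and it assumes zero-padded edges, which may differ from the original's boundary handling.

import numpy as np
from scipy.signal import convolve2d


def tick_conv(grid):
    # One Game-of-Life step (B3/S23) on a 0/1 array, zero-padded at the edges.
    kernel = np.array([[1, 1, 1],
                       [1, 0, 1],
                       [1, 1, 1]])
    counts = convolve2d(grid, kernel, mode='same', boundary='fill', fillvalue=0)
    return ((counts == 3) | ((grid == 1) & (counts == 2))).astype(int)
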
def count_sequences(start_position, num_hops):
    prior_case = [1] * 10
    current_case = [0] * 10
    current_num_hops = 1

    while current_num_hops <= num_hops:
        current_case = [0] * 10
        current_num_hops += 1

        for position in range(0, 10):
            for neighbor in neighbors(position):
                current_case[position] += prior_case[neighbor]
        prior_case = current_case

    return current_case[start_position]
Example #21
def bondAngleCorr(atoms,basis,l,neighbs=None,rcut=None,debug=False):
    atoms = array(atoms)
    basis = array(basis)    
    atoms = rectify(atoms,basis)

    print "Start Bond Angle Correlation Calculation"
    if rcut==None:
        rcut = generateRCut(atoms,basis,debug=debug)

    if neighbs is None:
        rcut = 6.0
        bounds=[[0,basis[0][0]],[0,basis[1][1]],[0,basis[2][2]]]
        hneighbs = neighbors(atoms,bounds,rcut,style="half")    

    #At distances rbins calculate the bond angle correlation function
    nbins = 256
    delr  = rcut/nbins

    #Histogram of bond lengths
    rbins = [i*delr for i in range(nbins)]
    bcnts = [0 for i in range(nbins)]
    gvals = [0.0 for i in range(nbins)]

    #Get the atomic pairs at each bond length
    for i,ineighbs in enumerate(hneighbs):
        for j in ineighbs:
            #i & j make an atom pair, d is the bond length between them
            jatom = minImageAtom(atoms[i],atoms[j],basis)
            d = dist(atoms[i],jatom)
            bbin=int(d/delr)
            bcnts[bbin]+=1
            theta,phi = sphang(atoms[i],jatom)
            gvals[bbin]+= special.sph_harm(0,l,theta,phi)
    
    #At bond length 0, Qlm has one non-zero value at m=0
    Ql0 = conj(sph_harm(0,l,0,0))
    Q0=bondOrientR(atoms,basis,0,0,1) #always 0.28209479 = 1/sqrt(4*pi)
    
    #always use m=0, due to Ql0 normalizing factor which is only non-zero at m=0.
    norm  = 2*(l+1)*Q0*Q0
    for i,n in enumerate(bcnts):
        if n>0:
            w = Ql0/n/norm
            gvals[i] = (gvals[i]*w).real

    print "Finished binning bond angle values"

    return rbins,gvals
Example #22
def bondAngleCorr(atoms, basis, l, neighbs=None, rcut=None, debug=False):

    print "Start Bond Angle Correlation Calculation"
    if rcut == None:
        rcut = generateRCut(atoms, basis, debug=debug)

    if neighbs is None:
        rcut = 6.0
        bounds = [[0, basis[0][0]], [0, basis[1][1]], [0, basis[2][2]]]
        hneighbs = neighbors(atoms, bounds, rcut, style="half")

    #At distances rbins calculate the bond angle correlation function
    nbins = 256
    delr = rcut / nbins

    #Histogram of bond lengths
    rbins = [i * delr for i in range(nbins)]
    bcnts = [0 for i in range(nbins)]
    gvals = [0.0 for i in range(nbins)]

    #Get the atomic pairs at each bond length
    for i, ineighbs in enumerate(hneighbs):
        #    print i,len(ineighbs)
        for j in ineighbs:
            #i & j make an atom pair, d is the bond length between them
            jatom = minImageAtom(atoms[i], atoms[j], basis)
            d = dist(atoms[i], jatom)
            bbin = int(d / delr)
            bcnts[bbin] += 1
            theta, phi = sphang(atoms[i], jatom)
            gvals[bbin] += special.sph_harm(0, l, theta, phi)

    #At bond length 0, Qlm has one non-zero value at m=0
    Ql0 = conj(sph_harm(0, l, 0, 0))
    Q0 = bondOrientR(atoms, basis, 0, 0, 1)  #always 0.28209479 = 1/sqrt(4*pi)

    #always use m=0, due to Ql0 normalizing factor which is only non-zero at m=0.
    norm = 2 * (l + 1) * Q0 * Q0
    for i, n in enumerate(bcnts):
        if n > 0:
            w = Ql0 / n / norm
            gvals[i] = (gvals[i] * w).real

    print "Finished binning bond angle values"

    return rbins, gvals
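
A quick numerical check of the constant quoted in the comment above (0.28209479 = 1/sqrt(4*pi), the value of the l=0, m=0 spherical harmonic):

import numpy as np

print(1.0 / np.sqrt(4.0 * np.pi))   # 0.28209479..., matching the value quoted above
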
Example #23
def count_with_dp(start, num_hops):
    global function_calls
    prior_case = [1] * 10
    current_case = [0] * 10
    current_num_hops = 1

    while current_num_hops <= num_hops:
        function_calls['count_with_dp_calls'] += 1
        current_case = [0] * 10
        current_num_hops += 1

        for position in range(10):
            for neighbor in neighbors(position):
                current_case[position] += prior_case[neighbor]
        prior_case = current_case

    return current_case[start]
Example #24
def radangDistribution(atoms,
                       basis,
                       l=None,
                       neighbs=None,
                       rcut=None,
                       debug=False):
    #l: not used
    if neighbs is None:
        if rcut is None:
            rcut = generateRCut(atoms, basis, debug=debug)
            print("Using RDF to generate r-cutoff=", rcut)
        else:
            print("Using r-cutoff=", rcut)

        bounds = [[0, basis[0][0]], [0, basis[1][1]], [0, basis[2][2]]]
        neighbs = neighbors(atoms, bounds, rcut, style="full")
    return rdf_by_adf(atoms, neighbs, basis, rcut=rcut)
Example #25
def angleDistribution(atoms,
                      basis,
                      l=None,
                      neighbs=None,
                      rcut=None,
                      debug=False):
    if rcut is None:
        rcut = generateRCut(atoms, basis, debug=debug)
        print("Using RDF to generate r-cutoff=", rcut)
    else:
        print("Using r-cutoff=", rcut)

    if neighbs is None:
        bounds = [[0, basis[0][0]], [0, basis[1][1]], [0, basis[2][2]]]
        #neighbs = voronoiNeighbors(atoms,basis,[1]*len(atoms),style="full")
        neighbs = neighbors(atoms, bounds, rcut)

    return adf(atoms, neighbs, basis, rcut, nbins=360)
def count_sequences(start_pos, num_hops):
    """Tabulation loop by having predetermined tables and iterate each case.

    Same approach of memoization, just with loops this timeself.
    """
    current_case = [0] * 10
    prior_case = [1] * 10
    current_num_hops = 1

    while current_num_hops <= num_hops:
        current_case = [0] * 10
        current_num_hops += 1

        for pos in range(0, 10):
            for neighbor in neighbors(pos):
                current_case[pos] += prior_case[neighbor]
        prior_case = current_case

    return current_case[start_pos]
Example #27
def coordinationNumber(atoms,
                       basis,
                       l=None,
                       neighbs=None,
                       rcut=None,
                       debug=False):
    #l: not used
    if neighbs is None:
        if rcut is None:
            rcut = generateRCut(atoms, basis, debug=debug)
            print("Using RDF to generate r-cutoff=", rcut)
        else:
            print("Using r-cutoff=", rcut)

        bounds = [[0, basis[0][0]], [0, basis[1][1]], [0, basis[2][2]]]
        neighbs = neighbors(atoms, bounds, rcut, style="full")
        #neighbs = voronoiNeighbors(atoms,basis,[1]*len(atoms),style="full")
    cns = list(map(len, neighbs))

    return cns, rcut
def motif_enumeration(dna, k, d):
    '''
    Finds all k-mers that appear in multiple DNA sequences with no more than d mismatches. Does so
    by generating all k-mer neighbors for the first sequence, and checking if they occur in other
    sequences with a Hamming distance <= d. Time complexity is very poor (O(n^2 * k^3 * s), where
    n = len(seq[0]), k = len(k-mer), and s = len(seq)).

    Parameters:
    dna (str): dna sequences for which motifs are being found, separated by \n
    k (int): size of motif
    d (int): maximum Hamming Distance between sequence and pattern

    Returns:

    patterns (set): motifs found in all dna strands
    '''
    seqs = dna.split('\n')
    if seqs[-1] == '':
        seqs.pop()
    patterns = set()
    #O(n)
    for i in range(len(seqs[0]) + 1 - k):
        pattern = seqs[0][i:i + k]
        #O(k^2)
        neighborhood = neighbors(pattern, d)
        for neighbor in neighborhood:
            all_match = True
            #O(s). Checks that k-mer with <= d mismatches appears in all seqs
            for seq in seqs:
                match = False
                #O(n^2)
                for l in range(len(seq) + 1 - k):
                    window = seq[l:l + k]
                    #O(k)
                    if hamming_distance(neighbor, window) <= d:
                        match = True
                if not match:
                    all_match = False
            if all_match:
                patterns.add(neighbor)
    return patterns
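
A hedged usage sketch for motif_enumeration. The input is a small textbook-style dataset for the implanted-motif exercise; with the assumed neighbors/hamming_distance helpers, the (3,1)-motifs should come out as ATA, ATT, GTT and TTT, though that expectation is ours, not taken from the original listing.

# Illustrative call only.
dna = "ATTTGGC\nTGCCTTA\nCGGTATC\nGAAAATT"
print(motif_enumeration(dna, 3, 1))   # expected: {'ATA', 'ATT', 'GTT', 'TTT'}
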
Example #29
    #Find the starting locations of atomic data in outcarfile
    grepResults = subprocess.check_output("grep -b POSITION %s" % filename,
                                          shell=True).split("\n")
    bytenums = [int(i.split(":")[0]) for i in grepResults if len(i) > 2]

    outcar = open(filename, "r")
    for i, b in enumerate(bytenums):
        outcar.seek(b)
        outcar.readline()
        outcar.readline()
        atoms = [
            map(float,
                outcar.readline().split()[:3]) for a in range(nAtoms)
        ]
        neighbs.append(neighbors.neighbors(atoms, bounds, rcut))

if lammpsFlag:
    nAtoms = lammpsIO.nAtoms(filename)
    basisByteNums = lammpsIO.basisBytes(filename)
    atomsByteNums = lammpsIO.atomsBytes(filename)

    for i, (bByte, aByte) in enumerate(zip(basisByteNums, atomsByteNums)):
        basis = lammpsIO.parseBasis(filename, bByte)
        bounds = [[0, basis[0][0]], [0, basis[1][1]], [0, basis[2][2]]]
        atoms, dummy = lammpsIO.parseAtoms(filename, aByte, nAtoms, basis)
        neighbs.append(neighbors.neighbors(atoms, bounds, rcut))

neighbsfile = filename + ".neighb"
header = [
    "Spaces Seperate Neighbs, Commas Seperate Atoms, Lines Seperate Arrangements\n"
]
Example #30
        lmpFile = inputFile
        atomByteNums = lammpsIO.atomsBytes(lmpFile)
        nAtom = lammpsIO.nAtoms(lmpFile)
        basis = lammpsIO.basis(lmpFile)
        bounds=[[0,basis[0][0]],[0,basis[1][1]],[0,basis[2][2]]]

        configIterator = parserGens.parseLammpsAtoms(atomByteNums,lmpFile,nAtom)
        atomsTime = [array(atoms) for atoms in configIterator]
    bounds=[[0,basis[0][0]],[0,basis[1][1]],[0,basis[2][2]]]
    
    if len(crTime)+1 == nAtom:
        crTime=crTime[1:] #chop off the average if necessary

    if sh2Enable:
        rcut = 3.1
        neighbs = secondShell( neighbors(array(atomsTime[-1]),bounds,rcut) )
        ns = map(len,neighbs)
        crTime = [sum([crTime[j] for j in neighbs[i]])/ns[i] if ns[i]>0 else crTime[i] for i in range(len(neighbs))]

    if isfType == "total":
        steps,isfs = ISFFull(atomsTime,basis,crTime,nqVecs=3,nStep=nStep,criteria=criteria)
    elif isfType == "self":
        steps,isfs = ISFSelf(atomsTime,basis,crTime,nqVecs=3,nStep=nStep,criteria=criteria)

    scale /= 1E-12 #picosecond conversion
    steps = [i*scale for i in steps]
    if logtEnable:
        steps=log10(steps)

    #Write Data
    outputFile = criteFile + ".isf" + isfType[0].upper() + "_" + criteria
Example #31
def calculate_neighbours(path):
    raw_input = fs_helpers.read_lines(path)
    output = ' '.join(neighbors.neighbors(raw_input[0], int(raw_input[1])))
    print(output)
Example #32
#Bands and band energies from PROCAR
nIon,nGridPoints,bandGrid,occGrid = procarIO.readLDOS(procarFile)
bandGrid-=eFermi

#Ion positions from CONTCAR/POSCAR
basis,atypes,atoms,head,poscar = poscarIO.read(poscarFile)
basis=np.asarray(map(np.asarray,basis))
atoms=np.asarray(map(np.asarray,atoms))
lengths=np.asarray(map(np.linalg.norm,basis))
bounds=[[0,lengths[0]],[0,lengths[1]],[0,lengths[2]]]

#Parse Neighbors file
try:
    rcut=float(neighbsFile)
    neighbs=neighbors(atoms,bounds,rcut,style="full")
except ValueError:
    neighbs=neighborIO.read(neighbsFile)#[map(int,i.split()) for i in open(neighbsFile,"r").readlines()[1:]]

#chemical potential integral
delU = 0.1
uBounds = np.where(np.logical_and(bandGrid > -delU, bandGrid < 0))[0]
bandBounds=bandGrid[uBounds]
d= (bandBounds.max()-bandBounds.min())/len(bandBounds)

ax=atoms[:,0]
ay=atoms[:,1]
az=atoms[:,2]
types=[0]*atypes[0]
fig=mlab.figure(bgcolor=(1.0,1.0,1.0))
Example #33
    xdatcarFlag=True
    poscarFile = sys.argv[3]
else:
    lammpsFlag=True

neighbsfile=filename+".neighb"
header=["Spaces Seperate Neighbs, Commas Seperate Atoms, Lines Seperate Arrangements\n"]
lines=header

if xdatcarFlag:
    basis,dummy,atoms,dummy,dummy = poscarIO.read(poscarFile)
    nAtoms = len(atoms)
    bounds = [[0,1],[0,1],[0,1]]
    rcut /= basis[0][0]
    for i,atoms in enumerate(xdatcarIO.read2(filename)):
        neighbs = neighbors.neighbors(atoms,bounds,rcut)
        lines += [",".join([" ".join(map(str,atomn)) for atomn in neighbs])+"\n"]

if outcarFlag:
    nAtoms = outcarIO.nAtoms(filename)
    basis = array(map(array,outcarIO.basis(filename)))
    bounds = [[0,basis[0][0]],[0,basis[1][1]],[0,basis[2][2]]]

    #Find the starting locations of atomic data in outcarfile
    grepResults = subprocess.check_output("grep -b POSITION %s"%filename,shell=True).split("\n")
    bytenums=[int(i.split(":")[0]) for i in grepResults if len(i)>2]

    outcar = open(filename,"r")
    for i,b in enumerate(bytenums):
        outcar.seek(b)
        outcar.readline()
Example #34
if outcarFlag:
    nAtoms = outcarIO.nIons(filename)
    basis = array(map(array,outcarIO.basis(filename)))
    bounds = [[0,basis[0][0]],[0,basis[1][1]],[0,basis[2][2]]]

    #Find the starting locations of atomic data in outcarfile
    grepResults = subprocess.check_output("grep -b POSITION %s"%filename,shell=True).split("\n")
    bytenums=[int(i.split(":")[0]) for i in grepResults if len(i)>2]

    outcar = open(filename,"r")
    for i,b in enumerate(bytenums):
        outcar.seek(b)
        outcar.readline()
        outcar.readline()
        atoms = [map(float,outcar.readline().split()[:3]) for a in range(nAtoms)]
        neighbs.append(neighbors.neighbors(atoms,bounds,rcut))

if lammpsFlag:
    nAtoms = lammpsIO.nAtoms(filename)
    basisByteNums = lammpsIO.basisBytes(filename)
    atomsByteNums = lammpsIO.atomsBytes(filename)
    
    for i,(bByte,aByte) in enumerate(zip(basisByteNums,atomsByteNums)):
        basis = lammpsIO.parseBasis(filename,bByte)
        bounds = [[0,basis[0][0]],[0,basis[1][1]],[0,basis[2][2]]]
        atoms,dummy = lammpsIO.parseAtoms(filename,aByte,nAtoms,basis)
        neighbs.append(neighbors.neighbors(atoms,bounds,rcut))

neighbsfile=filename+".neighb"
header=["Spaces Seperate Neighbs, Commas Seperate Atoms, Lines Seperate Arrangements\n"]
lines=header
Example #35
#Bands and band energies from PROCAR
nIon, nGridPoints, bandGrid, occGrid = procarIO.readLDOS(procarFile)
bandGrid -= eFermi

#Ion positions from CONTCAR/POSCAR
basis, atypes, atoms, head, poscar = poscarIO.read(poscarFile)
basis = np.asarray(map(np.asarray, basis))
atoms = np.asarray(map(np.asarray, atoms))
lengths = np.asarray(map(np.linalg.norm, basis))
bounds = [[0, lengths[0]], [0, lengths[1]], [0, lengths[2]]]

#Parse Neighbors file
try:
    rcut = float(neighbsFile)
    neighbs = neighbors(atoms, bounds, rcut, style="full")
except ValueError:
    neighbs = neighborIO.read(
        neighbsFile
    )  #[map(int,i.split()) for i in open(neighbsFile,"r").readlines()[1:]]

#chemical potential integral
delU = 0.1
uBounds = np.where(np.logical_and(bandGrid > -delU, bandGrid < 0))[0]
bandBounds = bandGrid[uBounds]
d = (bandBounds.max() - bandBounds.min()) / len(bandBounds)

ax = atoms[:, 0]
ay = atoms[:, 1]
az = atoms[:, 2]
types = [0] * atypes[0]