Example #1
 def get_clusters_with_n_numneighs(self,cutoff,numneighs,cluster_types):
     """ Find clusters of atoms whose VP type is in cluster_types and whose
         centers share at least `numneighs` common neighbors. """
     m = Model(self.model.comment, self.model.lx, self.model.ly, self.model.lz, self.model.atoms[:])
     m.generate_neighbors(cutoff)
     vp_atoms = []
     #print(cluster_types)
     neighs = [[] for _ in range(m.natoms)]  # one independent list per atom ([[]]*n would alias a single list)
     for i,atom in enumerate(m.atoms):
         if(atom.vp.type in cluster_types):
             vp_atoms.append(atom.copy())
     numfound = 0
     for i,atomi in enumerate(vp_atoms):
         for atomj in vp_atoms[i+1:]:  # consider each unordered pair of VP atoms once
             # Get all the neighbors they have in common
             #common_neighs = [n for n in atomi.neighs if n in atomj.neighs if n.vp.type not in cluster_types]
             common_neighs = [n for n in atomi.neighs if n in atomj.neighs]
             if(len(common_neighs) >= numneighs):
                 ind = m.atoms.index(atomi)
                 neighs[ind] = neighs[ind] + copy.copy([x for x in common_neighs if x not in neighs[ind]])
                 ind = m.atoms.index(atomj)
                 neighs[ind] = neighs[ind] + copy.copy([x for x in common_neighs if x not in neighs[ind]])
                 for n in common_neighs:
                     ind = m.atoms.index(n)
                     neighs[ind] = neighs[ind] + [x for x in [atomi,atomj] if x not in neighs[ind]]
                 numfound += 1
     for i,tf in enumerate(neighs):
         m.atoms[i].neighs = tf
     m.check_neighbors()
     print('Total number of {0} atoms: {1}'.format(cluster_types,len(vp_atoms)))
     print('Total number of {2}-sharing {0} atoms: {1}'.format(cluster_types,numfound,numneighs))
     # Now I should be able to go through the graph/model's neighbors.
     return self.search(m,cluster_types)
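A minimal sketch of how this method might be driven, modeled on the (commented-out) calls in Example #9 below. The import path is hypothetical, and the cutoff, cluster types, and numneighs value are illustrative assumptions, not part of the original snippet:

import sys
from atomgraph import AtomGraph  # hypothetical module path; adjust to wherever AtomGraph lives

def example_usage():
    modelfile = sys.argv[1]        # an xyz model file, as in Example #9
    cutoff = float(sys.argv[2])    # neighbor cutoff in Angstroms
    ag = AtomGraph(modelfile, cutoff)
    cluster_types = ('Icosahedra-like', 'Full-icosahedra')
    # Require at least 3 shared neighbors between VP centers (roughly face sharing)
    clusters = ag.get_clusters_with_n_numneighs(cutoff, 3, cluster_types)
    for i, cluster in enumerate(clusters):
        print("Cluster {0} contains {1} atoms.".format(i, len(cluster)))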
Example #2
def main():
    # sys.argv == [categorize_parameters.txt, modelfile]
    if(len(sys.argv) <= 2): sys.exit("\nERROR! Fix your inputs!\n\nArg 1:  input param file detailing each voronoi 'structure'.\nShould be of the form:\nCrystal:\n    0,2,8,*\n\nArg2: a model file.\n\nOutput is printed to screen.")

    paramfile = sys.argv[1]
    modelfiles = sys.argv[2:]

    from cutoff import cutoff  # neighbor cutoff(s); see the sketch of this module after this example

    vp_dict = load_param_file(paramfile)

    m0 = Model(modelfiles[0])
    m0.generate_neighbors(cutoff)
    voronoi_3d(m0, cutoff)
    set_atom_vp_types(m0, vp_dict)
    stats0 = VPStatistics(m0)
    print(modelfiles[0])
    #stats0.print_indexes()
    stats0.print_categories()
    return  # NOTE: the loop below never runs unless this early return is removed

    if len(modelfiles) > 1:
        for modelfile in modelfiles[1:]:
            print(modelfile)
            m = Model(modelfile)
            voronoi_3d(m, cutoff)
            set_atom_vp_types(m, vp_dict)
            stats = VPStatistics(m)
            stats.print_categories()
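Example #2 imports cutoff from a local cutoff module that is not shown here. Judging from Examples #4 and #6 below, it is plausibly a dict of pairwise neighbor cutoffs keyed by atomic-number pairs (it could just as well be a single float); a minimal sketch of such a cutoff.py, with the values copied from those examples:

# cutoff.py -- hypothetical reconstruction; the real module may differ.
# Pairwise neighbor cutoffs (Angstroms) keyed by (Z1, Z2) atomic-number pairs.
cutoff = {}
for pair in [(40, 40), (13, 29), (29, 13), (40, 13), (13, 40),
             (29, 40), (40, 29), (13, 13), (29, 29)]:
    cutoff[pair] = 3.6
for pair in [(41, 41), (28, 28), (41, 28), (28, 41)]:
    cutoff[pair] = 3.7
for pair in [(46, 46), (14, 14), (46, 14), (14, 46)]:
    cutoff[pair] = 3.45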
Example #3
def main():
    # sys.argv == [categorize_parameters.txt, modelfile]
    if len(sys.argv) <= 2:
        sys.exit(
            "\nERROR! Fix your inputs!\n\nArg 1:  input param file detailing each voronoi 'structure'.\nShould be of the form:\nCrystal:\n    0,2,8,*\n\nArg2: a model file.\n\nOutput is printed to screen."
        )

    paramfile = sys.argv[1]
    modelfiles = sys.argv[2:]

    from cutoff import cutoff

    vp_dict = load_param_file(paramfile)

    m0 = Model(modelfiles[0])
    m0.generate_neighbors(cutoff)
    # voronoi_3d(m0, cutoff)
    # set_atom_vp_types(m0, vp_dict)
    # stats0 = VPStatistics(m0)
    # stats0.print_indexes()
    # stats0.print_categories()

    from atom import Atom

    m = Model(
        "atoms with less than 12 neighbors in Zr50Cu45Al15 MD model (originally with 91200 atoms)",
        m0.lx,
        m0.ly,
        m0.lz,
        [],
    )
    count = 0
    for j, atom in enumerate(m0.atoms):
        if len(atom.neighs) >= 12:
            continue
        new = Atom(count, "Si", *atom.coord)
        count += 1
        print("Added new atom {0}".format(new))
        m.add(new)
    m.write("less_than_12_neighbors.xyz")

    if len(modelfiles) > 1:
        for modelfile in modelfiles[1:]:
            m = Model(modelfile)
            voronoi_3d(m, cutoff)
            set_atom_vp_types(m, vp_dict)

            # NOTE: stats0 is only defined if the commented-out VPStatistics block above is restored
            stats0 = stats0 + m
            stats0.print_categories()
            stats0.print_indexes()
Example #4
def main():
    modelfile = sys.argv[1]
    paramfile = sys.argv[2]

    m = Model(modelfile)
    m.generate_neighbors(3.45)  # overwritten below by the per-pair cutoff dict

    cutoff = {}
    cutoff[(40,40)] = 3.6
    cutoff[(13,29)] = 3.6
    cutoff[(29,13)] = 3.6
    cutoff[(40,13)] = 3.6
    cutoff[(13,40)] = 3.6
    cutoff[(29,40)] = 3.6
    cutoff[(40,29)] = 3.6
    cutoff[(13,13)] = 3.6
    cutoff[(29,29)] = 3.6

    cutoff[(41,41)] = 3.7
    cutoff[(28,28)] = 3.7
    cutoff[(41,28)] = 3.7
    cutoff[(28,41)] = 3.7

    cutoff[(46,46)] = 3.45
    cutoff[(14,14)] = 3.45
    cutoff[(46,14)] = 3.45
    cutoff[(14,46)] = 3.45

    m.generate_neighbors(cutoff)

    voronoi_3d(m,cutoff)

    vp_dict = load_param_file(paramfile)
    set_atom_vp_types(m,vp_dict)

    #vor_stats(m) # Prints what you probably want
    #index_stats(m)
    count = 0
    for atom in m.atoms:
        #print(atom.vp.index[:4])
        if(atom.vp.index[:4] == (0,1,10,2)):
            atoms = [a for a in atom.neighs]+[atom]
            for neigh in atoms[1:]:  # renamed to avoid shadowing the outer loop variable
                #if( abs(neigh.coord[0]-atoms[0].coord[0]) > 10 ):
                fix_atom(m, atoms[0], neigh)
            vp = Model('0,1,10,2 vp',100,100,100,atoms)
            recenter_model(vp)
            vp.write('vp_data/vp{0}.xyz'.format(count))
            convert(vp,'polyhedron','vp_data/vp{0}.txt'.format(count))
            count += 1
Example #5
 def get_sharing_clusters(self,cutoff,numneighs,*cluster_types):
     """ Connected cluster finding via vertex/edge/face sharing. 
         The last argument (1,2, or 3) specifies which. """
     if(type(cluster_types[0]) == type(())):
         cluster_types = cluster_types[0]
     if(numneighs == 0):
         temp = 'interpenetrating'
     elif(numneighs == 1):
         temp = 'interpenetrating and vertex'
     elif(numneighs == 2):
         temp = 'interpenetrating, vertex, and edge'
     elif(numneighs == 3):
         temp = 'interpenetrating, vertex, edge, and face'
     else:
         raise Exception("Wrong argument passed to vertex/edge/face sharing cluster finding!")
     m = Model(self.model.comment, self.model.lx, self.model.ly, self.model.lz, self.model.atoms[:])
     m.generate_neighbors(cutoff)
     vp_atoms = [atom.copy() for atom in m.atoms if atom.vp.type in cluster_types]
     neighs = [[n for n in atom.neighs if n.vp.type in cluster_types] for atom in m.atoms]
     numfound = 0
     if(numneighs > 0): # Look for vertex, edge, or face sharing
         for i,atomi in enumerate(vp_atoms):
             print(i)
             # Interpenetrating
             ind = m.atoms.index(atomi)
             for atomj in vp_atoms[i+1:]:  # consider each unordered pair once
                 # Get all the neighbors they have in common
                 common_neighs = [n for n in atomi.neighs if n in atomj.neighs]
                 if(len(common_neighs) and (len(common_neighs) <= numneighs or numneighs == 3) ):
                     ind = m.atoms.index(atomi)
                     neighs[ind] = neighs[ind] + copy.copy([x for x in common_neighs if x not in neighs[ind]])
                     ind = m.atoms.index(atomj)
                     neighs[ind] = neighs[ind] + copy.copy([x for x in common_neighs if x not in neighs[ind]])
                     numfound += 1
     else:
         interpenetrating = sum(1 for atomi in vp_atoms for atomj in vp_atoms[vp_atoms.index(atomi)+1:] if atomi in atomj.neighs)
         numfound = interpenetrating
     for i,tf in enumerate(neighs):
         m.atoms[i].neighs = tf
     print('Total number of {0} atoms: {1}'.format(cluster_types,len(vp_atoms),temp))
     print('Total number of {2} sharing {0} atoms: {1}'.format(cluster_types,numfound,temp))
     # Now I should be able to go through the graph/model's neighbors.
     return self.search(m,cluster_types)
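A minimal sketch of how the numneighs argument might be exercised, assuming an AtomGraph-like object such as the one constructed in Example #9; count_sharing_clusters is a hypothetical helper written for illustration:

def count_sharing_clusters(ag, cutoff, *cluster_types):
    # numneighs: 0 = interpenetrating only, 1 = +vertex, 2 = +edge, 3 = +face sharing
    for numneighs, label in [(0, 'interpenetrating'), (1, 'vertex'),
                             (2, 'edge'), (3, 'face')]:
        clusters = ag.get_sharing_clusters(cutoff, numneighs, *cluster_types)
        print('{0}-sharing search found {1} cluster(s)'.format(label, len(clusters)))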
Example #6
def main():
    cutoff = {}
    cutoff[(40,40)] = 3.6
    cutoff[(13,29)] = 3.6
    cutoff[(29,13)] = 3.6
    cutoff[(40,13)] = 3.6
    cutoff[(13,40)] = 3.6
    cutoff[(29,40)] = 3.6
    cutoff[(40,29)] = 3.6
    cutoff[(13,13)] = 3.6
    cutoff[(29,29)] = 3.6

    cutoff[(41,41)] = 3.7
    cutoff[(28,28)] = 3.7
    cutoff[(41,28)] = 3.7
    cutoff[(28,41)] = 3.7

    cutoff[(46,46)] = 3.45
    cutoff[(14,14)] = 3.45
    cutoff[(46,14)] = 3.45
    cutoff[(14,46)] = 3.45

    paramfile = sys.argv[1]
    vp_dict = load_param_file(paramfile)

    modelfiles = sys.argv[2:]
    count = defaultdict(int) # Stores how many VPs have been found of each index type (only needed by the commented-out block below)
    count = 0 # the active code below uses a simple integer file counter, so this overwrites the defaultdict
    direc = 'ZrCuAl/md_80k/'
    for modelfile in modelfiles:
        print(modelfile)
        m = Model(modelfile)
        m.generate_neighbors(cutoff)
        #voronoi_3d(m,cutoff)
        #set_atom_vp_types(m,vp_dict)
        #vor_stats(m)
        #cats = index_stats(m)
        #for atom in m.atoms:
        #    #new = Model('0,0,12,0; number of atoms is {0};'.format(count), m.lx, m.ly, m.lz, atom.neighs + [atom])
        #    new = Model('{0}; number of atoms is {1};'.format(atom.vp.index, count[atom.vp.index]), m.lx, m.ly, m.lz, atom.neighs + [atom])
        #    fix_cluster_pbcs(new)
        #    val = normalize_bond_distances(new)
        #    new.comment = '{0}; number of atoms is {1}; bond length scaling factor is {2}'.format(atom.vp.index, count,val)
        #    center = find_center_atom(new)
        #    new.remove(center)
        #    new.add(center)
        #    vp_str = ''.join([str(x) for x in atom.vp.index])
        #    if not os.path.exists(direc+vp_str):
        #        os.makedirs(direc+vp_str)
        #    new.write(direc+'{0}/{0}.{1}.xyz'.format(vp_str, count[atom.vp.index]))
        #    count[atom.vp.index] += 1
        #print(count)
        cn = 0.0
        for atom in m.atoms:
            cn += atom.cn
        cn = float(cn)/m.natoms
        print(cn)

        for atom in m.atoms:
            new_cut = copy.copy(cutoff)
            old_cn = atom.cn
            inc = 0.0
            while atom.cn < 12:
                for key,val in new_cut.items(): new_cut[key] = val + 0.1
                inc += 0.1
                atom.neighs = m.get_atoms_in_cutoff(atom, new_cut)
                if(atom in atom.neighs): atom.neighs.remove(atom)
                atom.cn = len(atom.neighs)
            new = Model('CN changed from {0} to {1};'.format(old_cn, atom.cn), m.lx, m.ly, m.lz, atom.neighs + [atom])
            new.write('temp/{0}.xyz'.format(count))
            if inc > 0.0: print("Increased shell by {0} Ang. for atom {1}".format(inc, count))
            count += 1
        cn = 0.0
        for atom in m.atoms:
            cn += atom.cn
        cn = float(cn)/m.natoms
        print(cn)

    return 0  # NOTE: everything below this return is unreachable scrap code

    modelfile = sys.argv[2]
    m = Model(modelfile)
    xtal_atoms = sys.argv[3]
    xtal_atoms = Model(xtal_atoms).atoms

    #x,y,z = (round(x,6) for x in self.coord)
    #a,b,c = (round(x,6) for x in other.coord)
    #print("HERE")
    #print([round(x,7) for x in xtal_atoms[13].coord])
    #print([round(x,7) for x in m.atoms[153].coord])
    #print(type(xtal_atoms[13]))
    #print(type(m.atoms[153]))
    #print(xtal_atoms[13] == m.atoms[153])
    #return 0

    glassy_atoms = []
    for atom in m.atoms:
        if atom not in xtal_atoms:
            glassy_atoms.append(atom)
    print(len(glassy_atoms))
    print(len(xtal_atoms))
    assert len(glassy_atoms) + len(xtal_atoms) == m.natoms
    voronoi_3d(m, cutoff)
    set_atom_vp_types(m, vp_dict)
    m.generate_neighbors(3.45)
    head, tail = os.path.split(modelfile)
    head = head + '/'
    if not os.path.exists(head+'glassy/'):
        os.makedirs(head+'glassy/')
    if not os.path.exists(head+'xtal/'):
        os.makedirs(head+'xtal/')
    for count, atom in enumerate(xtal_atoms):
        i = m.atoms.index(atom)
        atom = m.atoms[i]
        new = Model('{0}'.format(atom.vp.index), m.lx, m.ly, m.lz, atom.neighs + [atom])
        fix_cluster_pbcs(new)
        val = normalize_bond_distances(new)
        new.comment = '{0}; bond length scaling factor is {1}'.format(atom.vp.index, val)
        center = find_center_atom(new)
        new.remove(center)
        new.add(center)
        vp_str = ''.join([str(x) for x in atom.vp.index])
        new.write(head+'xtal/{0}.xyz'.format(count))
    for count, atom in enumerate(glassy_atoms):
        i = m.atoms.index(atom)
        atom = m.atoms[i]
        new = Model('{0}'.format(atom.vp.index), m.lx, m.ly, m.lz, atom.neighs + [atom])
        fix_cluster_pbcs(new)
        val = normalize_bond_distances(new)
        new.comment = '{0}; bond length scaling factor is {1}'.format(atom.vp.index, val)
        center = find_center_atom(new)
        new.remove(center)
        new.add(center)
        vp_str = ''.join([str(x) for x in atom.vp.index])
        new.write(head+'glassy/{0}.xyz'.format(count))
    return 0
        




    for atom in volume_atoms.atoms:
        for i,atom2 in enumerate(m.atoms):
            if(atom.z == atom2.z and [round(x, 5) for x in atom.coord] == [round(x, 5) for x in atom2.coord]): good[i] = True
    count = defaultdict(int) # Stores how many VPs have been found of each index type
    for modelfile in modelfiles:
        print(modelfile)
        m = Model(modelfile)
        voronoi_3d(m,cutoff)
        set_atom_vp_types(m,vp_dict)
        #vor_stats(m)
        #cats = index_stats(m)
        for i,atom in enumerate(m.atoms):
            if not good[i]: continue
            #new = Model('0,0,12,0; number of atoms is {0};'.format(count), m.lx, m.ly, m.lz, atom.neighs + [atom])
            new = Model('{0}; number of atoms is {1};'.format(atom.vp.index, count), m.lx, m.ly, m.lz, atom.neighs + [atom])
            fix_cluster_pbcs(new)
            val = normalize_bond_distances(new)
            new.comment = '{0}; number of atoms is {1}; bond length scaling factor is {2}'.format(atom.vp.index, count,val)
            center = find_center_atom(new)
            new.remove(center)
            new.add(center)
            vp_str = ''.join([str(x) for x in atom.vp.index])
            if not os.path.exists(vp_str):
                os.makedirs(vp_str)
            new.write('{0}/{0}.{1}.xyz'.format(vp_str, count[atom.vp.index]))
            count[atom.vp.index] += 1
        print(count)
        for c,v in count.items():
            print("{0}: {1}".format(c,v))
        print(sum(count.values()))
        cn = 0.0
        for atom in m.atoms:
            cn += atom.cn
        cn = float(cn)/m.natoms
        print(cn)
Example #7
 def get_connected_clusters_with_neighs(self,cutoff,*cluster_types):
     """ Connected cluster finding via vertex sharing.
         Finds O -- O bonds and O -- X -- O bonds, where O
         represents an atom of the VP type(s) """
     # This code currently gives me a first nearest neighbor search. (Vertex sharing)
     m = Model(self.model.comment, self.model.lx, self.model.ly, self.model.lz, self.model.atoms[:])
     m.generate_neighbors(cutoff)
     count = 0
     for atom in m.atoms:
         keep = False
         if( atom.vp.type in cluster_types):
             keep = True
             #print('Keeping due to atom')
         ncount = 0
         if(not keep):
             temp = [n for n in atom.neighs if n.vp.type in cluster_types]
             if(len(temp) >= 1):
                 #keep = True
                 atom.neighs = [n for n in atom.neighs if n.vp.type in cluster_types]
         if(not keep):
             atom.neighs = [n for n in atom.neighs if n.vp.type in cluster_types]
             #print('Removing neighbors')
             #print(self.model.atoms[atom.id].neighs)
         else:
             count += 1
             if(atom.vp.type not in cluster_types): print(len(temp),ncount,atom)
     print('Total number of {0} atoms: {1}'.format(cluster_types,count))
     # Now I should be able to go through the graph/model's neighbors.
     clusters = []
     for atom in m.atoms:
         already_found = False
         for cluster in clusters:
             if atom in cluster:
                 already_found = True
         # If the VP atom isn't already in a cluster:
         if(not already_found and atom.vp.type in cluster_types):
             # Collect the connected component containing this atom (queue.pop()
             # takes from the end, so this is a depth-first walk; either order works)
             queue = []
             visited = {}
             queue.append(atom)
             visited[atom] = True
             while( len(queue) ):
                 t = queue.pop()
                 for n in t.neighs:
                     if( not visited.get(n,False) ):
                         queue.append(m.atoms[m.atoms.index(n)])
                         visited[n] = True
             clusters.append(list(visited))
     for i,atom in enumerate(clusters[0]):
         found = atom.vp.type in cluster_types
         for n in atom.neighs:
             if(n.vp.type in cluster_types):
                 found = True
         if(not found):
             print("AG found an atom that isn't connected to a VP type! {0} {1} {2} {3}".format(i+1,atom,atom.neighs,atom.vp.type))
             for atom2 in m.atoms:
                 if atom in atom2.neighs:
                     print('  It is connected to {0} {1} {2}'.format(atom2,atom2.neighs,atom2.vp.type))
                     for n in atom2.neighs:
                         print('   Dist from {0} to neighbor {1}: {2}. n.vp.type={3}'.format(atom2,n,m.dist(atom2,n),n.vp.type))
     for cluster in clusters:
         for atom in cluster:
             if(cluster.count(atom) > 1):
                 print('     ERROR!!!!')
                 #cluster.remove(atom)
     return clusters
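The loop above collects each connected component of the pruned neighbor graph. A stripped-down sketch of the same idea, free of the Model/VP machinery and assuming only that each atom object is hashable and has a .neighs list (an illustration of the technique, not the original implementation):

from collections import deque

def connected_clusters(atoms):
    """ Group atoms into connected clusters, following each atom's .neighs list. """
    visited = set()
    clusters = []
    for start in atoms:
        if start in visited:
            continue
        queue = deque([start])
        visited.add(start)
        cluster = []
        while queue:
            atom = queue.popleft()   # popleft() gives a true breadth-first order
            cluster.append(atom)
            for n in atom.neighs:
                if n not in visited:
                    visited.add(n)
                    queue.append(n)
        clusters.append(cluster)
    return clusters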
Example #8
def rdf_2d(m, dr):
    mean = 0.0
    std = 0.0
    # dr is the bin size in atom coordinate units (probably Angstroms).
    # It should be large enough that the intensity isn't 1 in every bin,
    # but small enough not to wash out important features (i.e. peaks).

    # Algorithm:
    # Calculate the distance between all pairs of atoms, along with the
    # rx and ry components of each. This results in two huge lists, rx and
    # ry. Then create a square matrix of size boxlen/dr and go through rx
    # and ry simultaneously, incrementing a matrix cell whenever an (rx, ry)
    # pair falls in it. Contour-plot the matrix.
    # (A sketch of the blur_image/detect_peaks helpers used below appears
    # after this function.)


    # Pseudocode
    #grid = Zeros(100,100)
    #for atomi in model:
    #    for atomj in model:
    #        if(atomi != atomj):
    #            rx = xdist(atomi,atomj)
    #            ry = ydist(atomi,atomj)
    #            grid[rx][ry] += 1

    size = int(np.ceil(m.lx/dr))
    #print("Initializing matrix: {0}x{1}".format(size,size))
    hist2d = np.zeros((size,size),dtype=int)  # np.int was removed in recent NumPy; the builtin int is equivalent here
    rx = []
    ry = []
    bindict = {}

    #print("Calculating all distances...")
    for i,atomi in enumerate(m.atoms):
        for j,atomj in enumerate(m.atoms):
            if(j != i):
                x,y = rxrydist(atomi,atomj,m)
                xbin = int(math.floor((x+m.lx/2.0)/dr))
                ybin = int(math.floor((y+m.ly/2.0)/dr))
                #try:
                #    bindict[(xbin,ybin)].append( [i,j] )
                #except KeyError:
                #    bindict[(xbin,ybin)] = [[i,j]]
                #rx.append(x)
                #ry.append(y)
                hist2d[xbin][ybin] += 1
    # This print statement prints the raw histogram image.
    #print(hist2d.tolist())
    orig_hist = copy(hist2d)

    # Blur the image to get thing smoother for analysis.
    # hist2d is no longer usable.
    hist2d = blur_image(hist2d,20)
    #print(hist2d)

    # Note: scipy.ndimage.measurements.find_objects did not work well.

    # Use the stdev and mean to find out what value constitutes a "peak".
    std = np.std(hist2d)
    mean = np.mean(hist2d)
    sm = 3*std+mean
    lsm = np.array([[0 if x < sm else x for x in row] for row in hist2d])  # zero out the background (below mean + 3*std)
    # This print statement prints the blurred image where the background is removed (set to 0).
    #print(lsm)

    # Detected peaks is a T/F mask. This actually finds where the peaks are.
    detected_peaks = detect_peaks(lsm)
    ## Set the center of each peak to 0 for viewing purposes, hist2d is no longer usable.
    #for i in range(0,len(detected_peaks)):
    #    for j in range(0,len(detected_peaks[i])):
    #        if(detected_peaks[i][j]): hist2d[i][j] = 0.0  # hist2d is blurred at this point as well
    #        if(detected_peaks[i][j]): orig_hist[i][j] = 0.0
    # This print line prints out the image matrix with each center black (ie 0).
    #print(hist2d.tolist())

    # Here we find where the peaks occur, using detected_peaks.
    peak_indexes = [[i*dr, j*dr] for i, row in enumerate(detected_peaks) for j, val in enumerate(row) if val]
    #for peak in peak_indexes:
    #    print(peak)
    # Calculate all the distances between a peak and the 0-peak.
    # First find the 0-peak.
    center = len(hist2d)*dr/2.0
    dmin = 100000000.0
    for ind in peak_indexes:
        d = math.sqrt( (center-ind[0])**2 + (center-ind[1])**2 )
        if(d < dmin):
            dmin = d
            center_peak = peak_indexes.index(ind)
    # Now calculate all the distances
    peak_dists = scipy.spatial.distance.cdist([peak_indexes[center_peak]], peak_indexes, 'euclidean')
    #peak_dists = [ x for x in peak_dists if x < 4.0 ]
    peak_dists.sort()
    print("Guess at plane spacings:")
    for x in peak_dists[0]: print(x)

    # Create a model out of the peak_indexes to do a BAD on
    atoms = copy(peak_indexes).tolist()
    for i in range(0,len(atoms)):
        atoms[i].append(0.0)
        atoms[i].insert(0,'Al')
        atoms[i].insert(0,i)
        atoms[i] = Atom(atoms[i][0],atoms[i][1],atoms[i][2],atoms[i][3],atoms[i][4])
    badmodel = Model('eh',m.lx,m.ly,m.lz, atoms)
    badmodel.generate_neighbors(4.0)
    g = bad(badmodel,180)
    print("Bond angle distribution:")
    for i in range(0,len(g[0])):
        print('{0}\t{1}'.format(g[0][i],g[1][i]))

    return (orig_hist,hist2d)
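blur_image and detect_peaks are helpers that are not shown in this snippet. A minimal sketch of plausible implementations, assuming a Gaussian blur and the common local-maximum-mask recipe built on scipy.ndimage (the author's actual helpers may differ):

import numpy as np
from scipy.ndimage import gaussian_filter, maximum_filter
from scipy.ndimage import binary_erosion, generate_binary_structure

def blur_image(image, n):
    # Gaussian smoothing; here n is treated as the kernel width (sigma).
    return gaussian_filter(np.asarray(image, dtype=float), sigma=n)

def detect_peaks(image):
    # Boolean mask that is True only at local maxima of `image`.
    neighborhood = generate_binary_structure(2, 2)
    local_max = maximum_filter(image, footprint=neighborhood) == image
    # Exclude the flat zero background, which would otherwise count as "locally maximal".
    background = (image == 0)
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
    return local_max & ~eroded_background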
Example #9
def main():
    # sys.argv[1] should be the modelfile in the xyz format
    # sys.argv[2] should be the cutoff desired
    modelfile = sys.argv[1]
    cutoff = float(sys.argv[2])
    ag = AtomGraph(modelfile,cutoff)
    model = Model(modelfile)
    model.generate_neighbors(cutoff)
    #submodelfile = sys.argv[3]

    #mixedmodel = Model('Mixed atoms',model.lx,model.ly,model.lz, [atom for atom in ag.model.atoms if atom.vp.type == 'Mixed'])
    #icolikemodel = Model('Ico-like atoms',model.lx,model.ly,model.lz, [atom for atom in ag.model.atoms if(atom.vp.type == 'Icosahedra-like' or atom.vp.type == 'Full-icosahedra')])
    #fullicomodel = Model('Full-icosahedra atoms',model.lx,model.ly,model.lz, [atom for atom in ag.model.atoms if atom.vp.type == 'Full-icosahedra'])
    #xtalmodel = Model('Xtal-like atoms',model.lx,model.ly,model.lz, [atom for atom in ag.model.atoms if atom.vp.type == 'Crystal-like'])
    #undefmodel = Model('Undef atoms',model.lx,model.ly,model.lz, [atom for atom in ag.model.atoms if atom.vp.type == 'Undef'])
    ##mixedmodel.write_cif('mixed.cif')
    ##mixedmodel.write_our_xyz('mixed.xyz')
    ##icolikemodel.write_cif('icolike.cif')
    ##icolikemodel.write_our_xyz('icolike.xyz')
    ##fullicomodel.write_cif('fullico.cif')
    ##fullicomodel.write_our_xyz('fullico.xyz')
    ##xtalmodel.write_cif('xtal.cif')
    ##xtalmodel.write_our_xyz('xtal.xyz')
    ##undefmodel.write_cif('undef.cif')
    ##undefmodel.write_our_xyz('undef.xyz')
    #icomixedmodel = Model('ico+mix atoms',model.lx,model.ly,model.lz, mixedmodel.atoms + icolikemodel.atoms)
    ##mixedmodel.write_cif('icomixed.cif')
    ##mixedmodel.write_our_xyz('icomixed.xyz')
    #vpcoloredmodel = Model('vp colored atoms',model.lx,model.ly,model.lz, ag.model.atoms)
    #for atom in vpcoloredmodel.atoms:
    #    if(atom.vp.type == 'Full-icosahedra'):
    #        atom.z = 1
    #    elif(atom.vp.type == 'Icosahedra-like'):
    #        atom.z = 2
    #    elif(atom.vp.type == 'Mixed'):
    #        atom.z = 3
    #    elif(atom.vp.type == 'Crystal-like'):
    #        atom.z = 4
    #    elif(atom.vp.type == 'Undef'):
    #        atom.z = 5
    ##vpcoloredmodel.write_cif('vpcolored.cif')
    ##vpcoloredmodel.write_our_xyz('vpcolored.xyz')
    #subvpcoloredmodel = Model(submodelfile)
    #for atom in subvpcoloredmodel.atoms:
    #    atom.z = vpcoloredmodel.atoms[ag.model.atoms.index(atom)].z
    #subvpcoloredmodel.write_cif('subvpcolored.cif')
    #subvpcoloredmodel.write_our_xyz('subvpcolored.xyz')
    #return

    golden = False

    #cluster_prefix = 'ico.t3.'
    #cluster_types = 'Icosahedra-like', 'Full-icosahedra' # Need this for final/further analysis

    #cluster_prefix = 'fi.t3.'
    #cluster_types = ['Full-icosahedra'] # Need this for final/further analysis

    cluster_prefix = 'xtal.t3.'
    cluster_types = 'Crystal-like' # Need this for final/further analysis (a bare string, so the `in cluster_types` checks below do substring matching)

    #cluster_prefix = 'mix.t3'
    #cluster_types = ['Mixed'] # Need this for final/further analysis

    #cluster_prefix = 'undef.t3'
    #cluster_types = ['Undef'] # Need this for final/further analysis

    # Decide what type of clustering you want to do
    #clusters = ag.get_clusters_with_n_numneighs(cutoff,5,cluster_types) #Vertex
    #clusters = ag.get_vertex_sharing_clusters(cutoff,cluster_types) #Vertex
    #clusters = ag.get_edge_sharing_clusters(cutoff,cluster_types) #Edge
    #clusters = ag.get_face_sharing_clusters(cutoff,cluster_types) #Face
    #clusters = ag.get_interpenetrating_atoms(cutoff,cluster_types) #Interpenetrating
    #clusters = ag.get_interpenetrating_clusters_with_neighs(cutoff,cluster_types) #Interpenetrating+neighs
    #clusters = ag.get_connected_clusters_with_neighs(cutoff, cluster_types) #Connected (vertex) + neighs
    v,e,f,i = ag.vefi_sharing(cluster_types)
    print("V: {0}  E: {1}  F: {2}  I: {3}".format(int(v),int(e),int(f),int(i)))
    return  # NOTE: the code below never runs unless this return is removed and one of the cluster-finding calls above is uncommented (otherwise `clusters` is undefined)

    orig_clusters = clusters[:]
    # Print orig clusters
    j = 0
    for i,cluster in enumerate(clusters):
        print("Orig cluster {0} contains {1} atoms.".format(i,len(cluster)))
        if(golden):
            for atom in cluster:
                if(atom.vp.type in cluster_types):
                    atom.z = 0
        # Save cluster files
        cluster_model = Model("Orig cluster {0} contains {1} atoms.".format(i,len(cluster)),model.lx, model.ly, model.lz, cluster)
        cluster_model.write_cif('{1}cluster{0}.cif'.format(i,cluster_prefix))
        cluster_model.write_our_xyz('{1}cluster{0}.xyz'.format(i,cluster_prefix))

    allclusters = []
    for cluster in clusters:
        for atom in cluster:
            if(atom not in allclusters):
                allclusters.append(atom)
                #if(atom.vp.type in cluster_types): print('  {0}\t{1}'.format(atom,atom.vp.type))
    allclusters = Model("All clusters.",model.lx, model.ly, model.lz, allclusters)
    allclusters.write_cif('{0}allclusters.cif'.format(cluster_prefix))
    allclusters.write_our_xyz('{0}allclusters.xyz'.format(cluster_prefix))
    print("{0}allclusters.cif and {0}allclusters.xyz contain {1} atoms.".format(cluster_prefix, allclusters.natoms))

    if(not golden):
        x_cluster = []
        for i,atom in enumerate(model.atoms):
            if atom not in allclusters.atoms:
                x_cluster.append(atom)
        x_cluster = Model("Opposite cluster of {0}".format(cluster_prefix),model.lx, model.ly, model.lz, x_cluster)
        x_cluster.write_cif('{0}opposite.cif'.format(cluster_prefix))
        x_cluster.write_our_xyz('{0}opposite.xyz'.format(cluster_prefix))
        print('{0}opposite.cif and {0}opposite.xyz contain {1} atoms.'.format(cluster_prefix, x_cluster.natoms))
    
    if(False): # Further analysis
        cn = 0.0
        for atom in model.atoms:
            cn += atom.cn
        cn /= model.natoms

        vpcn = 0.0
        count = 0
        for atom in ag.model.atoms:
            if( atom.vp.type in cluster_types ):
                vpcn += atom.cn
                count += 1
        vpcn /= count

        natomsinVPclusters = allclusters.natoms # Number of atoms in VP clusters
        nVPatoms = count # Number of VP atoms
        numsepVPatoms = nVPatoms * vpcn # Number of atoms in VP clusters if all clusters were separated
        maxnumatoms = model.natoms # Max number of atoms in VP clusters if all clusters were separated but still within the model size

        print('Average CN is {0}'.format(cn))
        print('Average CN of VP atoms is {0}'.format(vpcn))
        print('# atoms in all clusters: {0}. # VP atoms * vpcn: {1}. # VP atoms: {2}'.format(natomsinVPclusters,numsepVPatoms,nVPatoms))
        print('~ Number of VP that can fit in the model: {0}'.format(maxnumatoms/vpcn))
        print('Ratio of: (# atoms involved in VP clusters)/(# atoms involved in VP clusters if all clusters were completely separated):                          {0}%  <--- Therefore {1}% sharing.'.format(round(float(natomsinVPclusters)/(numsepVPatoms)*100.0,3),100.0-round(float(natomsinVPclusters)/(numsepVPatoms)*100.0,3)))
        print('Ratio of: (# atoms involved in VP clusters)/(# atoms involved in VP clusters if all clusters were separated as much as possible within the model): {0}%  <--- Therefore {1}% sharing.'.format(round(float(natomsinVPclusters)/min(numsepVPatoms,maxnumatoms)*100.0,3),100.0-round(float(natomsinVPclusters)/min(numsepVPatoms,maxnumatoms)*100.0,3) if numsepVPatoms < maxnumatoms else round(float(natomsinVPclusters)/min(numsepVPatoms,maxnumatoms)*100.0,3)))

        vor_instance = Vor()
        vor_instance.runall(modelfile,cutoff)
        index = vor_instance.get_indexes()
        vor_instance.set_atom_vp_indexes(model)
        vp_dict = categorize_vor.load_param_file('/home/jjmaldonis/model_analysis/scripts/categorize_parameters_iso.txt')
        atom_dict = categorize_vor.generate_atom_dict(index,vp_dict)
        categorize_vor.set_atom_vp_types(model,vp_dict)
        # Count the number of common neighbors in each of the VP
        vp_atoms = []
        for atom in model.atoms:
            if(atom.vp.type in cluster_types):
                vp_atoms.append(atom)
        common_neighs = 0.0
        atom_pairs = []
        for atomi in vp_atoms:
            for atomj in vp_atoms:
                if(atomi != atomj):
                    if(atomi in atomj.neighs and [atomi,atomj] not in atom_pairs and [atomj,atomi] not in atom_pairs):
                        common_neighs += 1
                        atom_pairs.append([atomi,atomj])
                    #if(atomj in atomi.neighs): common_neighs += 0.5
                    for n in atomi.neighs:
                        if(n in atomj.neighs and [n,atomj] not in atom_pairs and [atomj,n] not in atom_pairs):
                            common_neighs += 1
                            atom_pairs.append([n,atomj])
                    #for n in atomj.neighs:
                    #    if(n in atomi.neighs): common_neighs += 0.5
        # Now common_neighs is the number of shared atoms
        #print(common_neighs)
        print('Percent shared based on common neighbors: {0}'.format(100.0*common_neighs/natomsinVPclusters))
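To make the sharing ratios printed above concrete, a small worked example with made-up numbers (purely illustrative, not taken from any model in this listing):

# Illustrative numbers only.
nVPatoms = 100            # number of VP-type atoms found
vpcn = 12.0               # average CN of those VP atoms
natomsinVPclusters = 800  # atoms that ended up in the merged VP clusters

numsepVPatoms = nVPatoms * vpcn   # 1200 atoms if every VP cluster were fully separate
sharing = 100.0 - 100.0 * natomsinVPclusters / numsepVPatoms
print('{0:.1f}% sharing'.format(sharing))   # prints "33.3% sharing"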