def vacancylabel_io(file_path, filename):
    orig_dir = os.getcwd()
    try:
        os.chdir(file_path)
    except OSError:
        print("Couldn't find that directory, check syntax (/mnt/d/UserName/...)")
        sys.exit(1)
    try:
        pipeline = import_file(filename)
    except Exception:
        print("File not found (check the name and that you're in the right directory)")
        sys.exit(1)
    wigmod = WignerSeitzAnalysisModifier(
        per_type_occupancies=True,
        affine_mapping=ReferenceConfigurationModifier.AffineMapping.ToReference)
    wigmod.reference = FileSource()
    try:
        wigmod.reference.load('FCCreference.lmp')
    except Exception:
        print('FCCreference.lmp was not found!')
        sys.exit(1)
    pipeline.modifiers.append(wigmod)
    pipeline.modifiers.append(
        ComputePropertyModifier(output_property='Particle Type', expressions='3'))
    # see custom modifier above
    pipeline.modifiers.append(modify)
    pipeline.modifiers.append(InvertSelectionModifier())
    pipeline.modifiers.append(DeleteSelectedModifier())
    # export a file with the vacancies only (type = 3)
    export_file(pipeline, filename.split('.')[0] + "vacancies.xyz", "xyz",
                columns=['Particle Identifier', 'Particle Type',
                         'Position.X', 'Position.Y', 'Position.Z'],
                multiple_frames=True)
    # new pipeline to combine the vacancies with the original data (this will
    # append the vacancies at the end of the Particle IDs and reassign new IDs
    # accordingly)
    newpipeline = import_file(filename)
    mod = CombineDatasetsModifier()
    mod.source.load(filename.split('.')[0] + 'vacancies.xyz')
    newpipeline.modifiers.append(mod)
    # make a new file with the vacancies and all atoms included for visualization
    export_file(newpipeline, "new" + filename.split('.')[0] + ".xyz", "xyz",
                columns=['Particle Identifier', 'Particle Type',
                         'Position.X', 'Position.Y', 'Position.Z'],
                multiple_frames=True)
    os.chdir(orig_dir)
def frames(trjfile: str, nframes: Optional[int] = None) -> np.ndarray:
    """
    Get the frames from the LAMMPS trajectory using the ovito pipeline and
    the import_file function. Returns the particle positions of the frames
    used to calculate the Lindemann index.
    """
    if not os.path.exists(trjfile):
        raise RuntimeError(f"Error: file {trjfile} not found!")
    pipeline = import_file(trjfile, sort_particles=True)
    num_frame = pipeline.source.num_frames
    pipeline.modifiers.append(
        SelectTypeModifier(operate_on="particles",
                           property="Particle Type",
                           types={1, 2, 3}))
    data = pipeline.compute()
    num_particle = data.particles.count
    # If no argument is given, use all frames
    if nframes is None:
        nframes = num_frame
    # make sure nobody requests more frames than exist
    assert num_frame >= nframes
    # initialise the array; could be problematic for big clusters and many frames
    position = np.zeros((nframes, num_particle, 3))
    for frame in range(nframes):
        data = pipeline.compute(frame)
        position[frame, :, :] = np.array(data.particles["Position"])
    return position
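# A minimal usage sketch for frames() above. "md.lammpstrj" and the frame
# count of 100 are hypothetical values, not taken from this code.
if __name__ == "__main__":
    positions = frames("md.lammpstrj", nframes=100)
    # One (num_particles, 3) coordinate block per frame:
    print(positions.shape)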
def vp(name, frame, edge_threshold=0.1):
    '''
    Grab the Voronoi polyhedra (VP) indices.

    inputs:
        name = The name, with path, of the trajectory file
        frame = The frame of the trajectory
        edge_threshold = The threshold for the edges considered
    outputs:
        indexes = The VP indices
    '''
    # Load input data and create an ObjectNode with a data pipeline.
    node = import_file(name, multiple_frames=True)
    voro = VoronoiAnalysisModifier(compute_indices=True,
                                   use_radii=False,
                                   edge_threshold=edge_threshold)
    node.modifiers.append(voro)
    out = node.compute(frame)
    indexes = out.particle_properties['Voronoi Index'].array
    return indexes
def validate_coords(coords_path, bond_path):
    holder = readPDB(coords_path)
    bonds = readbond(bond_path)
    if use_ovito:
        pipeline = import_file(coords_path)
        types = pipeline.source.data.particles.particle_types
        for i in range(holder.num_atoms):
            for j in range(i + 1, holder.num_atoms):
                if ([i + 1, j + 1] not in bonds) and ([j + 1, i + 1] not in bonds):
                    if np.linalg.norm(holder.pos[i] - holder.pos[j]) <= (
                            types.type_by_name(holder.el_names[i]).radius +
                            types.type_by_name(holder.el_names[j]).radius):
                        return False
    else:
        for i in range(holder.num_atoms):
            for j in range(i + 1, holder.num_atoms):
                if ([i + 1, j + 1] not in bonds) and ([j + 1, i + 1] not in bonds):
                    if (holder.el_names[i] in atomic_radii) and (holder.el_names[j] in atomic_radii):
                        if np.linalg.norm(holder.pos[i] - holder.pos[j]) <= (
                                atomic_radii[holder.el_names[i]] +
                                atomic_radii[holder.el_names[j]]):
                            return False
                    else:
                        if np.linalg.norm(holder.pos[i] - holder.pos[j]) < 1.5:
                            return False
    return True
def Mods(Fracs, Beads, Denss):
    pre = 'results/snapshots/coordination_analysis/f_'
    for Frac in Fracs:
        f = int((1 - Frac) * 10 % 10)
        for Bead in Beads:
            # os.makedirs('./' + str(pre) + str(f) + '/b_' + str(Bead))
            for Dens in Denss:
                data = import_file('./frac_' + str(Frac) + '/m_' + str(Bead) +
                                   '/d_' + str(Dens) + '/final_snapshot.xyz')
                co = ((Bead * 1000) / Dens)**(1 / 3)
                modifier = CoordinationAnalysisModifier(cutoff=co / 2,
                                                        partial=True,
                                                        number_of_bins=180)
                data.modifiers.append(modifier)
                export_file(data,
                            str(pre) + str(f) + "/b_" + str(Bead) + "/d_" +
                            str(Dens) + ".txt", "txt/table")
    exit()
def calc_ovito_cna(atoms_in):
    # from ovito.pipeline import StaticSource, Pipeline
    # from ovito.io.ase import ase_to_ovito
    from ovito.io import import_file
    from ovito.modifiers import CommonNeighborAnalysisModifier

    print('==> running CNA in ovito ')
    # atoms = copy.deepcopy(atoms_in)
    # data = ase_to_ovito(atoms)
    # pipeline = Pipeline(source = StaticSource(data = data))
    pipeline = import_file('CONTCAR_for_ovito')
    modifier = CommonNeighborAnalysisModifier()
    pipeline.modifiers.append(modifier)
    data = pipeline.compute()
    cna = data.tables['structures'].xy()
    mask = cna[:, 1] > 0.5
    cna1 = cna[mask]
    if cna1.shape[0] != 1:
        sys.exit('ABORT: wrong CNA. ')
    ovito_struc = ['other', 'fcc', 'hcp', 'bcc', 'ico']
    struc = ovito_struc[int(cna1[0, 0])]  # cast, since xy() may return floats
    return struc
def read_lammps_dump(fdump, nequil=None, iframes=None):
    from ovito.io import import_file
    pl = import_file(fdump)  # pipeline
    nframe = pl.source.num_frames
    traj = []
    if iframes is None:
        iframes = range(nframe)
    if nequil is not None:
        iframes = iframes[nequil:]
    for iframe in iframes:
        dc = pl.compute(iframe)  # data collection
        atoms = dc.to_ase_atoms()
        atoms.info['Timestep'] = dc.attributes['Timestep']
        keys_to_delete = [key for key in atoms.arrays.keys()
                          if key not in ['numbers', 'positions']]
        for key in keys_to_delete:
            del atoms.arrays[key]
        # any results to add?
        results = get_particle_results(dc)
        if 'Charge' in results:  # add charges
            atoms.set_initial_charges(results['Charge'])
        add_results = {}
        lmp_names = ['Force', 'Dipole Orientation']
        ase_names = ['forces', 'dipole']
        for lmp_name, ase_name in zip(lmp_names, ase_names):
            if lmp_name in results:
                add_results[ase_name] = results[lmp_name]
        traj.append(atoms)
        if len(add_results) > 0:
            from ase.calculators.singlepoint import SinglePointCalculator
            calc = SinglePointCalculator(atoms)
            calc.results.update(add_results)
            atoms.set_calculator(calc)
    return traj
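# A hedged usage sketch for read_lammps_dump(); "dump.lammpstrj" is a
# hypothetical file name, and nequil=10 skips the first ten frames as
# equilibration.
if __name__ == "__main__":
    traj = read_lammps_dump("dump.lammpstrj", nequil=10)
    for atoms in traj[:3]:
        print(atoms.info["Timestep"], len(atoms))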
def vp(name, frame, maxedge=6, threshold=0.1):
    '''
    Calculate the Voronoi polyhedra.

    inputs:
        name = trajectory file
        frame = frame of interest
        maxedge = the maximum Voronoi polyhedra edge count
        threshold = minimum length for an edge to be counted
    '''
    # Load input data and create an ObjectNode with a data pipeline.
    node = import_file(name, multiple_frames=True)
    voro = VoronoiAnalysisModifier(compute_indices=True,
                                   use_radii=False,
                                   edge_count=maxedge,
                                   edge_threshold=threshold)
    node.modifiers.append(voro)
    out = node.compute(frame)
    indexes = out.particle_properties['Voronoi Index'].array
    return indexes
def ico(self, traj_path, in_path, edges, faces, threshold=0.1,
        write=True, verbose=True):
    '''
    Compute the ICO fraction at low T.

    inputs:
        self = the object reference
        traj_path = The path to the trajectory snapshots
        in_path = The path to the input file
        edges = the number of VP edges
        faces = the minimum number of faces for the specified edges
        threshold = the minimum length for a VP edge to be counted
        write = whether or not to save the fractions and temperatures
        verbose = whether or not to print calculation status
    outputs:
        fraction = the ICO fraction at low T
    '''
    if verbose:
        print('Calculating ICO fraction at low T')
    edges -= 1  # Compensate for zero-based indexing
    df, counts = traj.info(traj_path)
    frame = df.index[-1]  # The last frame
    # Load input data and create an ObjectNode with a data pipeline.
    node = import_file(traj_path, multiple_frames=True)
    voro = VoronoiAnalysisModifier(compute_indices=True,
                                   use_radii=False,
                                   edge_threshold=threshold)
    node.modifiers.append(voro)
    out = node.compute(frame)
    indexes = out.particle_properties['Voronoi Index'].array
    indexes = indexes[:, edges]  # Gather edge bin
    count = sum(indexes >= faces)  # Count condition
    fraction = count / indexes.shape[0]  # Calculate fraction
    if write:
        write_name = os.path.join(self.datapath, 'ico_at_tlow.txt')
        with open(write_name, 'w+') as outfile:
            outfile.write(str(fraction))
    return fraction
def calc(name, start, stop):
    '''
    Load the LAMMPS trajectories and calculate the MSD.
    '''
    # Load input data and create an ObjectNode with a data pipeline.
    node = import_file(name, multiple_frames=True)
    # Calculate per-particle displacements with respect to a start frame
    modifier = CalculateDisplacementsModifier()
    modifier.assume_unwrapped_coordinates = True
    modifier.reference.load(name)
    modifier.reference_frame = start
    node.modifiers.append(modifier)
    # Insert custom modifier into the data pipeline.
    node.modifiers.append(PythonScriptModifier(function=msdmodify))
    # The variables where data will be held
    msd = []
    msdeim = []
    step = []
    msd_types = {}
    msd_types_eim = {}
    order = []
    for type in node.compute().particles['Particle Type'].types:
        msd_types[type.id] = []
        msd_types_eim[type.id] = []
        order.append(type.id)
    # Compute the MSD for each frame of interest
    for frame in range(start, stop + 1):
        out = node.compute(frame)
        msd.append(out.attributes['MSD'])
        msdeim.append(out.attributes['MSD_EIM'])
        step.append(out.attributes['Timestep'])
        for type in out.particles['Particle Type'].types:
            attr_name = 'MSD_type' + str(type.id)
            attr_name_eim = 'MSD_type_EIM' + str(type.id)
            msd_types[type.id].append(out.attributes[attr_name])
            msd_types_eim[type.id].append(out.attributes[attr_name_eim])
    # MSD data
    msdall = {}
    msdall['all'] = msd
    msdall['all_EIM'] = msdeim
    # Create columns for each particle type and ensure type order
    for key in msd_types:
        msdall[str(key)] = msd_types[key]
        msdall[str(key) + '_EIM'] = msd_types_eim[key]
    return msdall
def ovito_view(sample_path, filename, view="Perspective"):
    """
    Use the ovito package to make visualizations of molecules.

    Parameters
    ----------
    sample_path : str
        The path of the file to visualize
    filename : str
        The name of the output image file
    view : str (optional)
        The view to use
    """
    if use_ovito:
        # Import the sample file.
        pipeline = import_file(sample_path)
        pipeline.source.data.cell.vis.enabled = False
        pipeline.source.data.particles.vis.radius = 0.5
        pipeline.add_to_scene()
        vp = Viewport()
        if view == "Perspective":
            vp.type = Viewport.Type.Perspective
            vp.camera_dir = (-1, -1, -1)
        elif view == "Ortho":
            vp.type = Viewport.Type.Ortho
            vp.camera_dir = (-1, -1, -1)
        elif view == "Top":
            vp.type = Viewport.Type.Top
        elif view == "Bottom":
            vp.type = Viewport.Type.Bottom
        elif view == "Front":
            vp.type = Viewport.Type.Front
        elif view == "Back":
            vp.type = Viewport.Type.Back
        elif view == "Left":
            vp.type = Viewport.Type.Left
        elif view == "Right":
            vp.type = Viewport.Type.Right
        vp.zoom_all()
        vp.render_image(size=(800, 600), filename=filename,
                        background=(0, 0, 0), frame=0)
        pipeline.remove_from_scene()
    else:
        print("Cannot use function ovito_view - the package ovito is not "
              "installed or cannot be found.")
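# A usage sketch for ovito_view(); both paths are hypothetical placeholders.
if __name__ == "__main__":
    ovito_view("molecule.xyz", "molecule_top.png", view="Top")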
def main(args):
    if args.verbosity > 0:
        print("input file: {}".format(args.input_file))
        print("output file: {}".format(args.output_file))
    if args.verbosity == 2:
        print("dimensions: {}".format(args.dimensions))
        print("renderer: {}".format(args.renderer))
        print("particle size: {}".format(args.particle_size))
    if args.renderer == 'tachyon':
        renderer = TachyonRenderer()
    elif args.renderer == 'opengl':
        renderer = OpenGLRenderer()
    else:
        raise ValueError("{} is not a valid renderer".format(args.renderer))
    node = import_file(args.input_file, multiple_frames=True)
    node.add_to_scene()
    vp = dataset.viewports.viewports[args.viewport]
    if args.output_ext in MOVIE_TYPES and args.frames_per_second is not None:
        dataset.anim.frames_per_second = args.frames_per_second
    node.compute()
    if args.overlay_frame_num:
        overlay = TextLabelOverlay(
            text='[SourceFrame]',
            alignment=QtCore.Qt.AlignRight ^ QtCore.Qt.AlignBottom,
            # offset_y=0.1,
            font_size=0.05,
            text_color=(0, 0, 0),
        )
        vp.overlays.append(overlay)
    vp.zoom_all()
    for t in node.output.particle_properties.particle_type.type_list:
        t.radius = args.particle_size
    os.makedirs(os.path.dirname(args.output_file), exist_ok=True)
    for i in range(len(args.custom_range)):
        if args.custom_range[i] < 0:
            args.custom_range[i] += node.source.num_frames
    if args.output_ext in MOVIE_TYPES:
        range_opt = RenderSettings.Range.CUSTOM_INTERVAL
    elif args.output_ext in IMAGE_TYPES:
        dataset.anim.current_frame = args.custom_range[0]
        range_opt = RenderSettings.Range.CURRENT_FRAME
    settings = RenderSettings(
        filename=args.output_file,
        size=args.dimensions,
        range=range_opt,
        custom_range=args.custom_range,
        renderer=renderer,
    )
    if args.output_ext in MOVIE_TYPES:
        settings.everyNthFrame = args.every_nth_frame
    vp.render(settings)
def Cut(Fracs, Beads, Denss, Cut):
    os.makedirs(f'./results/snapshots/cluster_analysis/cuts-phobics/{Cut}')
    for Frac in Fracs:
        print(f'Starting frac {Frac}')
        table = pd.DataFrame({'Densities': Denss}).set_index('Densities')
        for Bead in Beads:
            print(f'Starting bead {Bead}')
            clusters = []
            for Dens in Denss:
                print(f'Starting dens {Dens}')
                cutter = ClusterAnalysisModifier(cutoff=Cut)
                pipeline = import_file(
                    f'./frac_{Frac}/m_{Bead}/d_{Dens}/pb.xyz')
                pipeline.modifiers.append(cutter)
                data = pipeline.compute()
                clusters_table = data.tables['clusters']
                number_of_clusters = len(clusters_table['Cluster Identifier'])
                clusters.append(number_of_clusters)
                print(f'Dens {Dens} finished.')
                print('')
            table[Bead] = clusters
            print(f'Bead {Bead} finished.')
            print('')
        table.to_csv(
            f'./results/snapshots/cluster_analysis/cuts-phobics/{Cut}/Frac_{Frac}.csv'
        )
        print(f'Frac {Frac} finished.')
        print('')
def Frame_CNA(frame, R_Cut, Masterkey=None, filename=None):
    if Masterkey is None:
        Masterkey = []
    pipeline = import_file(filename)
    pipeline.modifiers.append(CreateBondsModifier(cutoff=R_Cut))
    pipeline.modifiers.append(
        CommonNeighborAnalysisModifier(
            mode=CommonNeighborAnalysisModifier.Mode.BondBased))
    data = pipeline.compute(frame)

    # The 'CNA Indices' bond property is a two-dimensional array
    # containing the three CNA indices computed for each bond in the system.
    cna_indices = data.particles.bonds['CNA Indices']

    # This helper function takes a two-dimensional array and computes the
    # frequency histogram of the data rows using some NumPy magic.
    # It returns two arrays (of the same length):
    #   1. The list of unique data rows from the input array
    #   2. The number of occurrences of each unique row
    def row_histogram(a):
        ca = np.ascontiguousarray(a).view([('', a.dtype)] * a.shape[1])
        unique, indices, inverse = np.unique(ca, return_index=True,
                                             return_inverse=True)
        counts = np.bincount(inverse)
        return (a[indices], counts)

    # Used below for enumerating the bonds of each particle:
    bond_enumerator = BondsEnumerator(data.particles.bonds)

    # Loop over particles and collect their CNA indices.
    all_cnas = {}
    for particle_index in range(data.particles.count):
        # Create local list with CNA indices of the bonds of the current particle.
        bond_index_list = list(
            bond_enumerator.bonds_of_particle(particle_index))
        local_cna_indices = cna_indices[bond_index_list]
        # Count how often each type of CNA triplet occurred.
        unique_triplets, triplet_counts = row_histogram(local_cna_indices)
        # Accumulate the triplets with their respective counts.
        for triplet, count in zip(unique_triplets, triplet_counts):
            try:
                all_cnas[tuple(triplet)] += count
            except KeyError:
                all_cnas[tuple(triplet)] = count
                if tuple(triplet) not in Masterkey:
                    Masterkey.append(tuple(triplet))
    return all_cnas, Masterkey
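# A minimal sketch of driving Frame_CNA() over several frames while
# accumulating the shared Masterkey of observed CNA signatures; the file name
# "traj.xyz", the 3.0 cutoff, and the 5-frame range are illustrative
# assumptions only.
if __name__ == "__main__":
    masterkey = []
    per_frame = []
    for f in range(5):
        cnas, masterkey = Frame_CNA(f, R_Cut=3.0, Masterkey=masterkey,
                                    filename="traj.xyz")
        per_frame.append(cnas)
    print("observed CNA signatures:", masterkey)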
def Mods(Fracs, Beads, Denss, Inters):
    for Frac in Fracs:
        frac = int((1 - Frac) * 10 % 10)
        print(frac)
        print('')
        for Bead in Beads:
            print(Bead)
            print('')
            for Dens in Denss:
                print(Dens)
                print('')
                for Inter in Inters:
                    print(Inter)
                    print('')
                    File = f'centros_de_massas/{Inter}/f_{frac}-b_{Bead}-d_{Dens}.xyz'
                    path = f'centros_de_massas/{Inter}/rdfs/f-'
                    data = import_file(File)
                    co = ((Bead * 1000) / Dens)**(1 / 3)
                    modifier = CoordinationAnalysisModifier(cutoff=co / 2,
                                                            number_of_bins=60)
                    data.modifiers.append(modifier)
                    export_file(data,
                                path + str(frac) + "_b-" + str(Bead) + "_d-" +
                                str(Dens) + ".txt", "txt/table")
                    print('Done. Next Inter')
                print('Done. Next Dens')
            print('Done. Next Bead')
        print('Done. Next Frac')
        print('')
    print("that's all, folks!")
    exit()
def write_lammps_dump(fout, fxyz, charge=False, columns=None):
    import os
    if os.path.isfile(fout):
        raise RuntimeError('%s exists' % fout)
    from ovito.io import import_file, export_file
    pl = import_file(fxyz)
    if columns is None:
        columns = ["Particle Identifier", "Particle Type",
                   "Position.X", "Position.Y", "Position.Z"]
    if charge:
        cname = 'Charge'
        columns += [cname]

        def add_charge(frame, data):
            data.particles_.create_property(
                cname, data=data.particles['initial_charges'])

        pl.modifiers.append(add_charge)
    export_file(pl, fout, 'lammps/dump', columns=columns,
                multiple_frames=True)
def rdfcalc(name, frame, cut, bins):
    '''
    Use ovito to calculate the RDF.
    '''
    # Load a particle dataset
    node = import_file(name, multiple_frames=True)
    # Apply the modifier
    modifier = CoordinationNumberModifier(cutoff=cut, number_of_bins=bins)
    node.modifiers.append(modifier)
    # Compute the RDF of the requested frame
    out = node.compute(frame)
    return modifier.rdf
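# A usage sketch for rdfcalc(); the file name, cutoff, and bin count are
# placeholder assumptions. The returned object holds the binned RDF data
# computed by the (OVITO 2.x era) CoordinationNumberModifier.
if __name__ == "__main__":
    rdf = rdfcalc("traj.lammpstrj", frame=0, cut=6.0, bins=100)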
def visualize(infile, outfile='out.png'):
    '''
    This function takes an xyz file as an argument and visualizes the
    particles using Ovito.
    '''
    pipeline = import_file(infile)  # Create a pipeline object
    pipeline.add_to_scene()  # Add pipeline to the three-dimensional scene
    vp = Viewport()  # Object defining the viewpoint from which the scene is seen
    vp.type = Viewport.Type.Perspective
    vp.camera_pos = (-250, -250, 300)
    vp.camera_dir = (3, 3, -3)
    vp.fov = math.radians(60.0)
    vp.render_image(size=(800, 600), filename=outfile,
                    background=(0, 0, 0), frame=8)
    pipeline.remove_from_scene()
def render(self, latticePath):
    name = latticePath
    cols = ['Particle Type', 'Position.X', 'Position.Y']
    node = import_file(name, columns=cols)
    node.add_to_scene()
    rs = RenderSettings(
        filename=name[:name.find('.')] + '.png',
        size=(1024, 768),
        background_color=(0.4, 0.4, 0.4)
        # generate_alpha=True
    )
    vp = ovito.dataset.viewports.active_vp
    vp.type = Viewport.Type.TOP
    vp.zoom_all()
    vp.render(rs)
    node.remove_from_scene()
def main(pdb_path, png_path):
    node = import_file(pdb_path)
    node.add_to_scene()
    vp = dataset.viewports.active_vp
    # node.modifiers.append(SelectExpressionModifier(expression='Position.X < Position.Y'))
    node.modifiers.append(SliceModifier(distance=0, normal=(1, -1, 0)))
    node.compute()
    particle_types = node.output.particle_properties.particle_type
    particle_types.get_type_by_name('H').color = (1, 0.5, 0.5)
    particle_types.get_type_by_name('He').color = (0.5, 0.5, 1)
    particle_types.get_type_by_name('O').color = (0.5, 0.5, 0.5)
    os.makedirs(os.path.dirname(png_path), exist_ok=True)
    settings = RenderSettings(
        filename=png_path,
        size=(800, 600),
        renderer=TachyonRenderer(),
    )
    vp.render(settings)
def vp(traj, frames, edges, faces, threshold):
    '''
    Count the fraction of Voronoi polyhedra (VP) meeting criteria.

    inputs:
        traj = The trajectory file
        frames = A list of frames to analyze
        edges = The VP edge considered
        faces = The minimum number of faces for the considered edge
        threshold = The minimum length for a VP edge
    outputs:
        fraction = The fraction of VP meeting the criteria
    '''
    # Indexing correction
    edges -= 1
    # Load input data and create an ObjectNode with a data pipeline.
    node = import_file(traj, multiple_frames=True)
    voro = VoronoiAnalysisModifier(compute_indices=True,
                                   use_radii=False,
                                   edge_threshold=threshold)
    node.modifiers.append(voro)
    all_indexes = []
    for frame in frames:
        out = node.compute(frame)
        indexes = out.particle_properties['Voronoi Index'].array
        all_indexes.append(indexes)
    # Combine all the frames
    all_indexes = [pd.DataFrame(i) for i in all_indexes]
    df = pd.concat(all_indexes)
    df = df.fillna(0)  # Make sure indexes are zero if not included
    df = df.astype(int)  # Make sure all counts are integers
    total = df.shape[0]  # The total number of VP
    count = df[df[edges] >= faces].shape[0]  # The number of matching VP
    fraction = count / total  # The fraction of matching VP
    return fraction
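# A minimal driver sketch for vp() above; the file name, frame list, and the
# criterion (at least 10 five-edged faces, an icosahedra-like signature) are
# assumptions for illustration only.
if __name__ == "__main__":
    frac = vp("traj.lammpstrj", frames=[0, 1, 2], edges=5, faces=10,
              threshold=0.1)
    print("fraction of matching VP:", frac)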
def ovito_render():
    "Render images using OVITO"
    import ovito
    from ovito.io import import_file
    from ovito.vis import Viewport, RenderSettings, TachyonRenderer
    from ovito.modifiers import ColorCodingModifier
    from ovito.modifiers import PolyhedralTemplateMatchingModifier

    pipeline = import_file("gcmc.out.xyz")
    #ptm_modifier = PolyhedralTemplateMatchingModifier(
    #    color_by_type=True,
    #    rmsd_cutoff=0.3,
    #)
    #for structure_type in ['BCC', 'CUBIC_DIAMOND', 'FCC', 'GRAPHENE',
    #                       'HCP', 'HEX_DIAMOND', 'ICO', 'OTHER', 'SC']:
    #    structure_type = getattr(PolyhedralTemplateMatchingModifier.Type, structure_type)
    #    ptm_modifier.structures[structure_type].enabled = True
    #pipeline.modifiers.append(ptm_modifier)
    data_collection = pipeline.compute()
    pipeline.add_to_scene()
    renderer = TachyonRenderer(antialiasing_samples=32)
    vp = Viewport(type=Viewport.Type.Perspective, camera_dir=(2, 1, -1))
    vp.zoom_all()
    vp.camera_pos = np.asarray(vp.camera_pos) * np.array([0.8, 0.8, 0.75])
    vp.render_image(
        filename='sample.png',
        size=(1000, 750),
        frame=pipeline.source.num_frames - 1,
        alpha=True,
        renderer=renderer,
    )
    every_nth = pipeline.source.num_frames // 20
    video = vp.render_anim(
        filename='sample.gif',
        size=(480, 320),
        fps=5,
        renderer=renderer,
        every_nth=every_nth,
    )
    # Frames are zero-indexed, so the last valid frame is num_frames - 1.
    image = vp.render_image(size=(800, 600), filename='img.png',
                            frame=pipeline.source.num_frames - 1)
    pipeline.remove_from_scene()
def Render(Frac, Bead, Dens):
    data = import_file('./../frac_' + str(Frac) + '/m_' + str(Bead) + '/d_' +
                       str(Dens) + '/final_snapshot.xyz')
    data.add_to_scene()
    vis_element = data.source.data.particles.vis
    vis_element.radius = .9
    cell_vis = data.source.data.cell.vis
    cell_vis.render_cell = False
    vp = Viewport(type=Viewport.Type.Bottom)
    vp.zoom_all(size=(600, 500))
    image = vp.render_image(size=(600, 500), alpha=True)
    image.save("f-" + str(int((1 - Frac) * 10 % 10)) + "/b-" + str(Bead) +
               "_d-" + str(Dens) + ".png")
    data.remove_from_scene()
def compute_ovito_data(filename0):
    """
    Compute the ovito data attributes for the input file.

    Parameters
    ----------
    filename0 : string
        The name of the input file.

    Returns
    -------
    data : class
        All the attributes of the data
    """
    pipeline = oio.import_file(filename0, sort_particles=True)
    dmod = ovm.PolyhedralTemplateMatchingModifier(rmsd_cutoff=.1)
    pipeline.modifiers.append(dmod)
    data = pipeline.compute()
    return data
def Mods(Fracs, Beads, Denss):
    pre = 'results/snapshots/coordination_analysis/f_'
    for Frac in Fracs:
        f = int((1 - Frac) * 10 % 10)
        for Bead in Beads:
            os.makedirs('./' + str(pre) + str(f) + '/b_' + str(Bead))
            for Dens in Denss:
                data = import_file('./frac_' + str(Frac) + '/m_' + str(Bead) +
                                   '/d_' + str(Dens) + '/final_snapshot.xyz')
                # 'modifier' is expected to be defined at module level
                data.modifiers.append(modifier)
                export_file(data,
                            str(pre) + str(f) + "/b_" + str(Bead) + "/d_" +
                            str(Dens) + ".txt", "txt/table")
    exit()
def calc_ovito_rdf(atoms_in, cutoff=6.0):
    # from ovito.pipeline import StaticSource, Pipeline
    # from ovito.io.ase import ase_to_ovito
    from ovito.io import import_file
    from ovito.modifiers import CoordinationAnalysisModifier

    print('==> cutoff in ovito rdf: {0}'.format(cutoff))
    # atoms = copy.deepcopy(atoms_in)
    # data = ase_to_ovito(atoms)
    # pipeline = Pipeline(source = StaticSource(data = data))
    pipeline = import_file('CONTCAR_for_ovito')
    modifier = CoordinationAnalysisModifier(cutoff=cutoff,
                                            number_of_bins=200,
                                            partial=True)
    pipeline.modifiers.append(modifier)
    data = pipeline.compute()
    # np.savetxt("y_post_ovito_rdf.txt",
    #            data.tables['coordination-rdf'].xy())
    data_rdf = data.tables['coordination-rdf'].xy()
    return data_rdf
def Render1(item, Renderer, selector0, delector):
    pipeline = import_file('./../frac_' + str(item[0]) + '/m_' + str(item[1]) +
                           '/d_' + str(item[2]) + '/final_snapshot.xyz')
    pipeline.modifiers.append(selector0)
    pipeline.modifiers.append(delector)
    pipeline.add_to_scene()
    vis_element = pipeline.source.data.particles.vis
    vis_element.radius = .5
    cell_vis = pipeline.source.data.cell.vis
    cell_vis.render_cell = False
    vp = Viewport(type=Viewport.Type.Left)
    vp.zoom_all(size=(600, 500))
    image = vp.render_image(size=(600, 500), alpha=True, renderer=Renderer)
    image.save("f-" + str(int((1 - item[0]) * 10 % 10)) + "/rand/b-" +
               str(item[1]) + "_d-" + str(item[2]) + "(phobic).png")
    pipeline.remove_from_scene()
def to_lammps(trjfile: str, indices_per_atom: npt.NDArray[np.float64]) -> str:
    pipeline = import_file(trjfile, sort_particles=True)
    for frame, linde in enumerate(indices_per_atom):
        data = pipeline.compute(frame)
        data.particles_.create_property("lindemann", data=linde)
        export_file(
            data,
            f"lindemann_outputfile_X{frame}.dump",
            "lammps/dump",
            columns=[
                "Particle Identifier",
                "Particle Type",
                "Position.X",
                "Position.Y",
                "Position.Z",
                "lindemann",
            ],
            multiple_frames=True,
        )
    """
    This section is weird and very hacky; I don't like it. Does someone have
    an idea how to do this in a better way? First I write the frames with the
    ovito export_file, then I concatenate them into a single file, and finally
    I remove the per-frame files... I know...
    """
    filenames = [f"lindemann_outputfile_X{frame}.dump"
                 for frame in range(len(indices_per_atom))]
    file_name = "lindemann_per_atom.lammpstrj"
    with open(file_name, "w") as outfile:
        for fname in filenames:
            with open(fname) as infile:
                outfile.write(infile.read())
    for frame in range(len(indices_per_atom)):
        os.remove(f"lindemann_outputfile_X{frame}.dump")
    return "saved trajectory as lindemann_per_atom.lammpstrj"
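# A usage sketch for to_lammps(); the random per-atom Lindemann indices are
# placeholders with an assumed (nframes, natoms) shape matching a hypothetical
# trajectory "md.lammpstrj".
if __name__ == "__main__":
    dummy_indices = np.random.rand(3, 108)  # hypothetical: 3 frames, 108 atoms
    print(to_lammps("md.lammpstrj", dummy_indices))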
from ovito.io import import_file
from ovito.modifiers import ConstructSurfaceModifier

# Load a particle structure and construct its surface mesh:
node = import_file("simulation.dump")
node.modifiers.append(ConstructSurfaceModifier(radius=2.8))
node.compute()

# Access the computed surface mesh and export it to VTK files for
# visualization with ParaView.
mesh = node.output.surface
mesh.export_vtk('surface.vtk', node.output.cell)
mesh.export_cap_vtk('surface_cap.vtk', node.output.cell)
        numParticles,
        cell)
    status = numpy.asarray(myChiller.get_status())
    output.create_user_particle_property("Chill Status", "float")
    output['Chill Status'].marray[:] = status[:]
    output['Chill Status'].changed()
    #color_property = output.create_particle_property(ParticleProperty.Type.Color)
    #color_property.marray[:] = np.asarray(((status==0).astype(np.float64), (status==3).astype(np.float64), (status==4).astype(np.float64))).transpose()
    #print (states)

if __name__ == "__main__":
    import numpy
    filename = "/bumblebee/simulation_data/mw_polycrystalline_thermalization/trajectory_low_freq.bin"
    #filename = "/work/users/henriasv/molecular-simulations/systematic_mw_pennycracks_VariableMethaneConcentrationInCavity-2016-05-09-124346/lmp_Nthermalize=200.0_Nerate=1000.0_temperature=260.0_crackRadius=40.0_Nproduction=4000.0_timeStep=10.0_Nx=24_Ny=24_Nz=24_crackHeight=6.0_maxStrain=1.072_seed=000/trajectory.lammpstrj"
    node = import_file(filename,
                       columns=["Particle Identifier", "Particle Type",
                                "Position.X", "Position.Y", "Position.Z"],
                       multiple_frames=True)
    #node.compute()
    node.modifiers.append(SelectParticleTypeModifier(property='Particle Type', types={1}))
    node.modifiers.append(DeleteSelectedParticlesModifier())
    # slices = [[5, 295], [5, 295], [144, 160]]
    # offset = 6
    # sliceModifiers2 = [SliceModifier(distance=slices[0][0], inverse=True),
    #                    SliceModifier(distance=slices[0][1]),
    #                    SliceModifier(distance=slices[1][0], normal=[0,1,0], inverse=True),
    #                    SliceModifier(distance=slices[1][1], normal=[0,1,0]),
    #                    SliceModifier(distance=slices[2][0], normal=[0,0,1], inverse=True),
def atoms_to_data(atoms):
    data = DataCollection()
    cell_matrix = np.zeros((3, 4))
    cell_matrix[:, :3] = atoms.get_cell()
    cell_matrix[:, 3] = atoms.info.get('cell_origin', [0., 0., 0.])
    cell = SimulationCell(matrix=cell_matrix, pbc=atoms.get_pbc())
    data.addObject(cell)
    position = ParticleProperty(name='Position',
                                type=Particles.ParticleProperty.Type.Position,
                                array=atoms.get_positions())
    data.addObject(position)
    return data

node = import_file(sys.argv[1])
data = node.compute()
atoms = data_to_atoms(data)
atoms.positions[...] *= 1.1  # would be something more complex
node.remove_from_scene()
#new_data = atoms_to_data(atoms)
# make new_node from new_data and add to scene
from ovito.vis import *
from ovito.io import import_file
import ovito.vis
import sys
import math

data = sys.argv[1]
size = int(sys.argv[2])
image = sys.argv[3]
columns = ["Particle Type", "Position.X", "Position.Y", "Position.Z"]
node = import_file(data, multiple_frames=False, columns=columns)

# Disable the simulation box
cell = node.source.cell
cell.display.enabled = False

node.compute()

vp = Viewport()
z = 0.7
vp.type = Viewport.Type.PERSPECTIVE
vp.camera_pos = (360, -160, 214)
vp.camera_dir = (-0.68, 0.62, -0.38)
vp.fov = math.radians(38.0)

node.add_to_scene()
settings = RenderSettings(
    filename=image,
    size=(size, size),
    renderer=TachyonRenderer()
)
    for i in range(threads):
        start_end[i] = [int(start + dist * i), int(start + dist * (i + 1))]
        if i == (threads - 1):
            start_end[i] = [int(start + dist * i), int(end)]
    for s, e in start_end:
        print((s, e))
        # main_function(start, end, nodename) loads the file itself, so pass
        # the file name rather than the pipeline object
        x = threading.Thread(target=main_function, args=(s, e, nodename))
        x.start()

try:
    node = import_file(sys.argv[1])
    nodename = sys.argv[1]
    totframe = int(node.source.num_frames)
    start = int(sys.argv[2])
    end = int(sys.argv[3])
    threads = int(sys.argv[4])
except Exception:
    node = import_file(
        "/uio/hume/student-u00/simonsch/Desktop/sommerprosjekt/fillHoleWithWater/partialSprekk/tryWithAddGravity/abeldump/lykket_equalforce/massNotCount/dump.dump"
    )
    nodename = "/uio/hume/student-u00/simonsch/Desktop/sommerprosjekt/fillHoleWithWater/partialSprekk/tryWithAddGravity/abeldump/lykket_equalforce/massNotCount/dump.dump"
    totframe = int(node.source.num_frames)
    start = 0
    end = totframe
    threads = 5
datdir = "pH_analysis_threads"
def main_function(start, end, nodename):
    for i in range(start, end):
        node = import_file(nodename)
        noNeighbours = []  # re-initiate
        H3Opluss = []  # re-initiate
        Ominus = []  # re-initiate
        OHminus = []
        print(i)
        ovito.dataset.anim.current_frame = i  # Load in new frame
        data = node.compute()
        positions = data.particles.positions  # x, y and z positions
        ID = data.particles["Particle Identifier"]
        ptypes = data.particles.particle_types
        alldat = np.zeros((len(ptypes), 6))
        alldat[:, 0] = ID
        alldat[:, 1] = ptypes
        alldat[:, 2:5] = positions
        finder_Hpluss = NearestNeighborFinder(1, data)
        finder_H3Opluss = NearestNeighborFinder(3, data)
        for index in range(data.particles.count):  # for each particle
            if ptypes[index] == 3:  # If it is a hydrogen
                # this is really just one particle, but for-syntax is preferred
                for neigh in finder_Hpluss.find(index):
                    if neigh.distance > cutoff_distance:
                        noNeighbours.append(index)
            elif ptypes[index] == 2:  # If it is an oxygen
                amount_H_neighbors = 0
                amount_Si_neighbours = 0
                for neigh in finder_H3Opluss.find(index):
                    # if it's a hydrogen closer than the cutoff
                    if (neigh.distance < cutoff_distance
                            and ptypes[neigh.index] == 3):
                        amount_H_neighbors += 1
                    # if there is a silicon nearby
                    elif (neigh.distance < cutoff_distance_silisium
                          and ptypes[neigh.index] == 1):
                        amount_Si_neighbours += 1
                if amount_H_neighbors == 3:  # 3 H ==> H3O+
                    H3Opluss.append(index)
                elif (amount_H_neighbors + amount_Si_neighbours) < 2:
                    if amount_H_neighbors == 0:
                        Ominus.append(index)  # 0 or 1 Si and 0 H: O-
                    else:
                        OHminus.append(index)  # OH-
        if makeXYZ:
            outfile_h = open("HPluss%d.xyz" % i, "w")
            outfile_h3O = open("H3OPluss%d.xyz" % i, "w")
            outfile_oh = open("OHMinuss%d.xyz" % i, "w")
            outfile_o = open("reactive_Ominus%d.xyz" % i, "w")
            outfile_o.write(str(len(Ominus)) + "\n")
            outfile_h.write(str(len(noNeighbours)) + "\n")
            outfile_h3O.write(str(len(H3Opluss)) + "\n")
            outfile_oh.write(str(len(OHminus)) + "\n")
            outfile_o.write("ID type x y z\n")
            outfile_h.write("ID type x y z\n")
            outfile_h3O.write("ID type x y z\n")
            outfile_oh.write("ID type x y z\n")
            for r in Ominus:
                outfile_o.write(str(int(alldat[r, 0])) + " " +
                                str(int(alldat[r, 1])) + " " +
                                str(alldat[r, 2]) + " " + str(alldat[r, 3]) +
                                " " + str(alldat[r, 4]) + "\n")
            for r in OHminus:
                outfile_oh.write(str(int(alldat[r, 0])) + " " +
                                 str(int(alldat[r, 1])) + " " +
                                 str(alldat[r, 2]) + " " + str(alldat[r, 3]) +
                                 " " + str(alldat[r, 4]) + "\n")
            for r in H3Opluss:
                outfile_h3O.write(str(int(alldat[r, 0])) + " " +
                                  str(int(alldat[r, 1])) + " " +
                                  str(alldat[r, 2]) + " " + str(alldat[r, 3]) +
                                  " " + str(alldat[r, 4]) + "\n")
            for r in noNeighbours:
                outfile_h.write(str(int(alldat[r, 0])) + " " +
                                str(int(alldat[r, 1])) + " " +
                                str(alldat[r, 2]) + " " + str(alldat[r, 3]) +
                                " " + str(alldat[r, 4]) + "\n")
            outfile_h.close()
            outfile_h3O.close()
            outfile_oh.close()
            outfile_o.close()
import ovito
from ovito.io import import_file

test_data_dir = "../../files/"

node1 = import_file(test_data_dir + "LAMMPS/animation.dump.gz")
assert ovito.dataset.selected_node == node1
assert ovito.dataset.scene_nodes[0] == node1

node2 = import_file(test_data_dir + "CFG/fcc_coherent_twin.0.cfg")
assert len(ovito.dataset.scene_nodes) == 2

node3 = import_file(test_data_dir + "Parcas/movie.0000000.parcas")
assert len(ovito.dataset.scene_nodes) == 3

node4 = import_file(test_data_dir + "CFG/shear.void.120.cfg")
node5 = import_file(test_data_dir + "IMD/nw2.imd.gz")

node1.remove_from_scene()
node2.remove_from_scene()
node3.remove_from_scene()
node4.remove_from_scene()
node5.remove_from_scene()

node = import_file(test_data_dir + "LAMMPS/multi_sequence_1.dump")
assert ovito.dataset.anim.last_frame == 2
node.remove_from_scene()

node = import_file(test_data_dir + "LAMMPS/shear.void.dump.bin",
                   columns=["Particle Identifier", "Particle Type",
                            "Position.X", "Position.Y", "Position.Z"])
node.remove_from_scene()

try: