def save_snapshot(ind, a, eigenvectors, coors, path, snapshots_path,
                  new_indexes, snapshots_files, snapshots_names,
                  comm, rank, size):
    """Reconstruct the fields from the POD modes and coefficients `a`,
    re-dimensionalize them and write the snapshot to a parallel HDF5 file."""
    fields_local = Vectors()
    fields_local.dot_lumped_separated(eigenvectors, a)
    fields_local.make_non_homogeneous_bc(coors, Tup_dimless, Tdown_dimless,
                                         ymax_dimless, P00, RR)
    fields_local_dim = copy.deepcopy(fields_local)
    fields_local_dim.make_dimensional(v0, T0, T1, p0)
    # coors_local_dim = coors * L0
    # Stack P, u, v, w, T and the (x, y, z) coordinates as columns of the snapshot.
    sols_snap = np.vstack(
        (fields_local_dim.P[new_indexes.local_indices_inner],
         fields_local_dim.u[new_indexes.local_indices_inner]))
    sols_snap = np.vstack(
        (sols_snap, fields_local_dim.v[new_indexes.local_indices_inner]))
    sols_snap = np.vstack(
        (sols_snap, fields_local_dim.w[new_indexes.local_indices_inner]))
    sols_snap = np.vstack(
        (sols_snap, fields_local_dim.T[new_indexes.local_indices_inner]))
    sols_snap = np.vstack(
        (sols_snap, coors.coors_dim_rounded[new_indexes.local_indices_inner, 0]))
    sols_snap = np.vstack(
        (sols_snap, coors.coors_dim_rounded[new_indexes.local_indices_inner, 1]))
    sols_snap = np.vstack(
        (sols_snap, coors.coors_dim_rounded[new_indexes.local_indices_inner, 2]))
    sols_snap = sols_snap.transpose()
    q = "%05i" % (ind)
    nasol_h = path + "Solutions_" + q + ".h5"
    npsol_h = "Solutions_" + q + ".h5"
    namel_h = "Solutions_" + q
    comm.Barrier()
    # Each rank writes its inner points collectively into the same dataset.
    snap_h = h5py.File(nasol_h, "w", driver='mpio', comm=MPI.COMM_WORLD)
    all_fields = snap_h.create_dataset(
        "All",
        (coors.coors_global_dimless_rounded.shape[0], sols_snap.shape[1]),
        dtype=np.float64)
    all_fields[new_indexes.global_indices_inner, :] = sols_snap[:, :]
    snap_h.close()
    comm.Barrier()
    snapshots_files.append(npsol_h)
    snapshots_names.append(namel_h)


def read_database_h5(hdf5_database_file, comm, size, rank):
    """Read the snapshot database from an HDF5 file and distribute it
    across the MPI processes."""
    ### read database from the h5 file ###
    if rank == 0:
        print "-----------------------------------------------"
        print "Reading preprocessed HDF5 file with snapshots"
        print "-----------------------------------------------"
    ### read datatable ###
    fhdf = h5py.File(hdf5_database_file, "r", driver='mpio', comm=MPI.COMM_WORLD)
    coors = coordinates()
    ts_dim = fhdf['ts_dim'][()]
    coors.coors_global_dim_rounded = fhdf['coors_global_dim_rounded'][()]
    new_indexes = Intexes()
    new_indexes.set_index(coors.coors_global_dim_rounded)
    new_indexes.set_csr(comm, size, rank)
    new_indexes.organize_split_vectors(
        copy.deepcopy(coors.coors_global_dim_rounded), number_ghosh_cells)
    coors.coors_dim_rounded = \
        new_indexes.scatter_vector(copy.deepcopy(coors.coors_global_dim_rounded))
    [neigbours, boundaries] = calculate_neighbors(coors.coors_dim_rounded,
                                                  comm, size, rank)
    dim_vars = Vectors()
    dim_vars.read_in_hdf5_file(fhdf, new_indexes.patch_indexes_ghosh_cells)
    fhdf.close()
    if rank == 0:
        print "-----------------------------------------------"
        print "Reading and postprocessing database finished"
        print "-----------------------------------------------"
    return [dim_vars, coors, neigbours, boundaries, ts_dim, new_indexes]


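# Hedged usage sketch (not part of the original module): how read_database_h5
# might be driven from an mpi4py program. The function name and default
# filename below are only illustrations; the real database path comes from the
# caller.
def example_load_snapshot_database(hdf5_database_file="snapshots_database.h5"):
    comm = MPI.COMM_WORLD
    # Every rank participates in the collective read; the function returns the
    # locally scattered variables, coordinates and index bookkeeping.
    return read_database_h5(hdf5_database_file, comm,
                            comm.Get_size(), comm.Get_rank())

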
def main(text):
    # FORMAT: parse the raw input text
    parsed = parse(text)
    # BRIDGING: resolve bridging anaphora using word vectors
    vectors = Vectors(optimize=False, filename="GoogleNewsVecs.txt")
    bridging(parsed, vectors)
    # CFS: forward-looking centers
    cfs = centered_f(parsed)
    # CBS: backward-looking centers
    cbs = cb_finder(parsed)
    # CLASSIFY: centering transitions
    transitions = classify_transitions(cfs, cbs)
    # SCORE
    score = scoring(transitions)
    print "COHERENCE: {}".format(score)
    return


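# Hedged usage sketch (assumed driver, not in the original file): run the
# coherence pipeline on a plain-text document; "example.txt" is only an
# illustrative filename.
if __name__ == "__main__":
    with open("example.txt") as text_file:
        main(text_file.read())

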
def pod_info(coors_dim, eigenvectors, path, comm, rank, size):
    """Attach the coordinates to the POD modes, write one file per variable
    and launch the mode plotting scripts."""
    pod = Vectors()
    pod.P = np.vstack([eigenvectors.P.transpose(), coors_dim.transpose()]).transpose()
    pod.u = np.vstack([eigenvectors.u.transpose(), coors_dim.transpose()]).transpose()
    pod.v = np.vstack([eigenvectors.v.transpose(), coors_dim.transpose()]).transpose()
    pod.w = np.vstack([eigenvectors.w.transpose(), coors_dim.transpose()]).transpose()
    pod.T = np.vstack([eigenvectors.T.transpose(), coors_dim.transpose()]).transpose()
    # print "Number of POD obtained: "
    # print "For Pressure: %i" % eigenvalues.P.size
    # print "For U velocity: %i" % eigenvalues.u.size
    # print "For V velocity: %i" % eigenvalues.v.size
    # print "For W velocity: %i" % eigenvalues.w.size
    # print "For Temperature: %i" % eigenvalues.T.size
    print "-----------------------"
    print "--- OUTPUTTING PODS ---"
    print "-----------------------"
    # Write the modes sequentially: rank 0 creates the files, the remaining
    # ranks append their local rows in rank order.
    for p in range(size):
        if rank == p:
            if rank == 0:
                fp = open(path + 'pod_pi.dat', 'wb')
                np.savetxt(fp, pod.P)
                fp.close()
                ftau = open(path + 'pod_tau.dat', 'wb')
                np.savetxt(ftau, pod.T)
                ftau.close()
                fu = open(path + 'pod_u.dat', 'wb')
                np.savetxt(fu, pod.u)
                fu.close()
                fv = open(path + 'pod_v.dat', 'wb')
                np.savetxt(fv, pod.v)
                fv.close()
                fw = open(path + 'pod_w.dat', 'wb')
                np.savetxt(fw, pod.w)
                fw.close()
            else:
                fp = open(path + 'pod_pi.dat', 'ab')
                np.savetxt(fp, pod.P)
                fp.close()
                ftau = open(path + 'pod_tau.dat', 'ab')
                np.savetxt(ftau, pod.T)
                ftau.close()
                fu = open(path + 'pod_u.dat', 'ab')
                np.savetxt(fu, pod.u)
                fu.close()
                fv = open(path + 'pod_v.dat', 'ab')
                np.savetxt(fv, pod.v)
                fv.close()
                fw = open(path + 'pod_w.dat', 'ab')
                np.savetxt(fw, pod.w)
                fw.close()
        comm.Barrier()
    argumentop = ('python ./mode_plotter.py --file_pod pod_pi.dat --path '
                  + path[:-1] + ' --name pod_obtenido_pi --pod')
    argumentot = ('python ./mode_plotter.py --file_pod pod_tau.dat --path '
                  + path[:-1] + ' --name pod_obtenido_tau --pod')
    argumentou = ('python ./mode_plotter.py --file_pod pod_u.dat --path '
                  + path[:-1] + ' --name pod_obtenido_u --pod')
    argumentov = ('python ./mode_plotter.py --file_pod pod_v.dat --path '
                  + path[:-1] + ' --name pod_obtenido_v --pod')
    argumentow = ('python ./mode_plotter.py --file_pod pod_w.dat --path '
                  + path[:-1] + ' --name pod_obtenido_w --pod')
    os.system(argumentop)
    os.system(argumentou)
    os.system(argumentov)
    os.system(argumentow)
    os.system(argumentot)
    plot_velocity_field_files(
        'Pods_vectorial_',
        path,
        path + 'pod_u.dat',
        path + 'pod_v.dat',
    )


def variables_info(coors, dimless_homogeneous_vars, name, path, new_indexes,
                   snapshots_path, comm, rank, size):
    """Attach the coordinates to the dimensionless homogeneous variables and
    write one file per variable for plotting."""
    pod = Vectors()
    pod.P = np.vstack([
        dimless_homogeneous_vars.P[
            new_indexes.get_local_inner_indices(), :].transpose(),
        coors.coors_dim_rounded[
            new_indexes.get_local_inner_indices(), :].transpose()
    ]).transpose()
    pod.u = np.vstack([
        dimless_homogeneous_vars.u[
            new_indexes.get_local_inner_indices(), :].transpose(),
        coors.coors_dim_rounded[
            new_indexes.get_local_inner_indices(), :].transpose()
    ]).transpose()
    pod.v = np.vstack([
        dimless_homogeneous_vars.v[
            new_indexes.get_local_inner_indices(), :].transpose(),
        coors.coors_dim_rounded[
            new_indexes.get_local_inner_indices(), :].transpose()
    ]).transpose()
    pod.w = np.vstack([
        dimless_homogeneous_vars.w[
            new_indexes.get_local_inner_indices(), :].transpose(),
        coors.coors_dim_rounded[
            new_indexes.get_local_inner_indices(), :].transpose()
    ]).transpose()
    pod.T = np.vstack([
        dimless_homogeneous_vars.T[
            new_indexes.get_local_inner_indices(), :].transpose(),
        coors.coors_dim_rounded[
            new_indexes.get_local_inner_indices(), :].transpose()
    ]).transpose()
    if rank == 0:
        print "------------------------------------------------"
        print "---          OUTPUTTING VARIABLES           ---"
        print "------------------------------------------------"
    # Write the variables sequentially: rank 0 creates the files, the
    # remaining ranks append their local rows in rank order.
    for p in range(size):
        if rank == p:
            if rank == 0:
                fp = open(path + name + 'pi.dat', 'wb')
                np.savetxt(fp, pod.P)
                fp.close()
                ftau = open(path + name + 'tau.dat', 'wb')
                np.savetxt(ftau, pod.T)
                ftau.close()
                fu = open(path + name + 'u.dat', 'wb')
                np.savetxt(fu, pod.u)
                fu.close()
                fv = open(path + name + 'v.dat', 'wb')
                np.savetxt(fv, pod.v)
                fv.close()
                fw = open(path + name + 'w.dat', 'wb')
                np.savetxt(fw, pod.w)
                fw.close()
            else:
                fp = open(path + name + 'pi.dat', 'ab')
                np.savetxt(fp, pod.P)
                fp.close()
                ftau = open(path + name + 'tau.dat', 'ab')
                np.savetxt(ftau, pod.T)
                ftau.close()
                fu = open(path + name + 'u.dat', 'ab')
                np.savetxt(fu, pod.u)
                fu.close()
                fv = open(path + name + 'v.dat', 'ab')
                np.savetxt(fv, pod.v)
                fv.close()
                fw = open(path + name + 'w.dat', 'ab')
                np.savetxt(fw, pod.w)
                fw.close()
        comm.Barrier()
    # modeploter_function(path, file_pod, name, pod_yes=False, dir=2, dx=0.000625)
    files_pod = [
        name + 'pi.dat', name + 'tau.dat', name + 'u.dat',
        name + 'v.dat', name + 'w.dat'
    ]
    names = [name + 'pi', name + 'tau', name + 'u', name + 'v', name + 'w']
    # Alternative (kept for reference): distribute the plotting round-robin
    # over the ranks instead of doing it all on rank 0.
    # for p in range(len(files_pod)):
    #     if p % size == rank:
    #         modeploter_function(path, files_pod[p], names[p], True, False)
    # if rank == len(files_pod) % size:
    #     plot_velocity_field_files('Vels_database_', path, path + name + 'u.dat',
    #                               path + name + 'v.dat', snapshots_path)
    for p in range(len(files_pod)):
        if rank == 0:
            # os.system(argumentos[p])
            modeploter_function(path, files_pod[p], names[p], True, False)
    plot_velocity_field_files('Vels_database_', path, path + name + 'u.dat',
                              path + name + 'v.dat', snapshots_path)


    temp = numpy.array(
        [distance for distance in distances if not numpy.isnan(distance)])
    min_distance = temp.min()
    if type == 'conservative':
        threshold = temp.mean() - (2 * temp.std())
    else:
        threshold = temp.mean() - temp.std()
    if min_distance <= threshold:
        xrenner.markables[i].antecedent = xrenner.markables[
            distances.index(min_distance)]
        xrenner.markables[i].coref_type = 'bridge'
    return None


if __name__ == "__main__":
    xrenner = Xrenner(override='GUM')
    xrenner.analyze('clinton_example.conll10', 'conll')
    vecs = Vectors('../vectors/GoogleNewsVecs.txt', False)
    bridging(xrenner, vecs)
    for markable in xrenner.markables:
        print(markable.text, markable.antecedent, markable.coref_type)


def read_database(newnames_files, comm, size, rank):
    """Read the database from separate text files and distribute it
    between the MPI processes."""
    ### read database from text files ###
    ### initialize vectors ###
    coors = coordinates()
    dim_vars = Vectors()
    dim_vars.zeros()
    neigbours = Neigbors()
    boundaries = Boundary()
    new_indexes = Intexes()
    new_indexes.set_csr(comm, size, rank)
    first_flag = True
    for my_file in newnames_files:
        #### read file ####
        comm.barrier()
        if rank == 0:
            print "-------------------------------------------------------------------"
            sys.stdout.flush()
        comm.barrier()
        print "Processor %d Reading and processing data of file %s" % (rank, my_file)
        sys.stdout.flush()
        comm.barrier()
        if rank == 0:
            print "-------------------------------------------------------------------"
            sys.stdout.flush()
        comm.barrier()
        datos_ori = np.loadtxt(my_file, delimiter=',', skiprows=1, dtype=np.float64)
        ### re-order the data just read ###
        datos_ori = datos_ori.copy(order='C')
        if first_flag:
            # The first file fixes the node ordering, the domain split and the
            # neighbour/boundary information reused for the remaining files.
            new_indexes.re_order(datos_ori[:, -3:datos_ori.shape[1]])
            datos = datos_ori[new_indexes.inew[:]]
            coors_round = round_coordinates(
                copy.deepcopy(datos[:, -3:datos.shape[1]]), round_cell_size)
            new_indexes.organize_split_vectors(coors_round, number_ghosh_cells)
            coors.coors_dim_rounded = new_indexes.scatter_vector(coors_round.copy())
            [neigbours, boundaries] = calculate_neighbors(coors.coors_dim_rounded,
                                                          comm, size, rank)
            temp_local = new_indexes.scatter_vector(datos[:, 4])
            pres_local = new_indexes.scatter_vector(datos[:, 0])
            vels_local = new_indexes.scatter_vector(datos[:, 1:4])
            coors.coors_global_dim_rounded = round_coordinates(
                datos[:, -3:datos.shape[1]], round_cell_size)
            dim_vars.append(pres_local, vels_local[:, 0], vels_local[:, 1],
                            vels_local[:, 2], temp_local)
            first_flag = False
        else:
            datos = datos_ori[new_indexes.inew[:]]
            temp_local = new_indexes.scatter_vector(datos[:, 4])
            pres_local = new_indexes.scatter_vector(datos[:, 0])
            vels_local = new_indexes.scatter_vector(datos[:, 1:4])
            dim_vars.vstack(pres_local, vels_local[:, 0], vels_local[:, 1],
                            vels_local[:, 2], temp_local)
    dim_vars.transpose()
    new_indexes.local_indices()
    new_indexes.distribute_indexes()
    return [dim_vars, coors, neigbours, boundaries, new_indexes]


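# Hedged usage sketch (not part of the original module): read_database is
# assumed to receive an ordered list of the comma-separated snapshot files;
# the function name and glob pattern below are only illustrations.
def example_load_text_database(pattern="./snapshots/*.txt"):
    import glob
    comm = MPI.COMM_WORLD
    newnames_files = sorted(glob.glob(pattern))
    return read_database(newnames_files, comm, comm.Get_size(), comm.Get_rank())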