def increment_progress_mp():
    """
    Small method to increment progress when using multiple processors.
    """
    progress.value += 1
    if progress.value % 10 == 0 or progress.value == total.value:
        print_tools.progress(progress.value, total.value,
                             status='-- iterating over blocks...')
    return
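# The shared counters `progress` and `total` used above are module-level
# handles bound by the pool initializer `multiprocessing_init` (passed via
# initargs in get_amr_data_multiprocessing below). A minimal sketch of that
# binding, assuming the global names used in this module -- the actual
# implementation may differ:
#
#   def multiprocessing_init(progress_value, mp_bool, total_value):
#       global progress, mp_activated, total
#       progress = progress_value
#       mp_activated = mp_bool
#       total = total_value
#
# Note that `progress.value += 1` is not atomic across processes; a stricter
# variant would guard the increment with `with progress.get_lock():`.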
def get_i_f(data, altitude=20000):
    """
    Returns the degree of ionization for each data point in the given input.

    :param data: Reduced data object.
    :param altitude: Altitude at which to evaluate (in km). Default is 20000;
                     other values are rounded to the nearest multiple of 10000.
                     Type is double or integer.
    :return: ion | Degree of ionization at each data point of the input
                   matrix. Type is np.ndarray of dimension ndim.
             f   | f-value at each data point of the input matrix, multiplied
                   by 1e16 (see table in paper). Type is np.ndarray of
                   dimension ndim.
    """
    # Perform altitude check
    if altitude != 20000:
        altitude = _round_to_base(altitude, 10000)

    # Select ionization and f tables for the requested altitude
    if int(altitude) == 10000:
        i_table = ionization_10k
        f_table = f_10k
    elif int(altitude) == 20000:
        i_table = ionization_20k
        f_table = f_20k
    else:
        i_table = ionization_30k
        f_table = f_30k

    # Create bivariate spline approximation over the rectangular mesh.
    # Has to be done only once, then used to evaluate each (T, p) point.
    spline_ion = RectBivariateSpline(T_table, pg_table, i_table)
    spline_f = RectBivariateSpline(T_table, pg_table, f_table)

    # Re-dimensionalize temperature and pressure
    temp = data.T * units.unit_temperature
    pg = data.p * units.unit_pressure

    # Prevent double loading during the same run
    if data.ion is not None and data.f_param is not None:
        return data.ion, data.f_param

    # See if data is already stored to disk
    filen = print_tools.trim_filename(settings.filename)
    if os.path.isfile("interpolated_files/ion_" + filen + ".npy") and os.path.isfile(
            "interpolated_files/f_param_" + filen + ".npy"):
        print("Interpolated data already exists -- loading files.")
        print("    Loading Numpy files...")
        ion = np.load("interpolated_files/ion_" + filen + ".npy")
        f = np.load("interpolated_files/f_param_" + filen + ".npy")
        print("    Done.")
        data.ion = ion
        data.f_param = f * 1e16
        return ion, f * 1e16

    # Create matrices of the same size as the input
    ion = np.zeros_like(temp)
    f = np.zeros_like(temp)

    # Fast iteration over array elements (calls the C array operator API)
    it = np.nditer(temp, flags=['multi_index'])
    print("Interpolating matrix for ionization and f.")
    if data._ndim == 2:
        tot_points = len(data.T[0, :])
    else:
        tot_points = len(data.T[0, 0, :])
    ctr = 0
    while not it.finished:
        # Get current index of iterator (a tuple)
        idx = it.multi_index
        # Get temperature and pressure at current index
        t_i = temp[idx]
        p_i = pg[idx]
        # Interpolate ionization degree and f, save to current index
        ion[idx] = spline_ion.ev(t_i, p_i)
        f[idx] = spline_f.ev(t_i, p_i)
        # Advance iterator and print out progress
        ctr += 1
        if ctr % 250 == 0:
            print_tools.progress(idx[-1], tot_points, '-- interpolating...')
        it.iternext()
    print_tools.progress(tot_points, tot_points, '-- completed.')
    print("\n")

    # Save Numpy arrays for easy access later on
    if settings.saveFiles:
        np.save("interpolated_files/ion_" + filen, ion)
        np.save("interpolated_files/f_param_" + filen, f)
        print("Interpolated arrays saved to")
        print("    interpolated_files/ion_" + filen + ".npy")
        print("    interpolated_files/f_param_" + filen + ".npy")
    data.ion = ion
    data.f_param = f * 1e16  # Parameter f is tabulated in units of 10^16 cm-3
    return ion, f * 1e16
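# Hypothetical usage sketch for get_i_f (the loader name and file are
# illustrative, not part of this module's API):
#
#   data = load_reduced_data("prominence0050.dat")  # hypothetical loader
#   ion, f = get_i_f(data, altitude=12000)  # rounded to the 10000 km table
#   print(ion.shape, ion.min(), ion.max())  # ionization degree per point
#
# The returned f is already rescaled by 1e16, matching the units in which
# the f-parameter is tabulated (10^16 cm-3). Repeated calls in the same run
# return the cached data.ion / data.f_param arrays without re-interpolating.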
def get_amr_data_multiprocessing(dat):
    """
    Method to regrid the entire mesh using the multiprocessing module.
    Same principle as the get_amr_data() method, except that each block
    that needs regridding is passed on to one of the processors in use.

    :param dat: .dat file, opened in binary mode.
    :return: Dictionary containing grid data.
    """
    # Perform version check
    PY2 = sys.version_info[0] == 2

    h = get_header(dat)
    blocks = get_block_data(dat)
    refined_nx = 2**(h['levmax'] - 1) * h['domain_nx']
    domain_shape = np.append(refined_nx, h['nw'])
    d = np.zeros(domain_shape, order='F')
    max_lvl = h['levmax']

    # Get the number of blocks that need regridding
    print_regrid_amount(blocks, max_lvl)

    # Create multiprocessing iterable
    block_iterable = [(b, h) for b in blocks]

    # Create shared variables for multiprocess progress tracking
    init_progress = multiprocessing.Value("i", 0)
    mp_bool = multiprocessing.Value("i", True)
    total_blocks = multiprocessing.Value("i", len(blocks))

    # Initialize multiprocessing pool
    pool = multiprocessing.Pool(
        initializer=multiprocessing_init,
        initargs=[init_progress, mp_bool, total_blocks],
        processes=settings.nb_of_procs)
    print_tools.progress(0, 100, status='-- iterating over blocks...')

    # The array blocks_regridded contains the data for each regridded block.
    # :note: pool.(star)map preserves the input order during parallelization,
    #        i.e. blocks_regridded[i] equals the calculation for blocks[i]
    if h['ndim'] == 1:
        if PY2:
            blocks_regridded = np.array(
                pool.map(interpolate_block_1d_unpack, block_iterable))
        else:
            blocks_regridded = np.array(
                pool.starmap(interpolate_block_1d, block_iterable))
    elif h['ndim'] == 2:
        if PY2:
            blocks_regridded = np.array(
                pool.map(interpolate_block_2d_unpack, block_iterable))
        else:
            blocks_regridded = np.array(
                pool.starmap(interpolate_block_2d, block_iterable))
    else:
        if PY2:
            blocks_regridded = np.array(
                pool.map(interpolate_block_3d_unpack, block_iterable))
        else:
            blocks_regridded = np.array(
                pool.starmap(interpolate_block_3d, block_iterable))
    pool.close()
    pool.join()
    print_tools.progress(100, 100, status='-- completed.')
    print("\n")

    # Fill arrays with regridded data
    for i in range(len(blocks)):
        b = blocks[i]
        block_lvl = b['lvl']
        block_idx = b['ix']
        grid_diff = 2**(max_lvl - block_lvl)
        max_idx = block_idx * grid_diff
        min_idx = max_idx - grid_diff
        idx0 = min_idx * h['block_nx']
        if h['ndim'] == 1:
            if block_lvl == max_lvl:
                idx1 = idx0 + h['block_nx']
                d[idx0[0]:idx1[0], :] = b['w']
            else:
                idx1 = idx0 + (h['block_nx'] * grid_diff)
                d[idx0[0]:idx1[0], :] = blocks_regridded[i]
        elif h['ndim'] == 2:
            if block_lvl == max_lvl:
                idx1 = idx0 + h['block_nx']
                d[idx0[0]:idx1[0], idx0[1]:idx1[1], :] = b['w']
            else:
                idx1 = idx0 + (h['block_nx'] * grid_diff)
                d[idx0[0]:idx1[0], idx0[1]:idx1[1], :] = blocks_regridded[i]
        elif h['ndim'] == 3:
            if block_lvl == max_lvl:
                idx1 = idx0 + h['block_nx']
                d[idx0[0]:idx1[0], idx0[1]:idx1[1], idx0[2]:idx1[2], :] = b['w']
            else:
                idx1 = idx0 + (h['block_nx'] * grid_diff)
                d[idx0[0]:idx1[0], idx0[1]:idx1[1],
                  idx0[2]:idx1[2], :] = blocks_regridded[i]
        else:
            raise IOError("Unknown number of dimensions %s" % h['ndim'])

    save_regridded_data(d)
    return d
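# Python 2's multiprocessing.Pool has no starmap(), hence the PY2 branches
# above fall back to pool.map() with the *_unpack helpers. A minimal sketch
# of such a helper, assuming it simply unpacks the (block, header) tuple
# (names taken from the calls above; the actual helpers may differ):
#
#   def interpolate_block_2d_unpack(args):
#       b, h = args
#       return interpolate_block_2d(b, h)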
def get_amr_data(dat):
    """
    Returns a uniform grid in the case the mesh is not uniformly refined,
    hence when the method call to get_uniform_data() throws an IOError.
    This method calculates the maximum refinement level present in the grid
    and regrids the entire mesh to that level. Blocks at a lower (coarser)
    level than the maximum are refined using linear interpolation.

    :param dat: .dat file, opened in binary mode.
    :return: Dictionary containing grid data.
    :raise IOError: If the number of dimensions in the header is not equal
                    to 1, 2 or 3.
    """
    h = get_header(dat)
    blocks = get_block_data(dat)
    refined_nx = 2**(h['levmax'] - 1) * h['domain_nx']

    # Perform regridding to the finest level
    domain_shape = np.append(refined_nx, h['nw'])
    d = np.zeros(domain_shape, order='F')
    max_lvl = h['levmax']

    # Get the number of blocks that need regridding
    print_regrid_amount(blocks, max_lvl)

    # No multiprocessing, so set mp_activated to False
    mp_bool = multiprocessing.Value("i", False)
    multiprocessing_init(0, mp_bool, len(blocks))

    counter = 0
    print_tools.progress(counter, len(blocks),
                         status='-- iterating over blocks...')
    for b in blocks:
        block_lvl = b['lvl']
        block_idx = b['ix']
        grid_diff = 2**(max_lvl - block_lvl)
        max_idx = block_idx * grid_diff
        min_idx = max_idx - grid_diff
        if h['ndim'] == 1:
            idx0 = min_idx * h['block_nx']
            if block_lvl == max_lvl:
                idx1 = idx0 + h['block_nx']
                d[idx0[0]:idx1[0], :] = b['w']
            else:
                idx1 = idx0 + (h['block_nx'] * grid_diff)
                d[idx0[0]:idx1[0], :] = interpolate_block_1d(b, h)
        elif h['ndim'] == 2:
            idx0 = min_idx * h['block_nx']
            if block_lvl == max_lvl:
                # Block is on the finest level, copy it over as-is
                idx1 = idx0 + h['block_nx']
                d[idx0[0]:idx1[0], idx0[1]:idx1[1], :] = b['w']
            else:
                # Block is not on the finest level, so interpolate
                idx1 = idx0 + (h['block_nx'] * grid_diff)
                d[idx0[0]:idx1[0], idx0[1]:idx1[1], :] = interpolate_block_2d(b, h)
        elif h['ndim'] == 3:
            idx0 = min_idx * h['block_nx']
            if block_lvl == max_lvl:
                idx1 = idx0 + h['block_nx']
                d[idx0[0]:idx1[0], idx0[1]:idx1[1], idx0[2]:idx1[2], :] = b['w']
            else:
                idx1 = idx0 + (h['block_nx'] * grid_diff)
                d[idx0[0]:idx1[0], idx0[1]:idx1[1],
                  idx0[2]:idx1[2], :] = interpolate_block_3d(b, h)
        else:
            raise IOError("Unknown number of dimensions %s" % h['ndim'])
        counter += 1
        if counter % 10 == 0 or counter == len(blocks):
            print_tools.progress(counter, len(blocks),
                                 status='-- iterating over blocks...')
    print("\n")
    save_regridded_data(d)
    return d
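# Worked example of the index arithmetic in the regridding loops above,
# assuming a 2D block at level 2 with 1-based block index ix = [3, 2],
# block_nx = [8, 8] and max_lvl = 4 (values are illustrative):
#
#   grid_diff = 2**(4 - 2)             # = 4 fine cells per coarse cell
#   max_idx   = np.array([3, 2]) * 4   # = [12, 8]
#   min_idx   = max_idx - 4            # = [8, 4]
#   idx0      = min_idx * 8            # = [64, 32], lower corner on fine grid
#   idx1      = idx0 + 8 * 4           # = [96, 64], upper corner after regridding
#
# so the interpolated 32x32 block fills d[64:96, 32:64, :].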