def test_dist_nldas_file(self):
    """Distribute an NLDAS forcing PFB onto a 2x2x1 process grid and verify
    the .dist subgrid offsets and that the distributed data matches the
    original file's data."""
    test = PFData('NLDAS.APCP.000001_to_000024.pfb')
    test.distFile(P=2, Q=2, R=1,
                  outFile='NLDAS.APCP.000001_to_000024.pfb.tmp')
    out_file = PFData('NLDAS.APCP.000001_to_000024.pfb.tmp')
    # Expected byte offsets of each subgrid recorded in the .dist file.
    expected_offsets = [0, 84772, 165448, 246124, 322960]
    with open('NLDAS.APCP.000001_to_000024.pfb.tmp.dist', 'r') as dist_file:
        dist_lines = dist_file.readlines()
    # Plain loop instead of a list comprehension used only for its
    # assertion side effects.
    for line, val in zip(dist_lines, expected_offsets):
        self.assertEqual(int(line.rstrip('\n')), val)
    self.assertEqual(0, out_file.loadHeader(),
                     'should load distributed file header')
    self.assertEqual(0, out_file.loadData(), 'should load distributed data')
    self.assertIsNone(
        np.testing.assert_array_equal(test.getDataAsArray(),
                                      out_file.getDataAsArray()),
        'should find matching data values in original and distributed files')
    test.close()
    out_file.close()
    os.remove('NLDAS.APCP.000001_to_000024.pfb.tmp')
    os.remove('NLDAS.APCP.000001_to_000024.pfb.tmp.dist')
def test_create_from_data(self):
    """Build a PFData object directly from an ndarray, write it to disk,
    and confirm header fields and data survive a read back."""
    data = np.random.random_sample((50, 49, 31))
    test = PFData(data)
    # Grid extents derive from the array shape (z, y, x); origin and
    # process topology default to zero / single process.
    for expected, actual in ((31, test.getNX()), (49, test.getNY()),
                             (50, test.getNZ()), (1, test.getP()),
                             (1, test.getQ()), (1, test.getR()),
                             (0, test.getX()), (0, test.getY()),
                             (0, test.getZ())):
        self.assertEqual(expected, actual)
    test.writeFile('test_write_raw.pfb')
    test_read = PFData('test_write_raw.pfb')
    test_read.loadHeader()
    test_read.loadData()
    # The file read back must report the same geometry.
    for expected, actual in ((0, test_read.getX()), (0, test_read.getY()),
                             (0, test_read.getZ()), (31, test_read.getNX()),
                             (49, test_read.getNY()), (50, test_read.getNZ()),
                             (1, test_read.getP()), (1, test_read.getQ()),
                             (1, test_read.getR())):
        self.assertEqual(expected, actual)
    self.assertIsNone(
        np.testing.assert_array_equal(data, test_read.getDataAsArray()),
        'Data written to array should exist in '
        'written PFB file.')
    del data
    test.close()
    test_read.close()
    os.remove('test_write_raw.pfb')
def test_dist_file(self):
    """Distribute press.init.pfb onto a 2x2x1 process grid and verify the
    .dist subgrid offsets and that the distributed data matches the
    original file's data."""
    test = PFData('press.init.pfb')
    test.distFile(P=2, Q=2, R=1, outFile='press.init.pfb.tmp')
    out_file = PFData('press.init.pfb.tmp')
    # Expected byte offsets of each subgrid recorded in the .dist file.
    expected_offsets = [0, 176500, 344536, 512572, 672608]
    with open('press.init.pfb.tmp.dist', 'r') as dist_file:
        dist_lines = dist_file.readlines()
    # Plain loop instead of a list comprehension used only for its
    # assertion side effects.
    for line, val in zip(dist_lines, expected_offsets):
        self.assertEqual(int(line.rstrip('\n')), val)
    self.assertEqual(0, out_file.loadHeader(),
                     'should load distributed file header')
    self.assertEqual(0, out_file.loadData(), 'should load distributed data')
    self.assertIsNone(
        np.testing.assert_array_equal(test.getDataAsArray(),
                                      out_file.getDataAsArray()),
        'should find matching data values in original and distributed files')
    test.close()
    out_file.close()
    os.remove('press.init.pfb.tmp')
    os.remove('press.init.pfb.tmp.dist')
def test_compare(self):
    """compare() should report no difference for identical loads, flag a
    changed X origin, and pinpoint a modified data cell by (z, y, x)."""
    left = PFData('press.init.pfb')
    left.loadHeader()
    left.loadData()
    right = PFData('press.init.pfb')
    right.loadHeader()
    right.loadData()
    self.assertEqual(PFData.differenceType_none, left.compare(right)[0],
                     "test1 and test2 are the same")
    # Perturb the X origin, verify it is detected, then restore it.
    left.setX(left.getX() + 1.0)
    self.assertEqual(PFData.differenceType_x, left.compare(right)[0],
                     "The x values differ")
    left.setX(left.getX() - 1.0)
    # Perturb a single data cell through the numpy view and verify the
    # difference plus its coordinates are reported, then restore.
    view = left.getDataAsArray()
    view[1][2][3] += 1.0
    diff_type, zyx = left.compare(right)
    self.assertEqual(PFData.differenceType_data, diff_type,
                     "The data values differ")
    self.assertEqual((1, 2, 3), zyx,
                     "The differing data's coordinates are correct")
    view[1][2][3] -= 1.0
    left.close()
    right.close()
def test_load_data_threaded(self):
    """loadDataThreaded() must produce data identical to loadData() for
    1, 8, and 40 threads (40 exceeds the number of subgrids)."""
    base = PFData('press.init.pfb')
    base.loadHeader()
    base.loadData()
    loaded = []
    for num_threads in (1, 8, 40):
        candidate = PFData('press.init.pfb')
        candidate.loadHeader()
        candidate.loadPQR()  # required before loadDataThreaded()
        candidate.loadDataThreaded(num_threads)
        self.assertEqual(PFData.differenceType_none,
                         base.compare(candidate)[0],
                         f"base and test{num_threads} are the same")
        loaded.append(candidate)
    base.close()
    for candidate in loaded:
        candidate.close()
def test_good_filename(self):
    """Header fields of the sample PFB should match its known geometry."""
    test = PFData('press.init.pfb')
    self.assertEqual(0, test.loadHeader(),
                     'should load header of file that exists')
    # (expected, actual, message) for every header field we care about.
    checks = (
        (41, test.getNX(), 'sample file should have 41 columns'),
        (41, test.getNY(), 'sample file should have 41 rows'),
        (50, test.getNZ(), 'sample file should have 50 z-layers'),
        (0, test.getX(), 'sample file starts at X=0'),
        (0, test.getY(), 'sample file starts at Y=0'),
        (0, test.getZ(), 'sample file starts at Z=0'),
        (16, test.getNumSubgrids(), 'sample file should have 16 subgrids'),
    )
    for expected, actual, message in checks:
        self.assertEqual(expected, actual, message)
    test.close()
def pfread(pfbfile):
    """
    Read a pfb file and return data as an ndarray

    :param pfbfile: path to pfb file
    :return: An ndarray of ndim=3, with shape (nz, ny, nx)
    """
    # Guard clause: fail fast on a missing input file.
    if not os.path.exists(pfbfile):
        raise RuntimeError(f'{pfbfile} not found')

    handle = PFData(pfbfile)
    handle.loadHeader()
    handle.loadData()
    # moveDataArray() transfers ownership of the buffer, so closing the
    # handle afterwards is safe.
    result = handle.moveDataArray()
    handle.close()
    assert result.ndim == 3, 'Only 3D arrays are supported'
    return result
def pfwrite(arr, pfbfile, dx=1, dy=1, dz=1, overwrite=False):
    """
    Save an ndarray to a pfb file

    :param arr: ndarray to save (must be 3-dimensional)
    :param pfbfile: path to pfb file
    :param dx: grid spacing along x
    :param dy: grid spacing along y
    :param dz: grid spacing along z
    :param overwrite: whether to overwrite the file if it exists
    :return: None on success. Raises Exception on failure.
    """
    # Guard clauses: refuse to clobber an existing file and reject
    # arrays that are not 3-dimensional.
    if os.path.exists(pfbfile) and not overwrite:
        raise RuntimeError(f'{pfbfile} already exists')
    assert arr.ndim == 3, 'Only 3D arrays are supported'

    writer = PFData()
    writer.setDataArray(arr)
    writer.setDX(dx)
    writer.setDY(dy)
    writer.setDZ(dz)
    writer.writeFile(pfbfile)
    writer.close()
def test_load_data_threaded_perf(self):
    """loadDataThreaded() with 4 threads should spend less total time than
    loadData() over repeated loads of the same PFB.

    Fix: every PFData instance is now closed inside its loop — previously
    only the instance from the final iteration was closed, leaking the
    other 9999 handles per loop.
    """
    iterations = 10000
    # loadData - Not using threads
    non_threaded_time = 0
    for _ in range(iterations):
        # Little Washita pressure PFB with 2x2x1 grid size
        base = PFData('LW.out.press.00000.pfb')
        base.loadHeader()
        start = time.time_ns()
        base.loadData()
        non_threaded_time += time.time_ns() - start
        base.close()  # close each instance, not just the last one

    # loadDataThreaded - Using 4 threads
    num_threads = 4
    threaded_time = 0
    for _ in range(iterations):
        test = PFData('LW.out.press.00000.pfb')
        test.loadHeader()
        start = time.time_ns()
        test.loadPQR()  # loadPQR() must be called before loadDataThreaded()
        test.loadDataThreaded(num_threads)
        threaded_time += time.time_ns() - start
        test.close()  # close each instance, not just the last one

    # loadDataThreaded() should have less total time spent than loadData()
    self.assertTrue(
        threaded_time < non_threaded_time,
        f'Using {num_threads} threads has degraded the performance of loadDataThreaded()'
    )

    # Display performance increase in percent change
    pct_change = 100 * abs(threaded_time - non_threaded_time) / non_threaded_time
    print(
        f'{pct_change:.2f}% performance increase when using LoadDataThreaded() with {num_threads} threads'
    )
def read_file(infile):
    """read an input file and return a 3d numpy array

    Parameters
    ----------
    infile : str
        file to open (.pfb, .sa, .tif, .tiff)

    Returns
    -------
    res_arr : ndarray
        a 3d numpy array with data from file in (z,y,x) format with y axis
        0 at bottom
    """
    infile_path = Path(infile)
    # get extension
    ext = infile_path.suffix
    file_string_path = os.fspath(infile_path)
    if ext in ['.tif', '.tiff']:
        res_arr = gdal.Open(file_string_path).ReadAsArray()
        # promote a single-band 2D raster to (1, y, x)
        if res_arr.ndim == 2:
            res_arr = res_arr[np.newaxis, ...]
        # flip y axis so tiff aligns with PFB native alignment
        res_arr = np.flip(res_arr, axis=1)
    elif ext == '.sa':  # parflow ascii file
        with open(file_string_path, 'r') as fi:
            header = fi.readline()
        nx, ny, nz = [int(x) for x in header.strip().split(' ')]
        arr = pd.read_csv(file_string_path, skiprows=1, header=None).values
        # note: the redundant trailing [:, :, :] full slice was removed;
        # reshape already yields the (nz, ny, nx) array
        res_arr = np.reshape(arr, (nz, ny, nx))
    elif ext == '.pfb':  # parflow binary file
        pfdata = PFData(file_string_path)
        pfdata.loadHeader()
        pfdata.loadData()
        # moveDataArray() transfers buffer ownership, so close() is safe
        res_arr = pfdata.moveDataArray()
        pfdata.close()
    else:
        raise ValueError('can not read file type ' + ext)
    return res_arr
def test_validate_cell_values(self):
    """Spot-check known cell values and confirm the ndarray view agrees
    with direct (x, y, z) accessor indexing."""
    test = PFData('press.init.pfb')
    self.assertEqual(0, test.loadHeader(),
                     'should load header of file that exists')
    self.assertEqual(0, test.loadData(),
                     'should load data from valid file')
    data = test.getDataAsArray()
    self.assertIsNotNone(
        data, 'data from object should be available as python object')
    self.assertSequenceEqual((50, 41, 41), data.shape)
    # (expected value, (x, y, z), message) for known cells.
    expected_cells = (
        (98.003604098773, (0, 0, 0), 'valid data in cell (0,0,0)'),
        (97.36460429313328, (40, 0, 0), 'data in cell (40,0,0)'),
        (98.0043134691891, (0, 1, 0), 'data in cell (0, 1, 0)'),
        (98.00901307022781, (1, 0, 0), 'data in cell (1, 0, 0)'),
        (92.61370155558751, (21, 1, 2), 'data in cell (21, 1, 2)'),
        (7.98008728357588, (0, 1, 45), 'data in cell (0, 1, 45)'),
        (97.30205516102234, (22, 1, 0), 'valid data in cell (22,1,0)'),
    )
    for value, (x, y, z), message in expected_cells:
        self.assertAlmostEqual(value, test(x, y, z), 12, message)
    # The accessor test(x, y, z) and the numpy view data[z, y, x] must
    # agree cell-for-cell at every checked coordinate.
    for x, y, z in ((0, 0, 0), (40, 0, 0), (0, 1, 0), (1, 0, 0),
                    (21, 1, 2), (0, 1, 45), (22, 1, 0)):
        self.assertEqual(test(x, y, z), data[z, y, x],
                         f'data array and c array match values at ({x},{y},{z})')
    test.close()
def test_read_write_data(self):
    """Round-trip a PFB through writeFile() and confirm the data matches,
    while the raw bytes intentionally differ from the original file."""
    source = PFData('press.init.pfb')
    self.assertEqual(0, source.loadHeader(),
                     'should load header of file that exists')
    self.assertEqual(0, source.loadPQR(),
                     'should load PQR of file that exists')
    self.assertEqual(0, source.loadData(),
                     'should load data from valid file')
    self.assertEqual(0, source.writeFile('press.init.pfb.tmp'),
                     'should write data from previously loaded file')
    copy = PFData('press.init.pfb.tmp')
    copy.loadHeader()
    copy.loadData()
    copy.loadPQR()
    self.assertIsNone(
        np.testing.assert_array_equal(
            source.viewDataArray(), copy.viewDataArray(),
            'should read back same values we wrote'))
    # This assertion (that the files are identical) is failing in Python and in C++
    # because the original test input file was written by a tool that incorrectly set the value
    self.assertNotEqual(calculate_sha1_hash('press.init.pfb'),
                        calculate_sha1_hash('press.init.pfb.tmp'),
                        'sha1 hash of input and output files should not match')
    same, byte_diff = byte_compare_files('press.init.pfb',
                                         'press.init.pfb.tmp')
    self.assertFalse(same,
                     'press.init.pfb should differ from version just written')
    self.assertEqual(92, byte_diff, 'first byte difference at byte 92')
    source.close()
    copy.close()
    os.remove('press.init.pfb.tmp')
def test_manipulate_data(self):
    """Mutating the ndarray view must be reflected by the accessor and
    must persist through a write/read round trip."""
    source = PFData('press.init.pfb')
    self.assertEqual(0, source.loadHeader(),
                     'should load header of file that exists')
    self.assertEqual(0, source.loadData(),
                     'should load data from valid file')
    view = source.getDataAsArray()
    self.assertSequenceEqual((50, 41, 41), view.shape,
                             'test file array should have shape (50,41,41)')
    self.assertAlmostEqual(98.003604098773, source(0, 0, 0), 12,
                           'valid data in cell (0,0,0)')
    # Poke three cells through the numpy view; note the view is indexed
    # (z, y, x) while the accessor takes (x, y, z).
    for z, y, x in ((0, 0, 0), (0, 0, 40), (2, 1, 21)):
        view[z, y, x] = 1
    for x, y, z in ((0, 0, 0), (40, 0, 0), (21, 1, 2)):
        self.assertEqual(1, source(x, y, z),
                         'data update affects underlying array')
    self.assertEqual(0, source.writeFile('press.init.pfb.tmp'),
                     'able to write updated data to output file')
    round_trip = PFData('press.init.pfb.tmp')
    round_trip.loadHeader()
    round_trip.loadData()
    for x, y, z in ((0, 0, 0), (40, 0, 0), (21, 1, 2)):
        self.assertEqual(1, round_trip(x, y, z),
                         'updates to data written to file can be read back')
    source.close()
    round_trip.close()
    os.remove('press.init.pfb.tmp')
rich_fbx.Solver.Nonlinear.FlowBarrierX = True
rich_fbx.FBx.Type = 'PFBFile'
rich_fbx.Geom.domain.FBx.FileName = 'Flow_Barrier_X.pfb'

## write flow boundary file
# Uniform barrier of 1.0 everywhere, with a 1E-3 reduction across the x
# face from cell 10 (index 9) to cell 11 — vectorized slice assignment
# replaces the per-(i, j) double loop.
FBx_data = np.full((20, 20, 20), 1.0)
FBx_data[:, :, 9] = 0.001
FBx_data_pfb = PFData(FBx_data)
FBx_data_pfb.writeFile(get_absolute_path('Flow_Barrier_X.pfb'))
FBx_data_pfb.close()
rich_fbx.dist('Flow_Barrier_X.pfb')

#-----------------------------------------------------------------------------
# Wells
#-----------------------------------------------------------------------------
rich_fbx.Wells.Names = ''

#-----------------------------------------------------------------------------
# Time Cycles
#-----------------------------------------------------------------------------
rich_fbx.Cycle.Names = 'constant'
rich_fbx.Cycle.constant.Names = 'alltime'
rich_fby.Solver.Nonlinear.FlowBarrierY = True
rich_fby.FBy.Type = 'PFBFile'
rich_fby.Geom.domain.FBy.FileName = 'Flow_Barrier_Y.pfb'

## write flow barrier file
# Uniform barrier of 1.0 everywhere, with a 1E-3 reduction across the y
# face from cell 10 (index 9) to cell 11 — vectorized slice assignment
# replaces the per-(i, j) double loop.
FBy_data = np.full((20, 20, 20), 1.0)
FBy_data[:, 9, :] = 0.001
FBy_data_pfb = PFData(FBy_data)
FBy_data_pfb.writeFile(get_absolute_path('Flow_Barrier_Y.pfb'))
FBy_data_pfb.close()
rich_fby.dist('Flow_Barrier_Y.pfb')

#-----------------------------------------------------------------------------
# Wells
#-----------------------------------------------------------------------------
rich_fby.Wells.Names = ''

#-----------------------------------------------------------------------------
# Time Cycles
#-----------------------------------------------------------------------------
rich_fby.Cycle.Names = 'constant'
rich_fby.Cycle.constant.Names = 'alltime'
def test_set_index_order(self):
    """setIndexOrder() should case-fold (and clamp trailing junk in) its
    argument, reject unknown orders, and gate writeFile() on 'zyx'."""
    test = PFData('press.init.pfb')
    self.assertEqual(test.getIndexOrder(), 'zyx',
                     'indexOrder should equal \'zyx\'')
    # Any casing (and trailing junk) of 'xyz' normalizes to 'xyz'.
    for requested in ('xyz', 'xYz', 'xYZ', 'XYZ', 'XYZZZZ'):
        test.setIndexOrder(requested)
        self.assertEqual(test.getIndexOrder(), 'xyz',
                         'indexOrder should equal \'xyz\'')
    # Should not work, should still equal 'xyz'
    test.setIndexOrder('abc')
    self.assertEqual(test.getIndexOrder(), 'xyz',
                     'indexOrder should equal \'xyz\'')
    # Should not be able to write to file when indexOrder == 'xyz'
    self.assertEqual(
        test.writeFile('test_write_index_order.pfb'), 1,
        'Should not be able to write to file when indexOrder == \'xyz\'')
    # Any casing of 'zyx' normalizes to 'zyx'.
    for requested in ('ZYX', 'zYx'):
        test.setIndexOrder(requested)
        self.assertEqual(test.getIndexOrder(), 'zyx',
                         'indexOrder should equal \'zyx\'')
    # Should be able to write to file
    self.assertEqual(
        test.writeFile('test_write_index_order.pfb'), 0,
        'Should be able to write to file when indexOrder == \'zyx\'')
    # Read file, indexOrder should equal 'zyx'
    # NOTE(review): this assertion checks `test`, not `test_read`, just
    # as in the original — confirm that is the intended object.
    test_read = PFData('test_write_index_order.pfb')
    test_read.loadHeader()
    test_read.loadData()
    self.assertEqual(test.getIndexOrder(), 'zyx',
                     'indexOrder should equal \'zyx\'')
    test.close()
    test_read.close()
    os.remove('test_write_index_order.pfb')
control2 = st.button(" Animate Results ")
# Use truthiness instead of comparing to True with '=='.
if control2:
    N = 100
    time = np.zeros(
        [N + 1])  # time array, we will probably want to swap with a date
    outflow = np.zeros([N + 1])  # array to load in the meterological forcing
    sat = np.zeros([N + 1, 300, 20])
    # Hoist the loop-invariant filename template out of the loop.
    base = base_dir + "/dunne_over/Dunne.out.satur.{:05d}.pfb"
    for icount in range(N):
        filename = base.format(icount)
        data_obj = PFData(filename)
        data_obj.loadHeader()
        data_obj.loadData()
        data_arr = data_obj.getDataAsArray()
        data_obj.close()
        # Clamp negative saturations to zero; keep the single y-slice.
        sat[icount, :, :] = np.where(data_arr[:, 0, :] <= 0.0, 0.0,
                                     data_arr[:, 0, :])

    fig, ax = plt.subplots()
    image = st.pyplot(plt)
    for i in range(N):
        ax.cla()
        ax.imshow(sat[i, :, :],
                  vmin=0.1,
                  vmax=1.0,
                  origin='lower',
                  aspect=0.015,
                  cmap='Blues',
                  interpolation='none')  #,extent=[0,100,0,1])
        ax.set_title("frame {}".format(i))
## write flow boundary file
# One PFB per forcing scenario; flux is applied only on the top z layer.
flux_file_names = ['dry', 'rainflux_all', 'rainflux_left', 'rainflux_right']
top = nz - 1
for name in flux_file_names:
    array = np.full((nz, ny, nx), 0.0)
    # The names are mutually exclusive, so an if/elif chain is equivalent
    # to the original independent ifs.
    if name == 'rainflux_all':
        array[top, :, :] = rain_flux * 2
    elif name == 'rainflux_left':
        # NOTE(review): columns 0-48 here vs 50-(nx-1) below leave column
        # 49 dry in both halves — confirm the gap is intentional.
        array[top, :, 0:49] = rain_flux
    elif name == 'rainflux_right':
        array[top, :, 50:nx] = rain_flux
    pfb = PFData(array)
    pfb.writeFile(get_absolute_path(f'{name}.pfb'))
    pfb.close()
    sandtank.dist(f'{name}.pfb')

# -----------------------------------------------------------------------------
# Boundary Condition definitions
# -----------------------------------------------------------------------------
sandtank.BCPressure.PatchNames = sandtank.Geom.domain.Patches

sandtank.Patch.x_lower.BCPressure.Type = 'DirEquilRefPatch'
sandtank.Patch.x_lower.BCPressure.Cycle = 'constant'
sandtank.Patch.x_lower.BCPressure.RefGeom = 'domain'
sandtank.Patch.x_lower.BCPressure.RefPatch = 'z_lower'
sandtank.Patch.x_lower.BCPressure.alltime.Value = hleft
def test_open_close(self):
    """An empty PFData object must close cleanly with no backing file."""
    empty = PFData()
    self.assertIsNone(empty.close(),
                      'should be able to open and close an empty object')