def test_geometry_3(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    f = cf.read(self.geometry_3_file, verbose=0)
    self.assertEqual(len(f), 2, 'f = ' + repr(f))

    for g in f:
        self.assertTrue(g.equals(g.copy(), verbose=2))
        self.assertEqual(len(g.auxiliary_coordinates), 3)

    g = f[0]
    for axis in ('X', 'Y', 'Z'):
        coord = g.construct('axis=' + axis)
        self.assertFalse(coord.has_node_count(), 'axis=' + axis)
        self.assertFalse(coord.has_part_node_count(), 'axis=' + axis)
        self.assertFalse(coord.has_interior_ring(), 'axis=' + axis)

    cf.write(f, self.tempfilename, Conventions='CF-' + VN, verbose=0)

    f2 = cf.read(self.tempfilename, verbose=0)
    self.assertEqual(len(f2), 2, 'f2 = ' + repr(f2))

    for a, b in zip(f, f2):
        self.assertTrue(a.equals(b, verbose=2))
def test_geometry_2(self):
    f = cf.read(self.geometry_2_file, verbose=0)
    self.assertEqual(len(f), 2, "f = " + repr(f))

    for g in f:
        self.assertTrue(g.equals(g.copy(), verbose=2))
        self.assertEqual(len(g.auxiliary_coordinates()), 3)

    g = f[0]
    for axis in ("X", "Y", "Z"):
        coord = g.construct("axis=" + axis)
        self.assertTrue(coord.has_node_count(), "axis=" + axis)
        self.assertFalse(coord.has_part_node_count(), "axis=" + axis)
        self.assertFalse(coord.has_interior_ring(), "axis=" + axis)

    cf.write(f, self.tempfilename, Conventions="CF-" + VN, verbose=0)

    f2 = cf.read(self.tempfilename, verbose=0)
    self.assertEqual(len(f2), 2, "f2 = " + repr(f2))

    for a, b in zip(f, f2):
        self.assertTrue(a.equals(b, verbose=2))

    # Setting of node count properties
    coord = f[0].construct("axis=X")
    nc = coord.get_node_count()
    cf.write(f, self.tempfilename)

    nc.set_property("long_name", "Node counts")
    cf.write(f, self.tempfilename, verbose=0)

    nc.nc_set_variable("new_var_name")
    cf.write(f, self.tempfilename, verbose=0)
def test_DSG_indexed(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    f = cf.read(self.indexed)
    self.assertEqual(len(f), 2)

    # Select the specific humidity field
    q = [g for g in f
         if g.get_property('standard_name') == 'specific_humidity'][0]

    self.assertTrue(q._equals(q.data.array.mask, self.a.mask))
    self.assertTrue(
        q._equals(q.data.array, self.a),
        '\nself.a=\n' + str(self.a) + '\nq.array=\n' + str(q.array)
    )

    cf.write(f, tmpfile, verbose=0)
    g = cf.read(tmpfile)
    self.assertEqual(len(g), len(f))

    for i in range(len(f)):
        self.assertTrue(g[i].equals(f[i], verbose=2))
def test_read_CDL(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    subprocess.run(
        ' '.join(['ncdump', self.filename, '>', tmpfile]),
        shell=True, check=True)
    subprocess.run(
        ' '.join(['ncdump', '-h', self.filename, '>', tmpfileh]),
        shell=True, check=True)
    subprocess.run(
        ' '.join(['ncdump', '-c', self.filename, '>', tmpfilec]),
        shell=True, check=True)

    f0 = cf.read(self.filename)[0]
    f = cf.read(tmpfile)[0]
    h = cf.read(tmpfileh)[0]
    c = cf.read(tmpfilec)[0]

    self.assertTrue(f0.equals(f, verbose=2))

    self.assertTrue(f.construct('grid_latitude').equals(
        c.construct('grid_latitude'), verbose=2))
    self.assertTrue(f0.construct('grid_latitude').equals(
        c.construct('grid_latitude'), verbose=2))

    with self.assertRaises(Exception):
        _ = cf.read('test_read_write.py')
def test_read_mask(self):
    f = self.f0.copy()

    N = f.size

    f.data[1, 1] = cf.masked
    f.data[2, 2] = cf.masked

    f.del_property("_FillValue", None)
    f.del_property("missing_value", None)

    cf.write(f, tmpfile)

    g = cf.read(tmpfile)[0]
    self.assertEqual(numpy.ma.count(g.data.array), N - 2)

    g = cf.read(tmpfile, mask=False)[0]
    self.assertEqual(numpy.ma.count(g.data.array), N)

    g.apply_masking(inplace=True)
    self.assertEqual(numpy.ma.count(g.data.array), N - 2)

    f.set_property("_FillValue", 999)
    f.set_property("missing_value", -111)

    cf.write(f, tmpfile)

    g = cf.read(tmpfile)[0]
    self.assertEqual(numpy.ma.count(g.data.array), N - 2)

    g = cf.read(tmpfile, mask=False)[0]
    self.assertEqual(numpy.ma.count(g.data.array), N)

    g.apply_masking(inplace=True)
    self.assertEqual(numpy.ma.count(g.data.array), N - 2)
def test_write_reference_datetime(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    for reference_datetime in ('1751-2-3', '1492-12-30'):
        for chunksize in self.chunk_sizes:
            cf.chunksize(chunksize)
            f = cf.read(self.filename)[0]

            t = cf.DimensionCoordinate(
                data=cf.Data([123], 'days since 1750-1-1')
            )
            t.standard_name = 'time'
            axisT = f.set_construct(cf.DomainAxis(1))
            f.set_construct(t, axes=[axisT])

            cf.write(f, tmpfile, fmt='NETCDF4',
                     reference_datetime=reference_datetime)

            g = cf.read(tmpfile)[0]
            t = g.dimension_coordinate('T')
            self.assertEqual(
                t.Units, cf.Units('days since ' + reference_datetime),
                ('Units written were ' + repr(t.Units.reftime) +
                 ' not ' + repr(reference_datetime)))
    # --- End: for

    cf.chunksize(self.original_chunksize)
def test_read_write_string(self):
    f = cf.read(self.string_filename)

    n = int(len(f) / 2)

    for i in range(n):
        j = i + n
        self.assertTrue(
            f[i].data.equals(f[j].data, verbose=1),
            "{!r} {!r}".format(f[i], f[j]),
        )
        self.assertTrue(
            f[j].data.equals(f[i].data, verbose=1),
            "{!r} {!r}".format(f[j], f[i]),
        )

    # Note: Don't loop round all netCDF formats for better
    # performance. Just one netCDF3 and one netCDF4 format is
    # sufficient to test the functionality.
    for string0 in (True, False):
        for fmt0 in ("NETCDF4", "NETCDF3_CLASSIC"):
            cf.write(f, tmpfile0, fmt=fmt0, string=string0)

            for string1 in (True, False):
                for fmt1 in ("NETCDF4", "NETCDF3_CLASSIC"):
                    cf.write(f, tmpfile1, fmt=fmt1, string=string1)

                    for i, j in zip(cf.read(tmpfile1), cf.read(tmpfile0)):
                        self.assertTrue(i.equals(j, verbose=1))
def test_FieldList__mul__imul__(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    f = cf.FieldList()
    f = f * 4
    self.assertEqual(len(f), 0)
    self.assertIsInstance(f, cf.FieldList)

    f = cf.FieldList()
    f *= 4
    self.assertEqual(len(f), 0)
    self.assertIsInstance(f, cf.FieldList)

    f = cf.read(self.filename)
    f = f * 4
    self.assertEqual(len(f), 4)
    self.assertIsInstance(f, cf.FieldList)

    f = cf.read(self.filename)
    f *= 4
    self.assertEqual(len(f), 4)
    self.assertIsInstance(f, cf.FieldList)

    f = f * 2
    self.assertEqual(len(f), 8)
    self.assertIsInstance(f, cf.FieldList)

    f *= 3
    self.assertEqual(len(f), 24)
    self.assertIsInstance(f, cf.FieldList)
def test_read_aggregate(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    # Smoke-test the aggregate keyword: each call need only succeed
    f = cf.read(self.filename, aggregate=True)
    f = cf.read(self.filename, aggregate=False)
    f = cf.read(self.filename, aggregate={})
def test_DSG_indexed_contiguous(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    f = cf.read(self.indexed_contiguous, verbose=0)
    self.assertEqual(len(f), 2)

    # Select the specific humidity field
    q = f.select('specific_humidity')[0]

    qa = q.data.array

    for n in range(qa.shape[0]):
        for m in range(qa.shape[1]):
            self.assertTrue(
                q._equals(qa.mask[n, m], self.b.mask[n, m]),
                str(n) + ' ' + str(m) + ' ' +
                str(qa[n, m]) + ' ' + str(self.b[n, m]))

    message = repr(qa - self.b)
    # ... +'\n'+repr(qa[2,0])+'\n'+repr(self.b[2, 0])

    self.assertTrue(q._equals(qa, self.b), message)

    cf.write(f, self.tempfilename, verbose=0)

    g = cf.read(self.tempfilename, verbose=0)
    self.assertEqual(len(g), len(f))

    for i in range(len(f)):
        self.assertTrue(g[i].equals(f[i], verbose=2))
def test_geometry_4(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    f = cf.read(self.geometry_4_file, verbose=0)
    self.assertEqual(len(f), 2, 'f = ' + repr(f))

    for g in f:
        self.assertTrue(g.equals(g.copy(), verbose=2))
        self.assertEqual(len(g.auxiliary_coordinates), 3)

        for axis in ('X', 'Y'):
            coord = g.construct('axis=' + axis)
            self.assertTrue(coord.has_node_count(), 'axis=' + axis)
            self.assertFalse(coord.has_part_node_count(), 'axis=' + axis)
            self.assertFalse(coord.has_interior_ring(), 'axis=' + axis)

    cf.write(f, self.tempfilename, Conventions='CF-' + VN, verbose=0)

    f2 = cf.read(self.tempfilename, verbose=0)
    self.assertEqual(len(f2), 2, 'f2 = ' + repr(f2))

    for a, b in zip(f, f2):
        self.assertTrue(a.equals(b, verbose=2))

    # Setting of node count properties
    coord = f[0].construct('axis=X')
    nc = coord.get_node_count()
    cf.write(f, self.tempfilename)

    nc.set_property('long_name', 'Node counts')
    cf.write(f, self.tempfilename, verbose=0)

    nc.nc_set_variable('new_var_name')
    cf.write(f, self.tempfilename, verbose=0)
def test_read_write_string(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    f = cf.read(self.string_filename)

    n = int(len(f) / 2)

    for i in range(n):
        j = i + n
        self.assertTrue(
            f[i].data.equals(f[j].data, verbose=1),
            "{!r} {!r}".format(f[i], f[j]),
        )
        self.assertTrue(
            f[j].data.equals(f[i].data, verbose=1),
            "{!r} {!r}".format(f[j], f[i]),
        )

    f0 = cf.read(self.string_filename)

    for string0 in (True, False):
        for fmt0 in ("NETCDF4", "NETCDF3_CLASSIC"):
            cf.write(f0, tmpfile0, fmt=fmt0, string=string0)

            for string1 in (True, False):
                for fmt1 in ("NETCDF4", "NETCDF3_CLASSIC"):
                    cf.write(f0, tmpfile1, fmt=fmt1, string=string1)

                    for i, j in zip(cf.read(tmpfile1), cf.read(tmpfile0)):
                        self.assertTrue(i.equals(j, verbose=1))
def test_FieldList_append_extend(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    # Append
    f = cf.FieldList()

    f.append(cf.read(self.filename)[0])
    self.assertEqual(len(f), 1)
    self.assertIsInstance(f, cf.FieldList)

    f.append(f[0].copy())
    self.assertEqual(len(f), 2)
    self.assertIsInstance(f, cf.FieldList)

    f.append(f[0].copy())
    self.assertEqual(len(f), 3)

    # Extend
    f = cf.FieldList()

    f.extend(cf.read(self.filename))
    self.assertEqual(len(f), 1)
    self.assertIsInstance(f, cf.FieldList)

    f.extend(f.copy())
    self.assertEqual(len(f), 2)
    self.assertIsInstance(f, cf.FieldList)

    f.extend(f.copy())
    self.assertEqual(len(f), 4)
def test_read_write_format(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    for chunksize in self.chunk_sizes:
        cf.chunksize(chunksize)
        for fmt in (
            "NETCDF3_CLASSIC",
            "NETCDF3_64BIT",
            "NETCDF3_64BIT_OFFSET",
            "NETCDF3_64BIT_DATA",
            "NETCDF4",
            "NETCDF4_CLASSIC",
            "CFA",
        ):
            f = cf.read(self.filename)[0]
            f0 = f.copy()
            cf.write(f, tmpfile, fmt=fmt)
            g = cf.read(tmpfile, verbose=0)
            self.assertEqual(len(g), 1, "g = " + repr(g))
            g0 = g[0]
            self.assertTrue(
                f0.equals(g0, verbose=1),
                "Bad read/write of format {!r}".format(fmt),
            )
def test_write_reference_datetime(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    for reference_datetime in ("1751-2-3", "1492-12-30"):
        for chunksize in self.chunk_sizes:
            cf.chunksize(chunksize)
            f = cf.read(self.filename)[0]

            t = cf.DimensionCoordinate(
                data=cf.Data([123], "days since 1750-1-1"))
            t.standard_name = "time"
            axisT = f.set_construct(cf.DomainAxis(1))
            f.set_construct(t, axes=[axisT])

            cf.write(
                f,
                tmpfile,
                fmt="NETCDF4",
                reference_datetime=reference_datetime,
            )

            g = cf.read(tmpfile)[0]
            t = g.dimension_coordinate("T")
            self.assertEqual(
                t.Units,
                cf.Units("days since " + reference_datetime),
                ("Units written were " + repr(t.Units.reftime) +
                 " not " + repr(reference_datetime)),
            )
    # --- End: for

    cf.chunksize(self.original_chunksize)
def test_read_write_netCDF4_compress_shuffle(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    tmpfiles.append(tmpfile)

    for chunksize in self.chunk_sizes:
        cf.chunksize(chunksize)
        f = cf.read(self.filename)[0]
        for fmt in ("NETCDF4", "NETCDF4_CLASSIC", "CFA4"):
            for shuffle in (True,):
                for compress in (1,):  # range(10):
                    cf.write(
                        f,
                        tmpfile,
                        fmt=fmt,
                        compress=compress,
                        shuffle=shuffle,
                    )
                    g = cf.read(tmpfile)[0]
                    self.assertTrue(
                        f.equals(g, verbose=2),
                        "Bad read/write with lossless compression: "
                        "{0}, {1}, {2}".format(fmt, compress, shuffle),
                    )
    # --- End: for

    cf.chunksize(self.original_chunksize)
def test_PP_WGDOS_UNPACKING(self):
    f = cf.read(self.ppfilename)[0]

    self.assertTrue(f.minimum() > 221.71,
                    'Bad unpacking of WGDOS packed data')
    self.assertTrue(f.maximum() < 310.45,
                    'Bad unpacking of WGDOS packed data')

    array = f.array

    for chunksize in self.chunk_sizes:
        cf.chunksize(chunksize)
        f = cf.read(self.ppfilename)[0]
        for fmt in ('NETCDF4', 'CFA4'):
            cf.write(f, tmpfile, fmt=fmt)
            g = cf.read(tmpfile)[0]

            self.assertTrue((f.array == array).all(),
                            'Bad unpacking of PP WGDOS packed data')
            self.assertTrue(f.equals(g, verbose=2),
                            'Bad writing/reading. fmt=' + fmt)

    cf.chunksize(self.original_chunksize)
def test_DSG_indexed(self):
    f = cf.read(self.indexed)
    self.assertEqual(len(f), 2)

    # Select the specific humidity field
    q = [
        g
        for g in f
        if g.get_property("standard_name") == "specific_humidity"
    ][0]

    self.assertTrue(q._equals(q.data.array.mask, self.a.mask))
    self.assertTrue(
        q._equals(q.data.array, self.a),
        "\nself.a=\n" + str(self.a) + "\nq.array=\n" + str(q.array),
    )

    cf.write(f, tmpfile, verbose=0)
    g = cf.read(tmpfile)
    self.assertEqual(len(g), len(f))

    for i in range(len(f)):
        self.assertTrue(g[i].equals(f[i], verbose=2))
def test_DSG_indexed_contiguous(self):
    f = cf.read(self.indexed_contiguous, verbose=0)
    self.assertEqual(len(f), 2)

    # Select the specific humidity field
    q = f.select("specific_humidity")[0]

    qa = q.data.array

    for n in range(qa.shape[0]):
        for m in range(qa.shape[1]):
            self.assertTrue(
                q._equals(qa.mask[n, m], self.b.mask[n, m]),
                str(n) + " " + str(m) + " " +
                str(qa[n, m]) + " " + str(self.b[n, m]),
            )

    message = repr(qa - self.b)
    # ... +'\n'+repr(qa[2,0])+'\n'+repr(self.b[2, 0])

    self.assertTrue(q._equals(qa, self.b), message)

    cf.write(f, tmpfile, verbose=0)

    g = cf.read(tmpfile, verbose=0)
    self.assertEqual(len(g), len(f))

    for i in range(len(f)):
        self.assertTrue(g[i].equals(f[i], verbose=2))
def test_read_select(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    # Select on the field list
    f = cf.read(self.filename, select="eastward_wind")
    g = cf.read(self.filename)
    self.assertTrue(f.equals(g, verbose=2), "Bad read with select keyword")
def test_write_datatype(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    tmpfiles.append(tmpfile)

    for chunksize in self.chunk_sizes:
        cf.chunksize(chunksize)
        f = cf.read(self.filename)[0]
        self.assertEqual(f.dtype, numpy.dtype(float))
        cf.write(
            f,
            tmpfile,
            fmt="NETCDF4",
            datatype={numpy.dtype(float): numpy.dtype("float32")},
        )
        g = cf.read(tmpfile)[0]
        self.assertEqual(
            g.dtype,
            numpy.dtype("float32"),
            "datatype read in is " + str(g.dtype),
        )

    cf.chunksize(self.original_chunksize)

    # Keyword single
    f = cf.read(self.filename)[0]
    self.assertEqual(f.dtype, numpy.dtype(float))
    cf.write(f, tmpfile, fmt="NETCDF4", single=True)
    g = cf.read(tmpfile)[0]
    self.assertEqual(
        g.dtype,
        numpy.dtype("float32"),
        "datatype read in is " + str(g.dtype),
    )

    tmpfiles.append(tmpfile2)

    # Keyword double
    f = g
    self.assertEqual(f.dtype, numpy.dtype("float32"))
    cf.write(f, tmpfile2, fmt="NETCDF4", double=True)
    g = cf.read(tmpfile2)[0]
    self.assertEqual(
        g.dtype, numpy.dtype(float),
        "datatype read in is " + str(g.dtype))

    for single in (True, False):
        for double in (True, False):
            with self.assertRaises(Exception):
                _ = cf.write(g, double=double, single=single)
    # --- End: for

    datatype = {numpy.dtype(float): numpy.dtype("float32")}
    with self.assertRaises(Exception):
        _ = cf.write(g, datatype=datatype, single=True)

    with self.assertRaises(Exception):
        _ = cf.write(g, datatype=datatype, double=True)
def test_groups_compression(self):
    f = cf.example_field(4)

    ungrouped_file = ungrouped_file3
    grouped_file = grouped_file3

    f.compress('indexed_contiguous', inplace=True)
    f.data.get_count().nc_set_variable('count')
    f.data.get_index().nc_set_variable('index')

    cf.write(f, ungrouped_file, verbose=1)
    g = cf.read(ungrouped_file)[0]
    self.assertTrue(f.equals(g, verbose=2))

    # ------------------------------------------------------------
    # Move the field construct to the /forecast/model group
    # ------------------------------------------------------------
    g.nc_set_variable_groups(['forecast', 'model'])

    # ------------------------------------------------------------
    # Move the count variable to the /forecast group
    # ------------------------------------------------------------
    g.data.get_count().nc_set_variable_groups(['forecast'])

    # ------------------------------------------------------------
    # Move the index variable to the /forecast group
    # ------------------------------------------------------------
    g.data.get_index().nc_set_variable_groups(['forecast'])

    # ------------------------------------------------------------
    # Move the coordinates that span the element dimension to the
    # /forecast group
    # ------------------------------------------------------------
    name = 'altitude'
    g.construct(name).nc_set_variable_groups(['forecast'])

    # ------------------------------------------------------------
    # Move the sample dimension to the /forecast group
    # ------------------------------------------------------------
    g.data.get_count().nc_set_sample_dimension_groups(['forecast'])

    cf.write(g, grouped_file, verbose=1)

    nc = netCDF4.Dataset(grouped_file, 'r')
    self.assertIn(
        f.nc_get_variable(),
        nc.groups['forecast'].groups['model'].variables)
    self.assertIn(
        f.data.get_count().nc_get_variable(),
        nc.groups['forecast'].variables)
    self.assertIn(
        f.data.get_index().nc_get_variable(),
        nc.groups['forecast'].variables)
    self.assertIn(
        f.construct('altitude').nc_get_variable(),
        nc.groups['forecast'].variables)
    nc.close()

    h = cf.read(grouped_file, verbose=1)
    self.assertEqual(len(h), 1, repr(h))
    self.assertTrue(f.equals(h[0], verbose=2))
def test_read_squeeze(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    f = cf.read(self.filename, squeeze=True)
    f = cf.read(self.filename, unsqueeze=True)

    # squeeze and unsqueeze are mutually exclusive
    with self.assertRaises(Exception):
        f = cf.read(self.filename, unsqueeze=True, squeeze=True)
def test_FieldList__contains__(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    f = cf.read(self.filename)
    f.append(f[0].copy())
    f[1] *= 10

    g = cf.read(self.filename)[0] * 10

    self.assertIn(g, f)
    self.assertNotIn(34.6, f)
def test_FieldList_insert_pop_remove(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    # Insert
    f = cf.read(self.filename)
    g = f[0].copy()

    f.insert(0, g.copy())
    self.assertEqual(len(f), 2)
    self.assertIsInstance(f, cf.FieldList)

    g = g + 10
    f.insert(-1, g)
    self.assertEqual(len(f), 3)
    self.assertEqual(f[0].maximum(), (f[1].maximum() - 10))
    self.assertIsInstance(f, cf.FieldList)

    # Pop
    f = cf.read(self.filename)
    g = f[0]
    h = f[0] + 10

    f.append(h)

    z = f.pop(0)
    self.assertIs(z, g)
    self.assertEqual(len(f), 1)
    self.assertIsInstance(f, cf.FieldList)

    z = f.pop(-1)
    self.assertIs(z, h)
    self.assertEqual(len(f), 0)
    self.assertIsInstance(f, cf.FieldList)

    # Remove
    f = cf.read(self.filename)
    g = f[0].copy()
    g = g + 10

    f.append(g)
    self.assertEqual(len(f), 2)

    f.remove(g)
    self.assertEqual(len(f), 1)
    self.assertIsInstance(f, cf.FieldList)

    with self.assertRaises(Exception):
        f.remove(f[0] * -99)

    f.remove(f[0].copy())
    self.assertEqual(len(f), 0)
    self.assertIsInstance(f, cf.FieldList)
def test_read_pp(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    p = cf.read('wgdos_packed.pp')[0]

    p0 = cf.read('wgdos_packed.pp',
                 um={'fmt': 'PP',
                     'endian': 'big',
                     'word_size': 4,
                     'version': 4.5,
                     'height_at_top_of_model': 23423.65})[0]

    self.assertTrue(p.equals(p0, verbose=2))
def test_read_pp(self):
    p = cf.read("wgdos_packed.pp")[0]

    p0 = cf.read(
        "wgdos_packed.pp",
        um={
            "fmt": "PP",
            "endian": "big",
            "word_size": 4,
            "version": 4.5,
            "height_at_top_of_model": 23423.65,
        },
    )[0]

    self.assertTrue(p.equals(p0, verbose=2))
def test_write_datatype(self):
    for chunksize in self.chunk_sizes:
        with cf.chunksize(chunksize):
            f = cf.read(self.filename)[0]
            self.assertEqual(f.dtype, numpy.dtype(float))
            cf.write(
                f,
                tmpfile,
                fmt="NETCDF4",
                datatype={numpy.dtype(float): numpy.dtype("float32")},
            )
            g = cf.read(tmpfile)[0]
            self.assertEqual(
                g.dtype,
                numpy.dtype("float32"),
                "datatype read in is " + str(g.dtype),
            )

    # Keyword single
    f = cf.read(self.filename)[0]
    self.assertEqual(f.dtype, numpy.dtype(float))
    cf.write(f, tmpfile, fmt="NETCDF4", single=True)
    g = cf.read(tmpfile)[0]
    self.assertEqual(
        g.dtype,
        numpy.dtype("float32"),
        "datatype read in is " + str(g.dtype),
    )

    # Keyword double
    f = g
    self.assertEqual(f.dtype, numpy.dtype("float32"))
    cf.write(f, tmpfile2, fmt="NETCDF4", double=True)
    g = cf.read(tmpfile2)[0]
    self.assertEqual(
        g.dtype, numpy.dtype(float), "datatype read in is " + str(g.dtype)
    )

    for single in (True, False):
        for double in (True, False):
            with self.assertRaises(Exception):
                cf.write(g, double=double, single=single)

    datatype = {numpy.dtype(float): numpy.dtype("float32")}
    with self.assertRaises(Exception):
        cf.write(g, datatype=datatype, single=True)

    with self.assertRaises(Exception):
        cf.write(g, datatype=datatype, double=True)
def test_FieldList_index(self):
    if self.test_only and inspect.stack()[0][3] not in self.test_only:
        return

    f = cf.read(self.filename2)[0]

    a, b, c = [f[0], f[1:456], f[456:]]

    g = cf.FieldList([a, b, c])

    self.assertEqual(g.index(a), 0)
    self.assertEqual(g.index(a, start=0), 0)
    self.assertEqual(g.index(a, stop=1), 0)
    self.assertEqual(g.index(a, stop=-2), 0)
    self.assertEqual(g.index(a, stop=2), 0)

    self.assertEqual(g.index(b), 1)
    self.assertEqual(g.index(b, start=0), 1)
    self.assertEqual(g.index(b, start=1, stop=2), 1)

    self.assertEqual(g.index(c), 2)
    self.assertEqual(g.index(c, start=0), 2)
    self.assertEqual(g.index(c, start=1), 2)
    self.assertEqual(g.index(c, start=2), 2)
    self.assertEqual(g.index(c, start=-1), 2)

    with self.assertRaises(Exception):
        _ = g.index(f)

    with self.assertRaises(Exception):
        _ = g.index(a, start=1)
def test_CellMeasure__repr__str__dump(self):
    f = cf.read(self.filename)[0]
    x = f.cell_measures("measure:area").value()

    _ = repr(x)
    _ = str(x)
    _ = x.dump(display=False)
def load_profiler_data(filename):
    f = cf.read(filename)
    temp = f.select('sea_water_temperature')[1]
    psal = f.select('sea_water_salinity')[1]
    depth = temp.coord('depth')
    time = temp.coord('time')
    lon = temp.coord('longitude')
    lat = temp.coord('latitude')
    f.close()
    return lon, lat, depth, time, temp.array, psal.array
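# A minimal usage sketch for load_profiler_data; 'profiler.nc' is a
# hypothetical file name standing in for any profiler netCDF file that
# holds sea_water_temperature and sea_water_salinity fields:
#
#     lon, lat, depth, time, temp, psal = load_profiler_data('profiler.nc')
#     print(temp.shape, psal.shape)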
def cfHandler(path):
    '''Provides a view of a netCDF or PP file using the cf-python
    interface.'''
    f, e = os.path.splitext(path)
    if e not in ['.pp', '.nc']:
        raise ValueError('cfHandler cannot handle %s' % e)

    print('opening [%s]' % path)
    print(path.__class__)
    ncf = cf.read(path)
    print('opened %s' % path)

    grids = []
    results = {'variables': [], 'grids': grids, 'properties': {}}

    ####### This next needs to be modified
    for k in ['tracking_id', 'history', 'input_file_format']:
        if hasattr(ncf, k):
            results['properties'][k] = getattr(ncf, k)

    # Now we need to find the CF fields in the file
    print('moving on')
    ftime = None
    for v in ncf:
        detail = {}
        for k, vv in [('name', 'name_in_file'),
                      ('long_name', 'long_name'),
                      ('units', 'units'),
                      ('cell_methods', 'cell_methods')]:
            if hasattr(v, vv):
                detail[k] = getattr(v, vv)

        if e:
            stash = {}
            for k, vv in [('item', 'stash_code'),
                          ('model', 'source'),
                          ('runid', 'runid')]:
                if hasattr(v, vv):
                    stash[k] = getattr(v, vv)[0]

            if stash != {}:
                detail['stash'] = stash

        grid, time = cfExtentHandler(v)
        detail['size'] = grid['size']
        results['variables'].append(detail)
        if grid not in grids:
            grids.append(grid)

        if ftime is None:
            # Remember the time range of the first field
            ftime = time
        elif time != ftime:
            print(ftime, time)
            raise ValueError('Multiple time ranges in file')

    results['properties']['timeAxis'] = ftime
    return results
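# A minimal usage sketch for cfHandler; 'example.nc' is a hypothetical
# path to any netCDF file readable by cf-python:
#
#     results = cfHandler('example.nc')
#     print(len(results['variables']), results['properties'])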
def load_wmop_variable(datafile, variable, tindex, m):
    # Load variables and coordinates
    f = cf.read(datafile)
    field2plot = f.select(variable)[0]
    lon = field2plot[0].coord('lon').array
    lat = field2plot[0].coord('lat').array
    field2plot = field2plot.array[tindex]
    lon_ts, lat_ts = m(lon, lat)

    u = f.select('eastward_sea_surface_velocity')[0].array[tindex]
    v = f.select('northward_sea_surface_velocity')[0].array[tindex]
    lon = f.select('eastward_sea_surface_velocity')[0].coord('lon').array
    lat = f.select('eastward_sea_surface_velocity')[0].coord('lat').array
    llon, llat = np.meshgrid(lon, lat)
    lon_uv, lat_uv = m(llon, llat)

    print(field2plot.shape)

    return field2plot, lon_ts, lat_ts
def yield_cf_files(targets):
    """Yields CF files for further processing.

    :param str|sequence targets: Pointer(s) to file(s) and/or directory(ies).

    :returns: Generator yielding CF files.
    :rtype: generator

    """
    for fpath in yield_files(targets):
        try:
            cf_files = cf.read(fpath, ignore_read_error=False,
                               verbose=False, aggregate=False)
        except (IOError, OSError):
            logger.log_warning("Non netCDF file rejected: {}".format(fpath))
        else:
            # Save the netCDF file name (from which we can extract the
            # dataset version)
            for cf_file in cf_files:
                cf_file.fpath = fpath
            yield cf_files
            # ... close file to prevent a proliferation of open file
            # handles
            cf.close_one_file()
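# A minimal usage sketch for yield_cf_files; '/path/to/data' is a
# hypothetical target directory:
#
#     for cf_files in yield_cf_files('/path/to/data'):
#         for cf_file in cf_files:
#             print(cf_file.fpath, cf_file.identity())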
def load_profiler_TS(filename):
    f = cf.read(filename)
    temp = f.select('sea_water_temperature')[1]
    psal = f.select('sea_water_salinity')[1]
    f.close()
    return temp.array, psal.array
__author__ = 'ctroupin'

import numpy as np
import cf
# import netCDF4 as netcdf
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from matplotlib import colors
import datetime, time, calendar

datafile = 'http://thredds.socib.es/thredds/dodsC/research_vessel/ctd/socib_rv-scb_sbe9002/L1/2015/dep0011_socib-rv_scb-sbe9002_L1_2015-05-19.nc'

f = cf.read(datafile)

# Average the primary and secondary sensor readings
temp = 0.5 * (f.select('sea_water_temperature')[0].array +
              f.select('sea_water_temperature')[1].array)
psal = 0.5 * (f.select('sea_water_practical_salinity')[0].array +
              f.select('sea_water_practical_salinity')[1].array)

# Create the figure
fig = plt.figure()
ax = fig.add_subplot(111)
# plt.clabel(cont, inline=True, fmt='%1.1f')
# plt.xlim(smin, smax)
# plt.ylim(tmin, tmax)
plt.plot(psal, temp, 'k.', ms=1.5)
plt.xlabel('Salinity', fontsize=20)
plt.ylabel('Temperature', fontsize=20)
plt.show()
plt.close()
def readDataSet(self, fileName):
    '''Opens, reads, and stores the contents of fileName into
    self.DataSet.'''
    self.DataSet = cf.read(fileName)
ax.add_artist(ab)

# Compute min and max values
sstmin, sstmax = 24.0, 28.0
normsst = colors.Normalize(vmin=sstmin, vmax=sstmax)
boundsst = np.arange(sstmin, sstmax + 0.001, 1.0)

# Load turtle data
lonturtle1, latturtle1 = load_functions.load_turtle_coord(turtlefile1)
lonturtle2, latturtle2 = load_functions.load_turtle_coord(turtlefile2)

# Load model salinity
if plotsalinity:
    f = cf.read(romsfile)
    field2plot = f.select(variable)[0]
    lon = field2plot[0].coord("lon").array
    lat = field2plot[0].coord("lat").array
    field2plot = field2plot.array[0].squeeze()
    lon_ts, lat_ts = np.meshgrid(lon, lat)

# Load altimetry
lon_alti, lat_alti, u, v = load_functions.load_altimetry_aviso_uv(
    altimetryfile, coordinates2)
lon_alti2, lat_alti2, adt = load_functions.load_altimetry_aviso_adt(
    altimetryfile2, coordinates2)
adt = adt - adt.mean()
normalti = colors.Normalize(vmin=-0.15, vmax=0.15)
bounds = np.arange(vmin, vmax + .0001, dvar)

coordinates = np.array((-1, 5.5001, 36.9, 41.5))
dlon, dlat = 1.0, 1.0

m = Basemap(projection='merc',
            llcrnrlon=coordinates[0], llcrnrlat=coordinates[2],
            urcrnrlon=coordinates[1], urcrnrlat=coordinates[3],
            lat_ts=0.5 * (coordinates[2] + coordinates[3]),
            resolution='f')

findex = 0

landfile = "/data_local/Bathymetry/ne_10m_land"
shp_info = m.readshapefile(landfile, 'scalerank', drawbounds=True)

f = cf.read(drifterfile)
lon_d = f.select('sea_water_temperature')[0].coord('lon').array
lat_d = f.select('sea_water_temperature')[0].coord('lat').array
f.close()

lon_d = np.ma.masked_outside(lon_d, coordinates[0], coordinates[1],
                             copy=True)
print(lon_d.min())
print(lon_d.max())

lon_d, lat_d = m(lon_d, lat_d)


def load_wmop_variable(datafile, variable, tindex, m):
    # Load variables and coordinates
    f = cf.read(datafile)
    field2plot = f.select(variable)[0]
    lon = field2plot[0].coord('lon').array
def test_2_cfread(self):
    """Now test reading with cf-python."""
    nc2 = cf.read(self.__ofile__)
    print(nc2)