def test_wera_mask():
    radial_file = data_path / "radials" / "ruv" / "WERA" / "RDL_csw_2019_10_24_162300.ruv"
    rad1 = Radial(radial_file, mask_over_land=False, replace_invalid=False)

    # Total points before masking
    assert len(rad1.data) == 6327

    rad1.mask_over_land(subset=True)

    # Make sure we subset the land points
    assert len(rad1.data) == 5745
def test_codar_mask():
    radial_file = data_path / "radials" / "ruv" / "SEAB" / "RDLi_SEAB_2019_01_01_0000.ruv"
    rad1 = Radial(radial_file, replace_invalid=False)

    # Total points before masking
    assert len(rad1.data) == 745

    rad1.mask_over_land(subset=True)

    # Make sure we subset the land points
    assert len(rad1.data) == 592
def read_lluv_file(ifn):
    """Reads an LLUV file using the HFRadarPy toolbox"""
    if os.path.exists(ifn):
        r = Radial(ifn)
    else:
        print('File does not exist: ' + ifn)
        raise IOError('Error opening %s' % ifn)
    return r
def fix_empty_radial(r, table_type='LLUV RDL9'):
    """Fixes an empty Radial object (r) so that it can be written out using
    r.to_ruv() without raising an error when table information is missing.

    Returns r (Radial object) for writing out empty LLUV files.

    Parameters
    ----------
    r : Radial object
        Radial object read from an empty LLUVMerger radial file.
    table_type : string
        'LLUV RDL9' ensures the correct file format is used as the template.

    Returns
    -------
    r : Radial object
        Fixed by copying the LLUV table from the template Radial object (re).
    """
    if table_type == 'LLUV RDL9':
        formatfile = Path(__file__).parent.resolve() / 'file_formats' / 'radial_LLUV_RDL9.ruv'
        # formatfile = './test_empty_Radials_qcd/RDLi_HATY_2020_10_08_2000.ruv'
        re = Radial(formatfile, empty_radial=True)
    else:
        print('fix_empty_radial() : Unrecognized table_type "%s"' % (table_type,))
        return r

    ## it would be easy if we knew for sure they would always be key==1 for both
    ## r._tables[1] = re._tables[1]
    ## r.data = re._tables[1]['data']

    # all this to make sure the indexes into r._tables and re._tables are getting
    # the 'LLUV' tables from both, in case they are under different keys

    # get all the table types
    r_tts = [r._tables[key]['TableType'].split(' ')[0] for key in r._tables.keys()]
    re_tts = [re._tables[key]['TableType'].split(' ')[0] for key in re._tables.keys()]

    # get list of keys -- r._tables is an OrderedDict() and odict_keys are not
    # subscriptable, so generate a list of keys
    r_keys = list(r._tables.keys())
    re_keys = list(re._tables.keys())

    idx = r_tts.index('LLUV')
    r_key = r_keys[idx]
    idx = re_tts.index('LLUV')
    re_key = re_keys[idx]

    # copy correctly formatted empty data table from re (Radial object) to r (Radial object)
    r._tables[r_key] = re._tables[re_key]
    r.data = re._tables[re_key]['data']

    return r
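# A minimal usage sketch for fix_empty_radial() (not part of the toolbox API).
# The input/output paths are hypothetical placeholders, and writing the result
# assumes Radial.to_ruv() accepts an output path, as the docstring above implies.
def _example_fix_empty_radial():
    r = read_lluv_file('/tmp/RDLi_XXXX_2020_10_08_2000.ruv')  # hypothetical empty LLUV file
    if r.data.empty:
        # swap in the correctly formatted (empty) LLUV table from the RDL9 template
        r = fix_empty_radial(r, table_type='LLUV RDL9')
    r.to_ruv('/tmp/RDLi_XXXX_2020_10_08_2000_qcd.ruv')  # hypothetical output path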
def main(radial_file, save_dir, types=['tabular']):
    """
    Main function to parse and qc radial files
    :param radial_file: Path to radial file
    :param save_dir: Path to save quality controlled radial file
    :param types: List of netCDF models to export (e.g. 'tabular')
    """
    save_dir = Path(save_dir)

    try:
        r = Radial(radial_file)
    except Exception:
        return

    if r.is_valid():
        sname = save_dir / r.file_name
        try:
            for t in types:
                r.export(sname, 'netcdf-{}'.format(t), prepend_ext=True)
        except ValueError:
            pass
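# Hypothetical driver for main(): quality-control every radial file in a
# directory and export each valid one as tabular and gridded netCDF.
# The directory paths below are placeholders, not real data locations.
def _example_qc_directory():
    input_dir = Path('/data/radials/ruv/SEAB')   # hypothetical source directory
    output_dir = Path('/data/radials/nc/SEAB')   # hypothetical destination directory
    for ruv in sorted(input_dir.glob('*.ruv')):
        main(ruv, output_dir, types=['tabular', 'gridded'])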
def setUp(self):
    self.file_paths = list((data_path / "radials" / "ruv" / "SEAB").glob("*.ruv"))
    self.radial_files = [str(r) for r in self.file_paths]
    self.radial_objects = [Radial(str(r)) for r in self.radial_files]

    # Select even indexed file_paths and odd indexed radial objects
    # into one array of mixed content types for concatenating
    self.radial_mixed = self.radial_files[::2] + self.radial_objects[1:][::2]
def test_codar_radial_to_gridded_netcdf():
    radial_file = data_path / "radials" / "ruv" / "SEAB" / "RDLi_SEAB_2019_01_01_0000.ruv"
    nc_file = output_path / "radials" / "nc" / "gridded" / "SEAB" / "RDLi_SEAB_2019_01_01_0000.nc"

    # Converts the underlying .data (natively a pandas DataFrame)
    # to an xarray object when `create_netcdf` is called.
    # This automatically 'enhances' the netCDF file
    # with better variable names and attributes.
    rad1 = Radial(radial_file)
    rad1.to_netcdf(str(nc_file), model="gridded")

    # Convert it to an xarray Dataset with no variable
    # or attribute enhancements
    xds2 = rad1.to_xarray("gridded", enhance=False)

    # Convert it to an xarray Dataset with increased usability
    # by changing variable names, adding attributes,
    # and decoding CF standards like scale_factor
    xds3 = rad1.to_xarray("gridded", enhance=True)

    with xr.open_dataset(nc_file) as xds1:
        # The two enhanced files should be identical
        assert xds1.identical(xds3)

        # Enhanced and non-enhanced files should not be equal
        assert not xds1.identical(xds2)
def test_miami_radial_tabular_nc():
    radial_file = data_path / "radials" / "ruv" / "WERA" / "RDL_UMiami_STF_2019_06_01_0000.hfrweralluv1.0"
    nc_file = output_path / "radials" / "nc" / "tabular" / "WERA" / "RDL_UMiami_STF_2019_06_01_0000.nc"

    # Converts the underlying .data (natively a pandas DataFrame)
    # to an xarray object when `create_netcdf` is called.
    # This automatically 'enhances' the netCDF file
    # with better variable names and attributes.
    rad1 = Radial(radial_file)
    rad1.to_netcdf(str(nc_file), model="tabular")
    # rad1.export(str(nc_file), file_type='netcdf', model='tabular')

    # Convert it to an xarray Dataset with no variable
    # or attribute enhancements
    xds2 = rad1.to_xarray("tabular", enhance=False)
    # xds2 = rad1.to_xarray_tabular(enhance=False)

    # Convert it to an xarray Dataset with increased usability
    # by changing variable names, adding attributes,
    # and decoding CF standards like scale_factor
    xds3 = rad1.to_xarray("tabular", enhance=True)
    # xds3 = rad1.to_xarray_tabular(enhance=True)

    with xr.open_dataset(nc_file) as xds1:
        # The two enhanced files should be identical
        assert xds1.identical(xds3)

        # Enhanced and non-enhanced files should not be equal
        assert not xds1.identical(xds2)
def test_codar_qc():
    radial_file = data_path / "radials" / "ruv" / "SEAB" / "RDLi_SEAB_2019_01_01_0100.ruv"
    radial_file_previous = data_path / "radials" / "ruv" / "SEAB" / "RDLi_SEAB_2019_01_01_0000.ruv"

    rad1 = Radial(radial_file, replace_invalid=False)
    rad1.initialize_qc()
    # assert len(rad1.data) == 733
    # rad1.mask_over_land(subset=True)
    rad1.qc_qartod_radial_count()
    rad1.qc_qartod_valid_location()
    rad1.qc_qartod_maximum_velocity()
    rad1.qc_qartod_spatial_median()
    rad1.qc_qartod_temporal_gradient(radial_file_previous)
    rad1.qc_qartod_avg_radial_bearing(reference_bearing=180)
    rad1.qc_qartod_primary_flag()
    # assert len(rad1.data) == 587
    assert "QC07" in rad1.data
    assert "QC08" in rad1.data
    assert "QC09" in rad1.data
    assert "QC10" in rad1.data
    assert "QC11" in rad1.data  # temporal gradient test
    assert "QC12" in rad1.data
    assert "PRIM" in rad1.data
def test_wera_raw_to_quality_tabular_nc():
    radial_file = data_path / "radials" / "ruv" / "WERA" / "RDL_csw_2019_10_24_162300.ruv"
    nc_file = output_path / "radials" / "qc" / "nc" / "tabular" / "WERA" / "RDL_csw_2019_10_24_162300.nc"

    rad1 = Radial(radial_file, replace_invalid=False)
    rad1.initialize_qc()
    # rad1.mask_over_land()
    rad1.qc_qartod_radial_count()
    rad1.qc_qartod_valid_location()
    rad1.qc_qartod_maximum_velocity()
    rad1.qc_qartod_spatial_median()
    rad1.to_netcdf(str(nc_file), model="tabular")

    xds2 = rad1.to_xarray("tabular", enhance=True)

    with xr.open_dataset(nc_file) as xds1:
        assert len(xds1.QCTest) == 3  # no VFLG column so one test not run

        # The two enhanced files should be identical
        assert xds1.identical(xds2)
def test_wera_qc():
    radial_file = data_path / "radials" / "ruv" / "WERA" / "RDL_csw_2019_10_24_162300.ruv"

    rad1 = Radial(radial_file, replace_invalid=False)
    rad1.initialize_qc()
    # assert len(rad1.data) == 6327
    # rad1.mask_over_land(subset=True)
    rad1.qc_qartod_radial_count()
    rad1.qc_qartod_valid_location()
    rad1.qc_qartod_maximum_velocity()
    rad1.qc_qartod_spatial_median()
    rad1.qc_qartod_avg_radial_bearing(reference_bearing=180)
    rad1.qc_qartod_primary_flag()
    # assert len(rad1.data) == 5745
    assert "QC07" in rad1.data
    assert "QC08" not in rad1.data  # no VFLG column so we can't run it
    assert "QC09" in rad1.data
    assert "QC10" in rad1.data
    # assert "QC11" in rad1.data  # temporal gradient test
    assert "QC12" in rad1.data
    assert "PRIM" in rad1.data
def generate_radialshort(r, table_type='LLUV RDL7', numdegrees=3, weight_parameter='MP'):
    """Generates a radialshort (rsd) data array.

    This function generates radialshort data (rsd) from the weighted average
    data (xd) computed from the input Radial (r).  It is the output CSV data
    table (middle) of the CODAR LLUV format, which consists of a header,
    middle, and footer.  If xd is empty, an empty rs is returned so that empty
    LLUV files can still be written out.

    Each LATD, LOND is computed using Vincenty's algorithm for the destination
    along a bearing BEAR (NCW) at a distance of RNGE (km) from the point of
    origin (lat1, lon1).  Vincenty's GC is the great-circle distance on a
    WGS-84 ellipsoid model between two points.  This requires the CODAR site
    location and range resolution from the header or config data.

    Parameters
    ----------
    r : Radial object
        QC'd radial data from which weighted average radials (xd) are computed
        for each range and bearing where data were found.
    table_type : string
        'LLUV RDL7' ensures the correct file format is used as the template.
    numdegrees : int
        Passed through to weighted_velocities().
    weight_parameter : string
        Passed through to weighted_velocities() (e.g. 'MP').

    Returns
    -------
    rs : Radial object
    """
    from qccodar.qcutils import weighted_velocities

    if table_type == 'LLUV RDL7':
        formatfile = Path(__file__).parent.resolve() / 'file_formats' / 'radialshort_LLUV_RDL7.ruv'
        rs = Radial(formatfile, empty_radial=True)
    else:
        print('generate_radialshort() : Unrecognized table_type "%s"' % (table_type,))
        return numpy.array([]), ''

    # copy over the file information, header, name, tables
    rs.metadata = r.metadata
    # '% PatternMethod: 1 PatternVectors' should be added but I'm not sure how to
    # insert at a specific position in an ordered dictionary
    if hasattr(r, 'diagnostics_radial'):
        rs.diagnostics_radial = r.diagnostics_radial
    if hasattr(r, 'diagnostics_hardware'):
        rs.diagnostics_hardware = r.diagnostics_hardware
    # the range information table changes in the processing from radial metric to radial short,
    # but I have simply copied over the metric version for now
    if hasattr(r, 'range_information'):
        rs.range_information = r.range_information

    # remove any table in rs that is not in r
    r_tts = [r._tables[key]['TableType'].split(' ')[0] for key in r._tables.keys()]
    rs_tts = [rs._tables[key]['TableType'].split(' ')[0] for key in rs._tables.keys()]
    rs_keys = list(rs._tables.keys())
    to_be_removed = [tt for tt in rs_tts if tt not in r_tts]
    for tt in to_be_removed:
        idx = rs_tts.index(tt)
        rs._tables.pop(rs_keys[idx])

    for key in rs._tables.keys():
        table = rs._tables[key]
        if 'LLUV' in table['TableType']:
            rs._tables[key]['data'] = rs.data
            rs._tables[key]['TableRows'] = str(rs.data.shape[0])
        elif 'rads' in table['TableType']:
            rs._tables[key]['data'] = rs.diagnostics_radial
            rs._tables[key]['TableRows'] = str(rs.diagnostics_radial.shape[0])
        elif 'rcvr' in table['TableType']:
            rs._tables[key]['data'] = rs.diagnostics_hardware
            rs._tables[key]['TableRows'] = str(rs.diagnostics_hardware.shape[0])
        elif 'RINF' in table['TableType']:
            rs._tables[key]['data'] = rs.range_information
            rs._tables[key]['TableRows'] = str(rs.range_information.shape[0])

    # rs.file_path = r.file_path
    fn = r.file_name.replace('RDLv', 'RDLx')
    fn = fn.replace('RDLw', 'RDLy')
    rs.file_name = fn
    # rs.full_file = r.full_file
    rs.is_wera = False
    rs._iscorrupt = False
    rs.time = datetime.datetime(*[int(s) for s in r.metadata['TimeStamp'].split()])

    origin = r.metadata['Origin']
    lat1, lon1 = [float(x) for x in origin.split()]
    range_resolution = float(r.metadata['RangeResolutionKMeters'])

    if r.data.size == 0:
        return rs

    xd = weighted_velocities(r, numdegrees, weight_parameter)
    if xd.size == 0:
        return rs

    for key in rs._tables.keys():
        table = rs._tables[key]
        if 'LLUV' in table['TableType']:
            rs._tables[key]['TableRows'] = str(xd.shape[0])

    rs.data['VFLG'] = xd['VFLG']
    rs.data['SPRC'] = xd['SPRC']
    rs.data['BEAR'] = xd['BEAR']
    rs.data['VELO'] = xd['VELO']
    rs.data['ESPC'] = xd['ESPC']
    rs.data['MAXV'] = xd['MAXV']
    rs.data['MINV'] = xd['MINV']
    rs.data['EDVC'] = xd['EDVC']
    rs.data['ERSC'] = xd['ERSC']

    ############################
    # computations for filling in other columns of radialshort data
    ############################

    # create HEAD column based on BEAR+180
    rs.data['HEAD'] = numpy.mod(rs.data['BEAR'] + 180., 360.)
    # compute velocity components
    (rs.data['VELU'], rs.data['VELV']) = compass2uv(rs.data['VELO'], rs.data['HEAD'])

    rs.data['RNGE'] = range_resolution * rs.data['SPRC']
    # Vincenty great-circle destination point (LATD, LOND) based on RNGE, BEAR from the site origin
    origin = geopy.Point(lat1, lon1)
    pts = numpy.array([
        geopy.distance.geodesic(kilometers=rng).destination(origin, brg)[0:2]
        for (rng, brg) in zip(rs.data['RNGE'], rs.data['BEAR'])
    ])
    rs.data['LATD'], rs.data['LOND'] = pts[:, 0], pts[:, 1]

    (rs.data['XDST'], rs.data['YDST']) = compass2uv(rs.data['RNGE'], rs.data['BEAR'])

    return rs
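# A sketch of the radial-metric to radial-short step built on
# generate_radialshort().  The file paths are hypothetical, and writing the
# result assumes Radial.to_ruv() accepts an output path (as referenced in the
# fix_empty_radial docstring); substitute whichever writer you actually use.
def _example_generate_radialshort():
    r = Radial('/tmp/RDLv_XXXX_2020_01_01_0000.ruv')  # hypothetical radial metric file
    rs = generate_radialshort(r, table_type='LLUV RDL7',
                              numdegrees=3, weight_parameter='MP')
    rs.to_ruv('/tmp/RDLx_XXXX_2020_01_01_0000.ruv')   # hypothetical radial short output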
def plot_ruv(radial_file, save_path=None, fname=None, speed_display='color',
             redblue=True, plotflag=None, scale=50, vlims=(-100, 100)):
    """
    Main function to plot radial files.

    Args:
        radial_file (str or Path): Path to radial file or a Radial object
        save_path (str or Path): Path to save figures
        fname (str): Output file name. If not specified, the radial object filename is used. Defaults to None
        speed_display (str, optional): 'color' or 'arrowlength' to specify whether current speed is depicted by color or arrow length. Defaults to 'color'
        redblue (bool, optional): If True, the colorbar scheme is red/blue. Defaults to True
        plotflag (str, optional): QARTOD QC test code; fail and suspect flags for that test will be highlighted. Defaults to None
        scale (int, optional): Scaling factor for drawing the vectors. Defaults to 50
        vlims (tuple, optional): Velocity limits for the colorbar. Defaults to (-100, 100)
    """
    if not isinstance(radial_file, Radial):
        r = Radial(radial_file)
    else:
        r = radial_file

    if not r.is_valid():
        return
    if r._iscorrupt:
        return

    if fname is None:
        fname = r.file_name[0:-4]

    # Adjust some standard plotting settings to make them the size of a sheet of paper
    fig_size = plt.rcParams["figure.figsize"]
    fig_size[0] = 12
    fig_size[1] = 8
    plt.rcParams["figure.figsize"] = fig_size

    # Set colors of the land.
    edgecolor = 'black'
    landcolor = 'tan'

    LAND = cfeature.NaturalEarthFeature(
        'physical', 'land', '10m',
        edgecolor='face',
        facecolor='tan'
    )

    state_lines = cfeature.NaturalEarthFeature(
        category='cultural',
        name='admin_1_states_provinces_lines',
        scale='50m',
        facecolor='none'
    )

    bf = 0.3  # degrees
    extent = [
        r.data.LOND.min() - bf,
        r.data.LOND.max() + bf,
        r.data.LATD.min() - bf,
        r.data.LATD.max() + bf
    ]

    # Split everything out into separate variables to pass them more easily to the plotting functions
    time = r.time
    lon = r.data.LOND.to_numpy()
    lat = r.data.LATD.to_numpy()
    u = r.data.VELU.to_numpy()
    v = r.data.VELV.to_numpy()
    velocity = r.data.VELO.to_numpy()
    sitename = r.metadata['Site'][0:4]
    ptype = r.metadata['PatternType']

    # Mask nans just in case there are any
    u = ma.masked_invalid(u)
    v = ma.masked_invalid(v)

    # convert U and V component velocities to angle and speed
    angle, speed = uv2spdir(u, v)

    # convert angle and speed right back to U and V component velocities;
    # passing speed as an array of 1's normalizes the arrow sizes
    u, v = spdir2uv(np.ones_like(speed), angle, deg=True)

    # Get the receiver location
    receiver_location = [float(x) for x in r.metadata['Origin'].split(' ')]
    receiver_location.reverse()

    # Initialize an empty subplot using cartopy
    fig, ax = plt.subplots(figsize=(11, 8), subplot_kw=dict(projection=ccrs.Mercator()))

    # plt.quiver(lon, lat, u, v, transform=ccrs.PlateCarree())
    plt.plot(receiver_location[0], receiver_location[1], 'o',
             markersize=10, markeredgecolor='black', color='red',
             transform=ccrs.PlateCarree())

    map_features(ax, extent, LAND, edgecolor, landcolor, state_lines)

    # The next lines specify the arrow shapes. You can customize this to your preference, usually by trial and error.
    # scale = 50
    headwidth = 2.5
    headlength = 4
    headaxislength = 4
    sub = 1

    # if the user requested speed displayed as arrow length
    if speed_display == 'arrowlength':
        scale_units = 'width'
        width = 0.005

        if plotflag is not None:
            fail = r.data[plotflag] == 4
            suspect = r.data[plotflag] == 3
            noteval = r.data[plotflag] == 2
            away = r.data.VELO > 0

            plt.quiver(lon, lat, u, v, transform=ccrs.PlateCarree(), scale=scale,
                       scale_units=scale_units, width=width, color='lightpink')
            plt.quiver(lon[away], lat[away], u[away], v[away], transform=ccrs.PlateCarree(),
                       scale=scale, scale_units=scale_units, width=width, color='lightblue')
            # plt.quiver(lon, lat, u, v, transform=ccrs.PlateCarree(), scale=scale, color='white')
            plt.quiver(lon[fail], lat[fail], u[fail], v[fail], transform=ccrs.PlateCarree(),
                       scale=scale, scale_units=scale_units, width=width, color='red')
            plt.quiver(lon[suspect], lat[suspect], u[suspect], v[suspect], transform=ccrs.PlateCarree(),
                       scale=scale, scale_units=scale_units, width=width, color='gold')
            plt.quiver(lon[noteval], lat[noteval], u[noteval], v[noteval], transform=ccrs.PlateCarree(),
                       scale=scale, scale_units=scale_units, width=width, color='gray')
            plt.title(
                f'{sitename} {ptype} {plotflag}\nFail(red) Suspect(yellow) Not Evaluated(grey)\n{time}'
            )
            plt.savefig(save_path + '/' + fname + '_' + plotflag)
            plt.close('all')

        elif redblue:
            away = r.data.VELO > 0
            plt.quiver(lon, lat, u, v, transform=ccrs.PlateCarree(), scale=scale,
                       scale_units=scale_units, width=width, color='red')
            plt.quiver(lon[away], lat[away], u[away], v[away], transform=ccrs.PlateCarree(),
                       scale=scale, scale_units=scale_units, width=width, color='blue')
            plt.title(f'{sitename} {ptype}\n{time}')
            plt.savefig(save_path + '/' + fname + '_rb')
            plt.close('all')

        else:
            plt.quiver(lon, lat, u, v, transform=ccrs.PlateCarree(), scale=scale,
                       scale_units=scale_units, width=width, color='wheat')
            plt.title(f'{sitename} {ptype}\n{time}')
            plt.savefig(save_path + '/' + fname)
            plt.close('all')

    # if the user requested speed displayed as color
    else:
        if plotflag is not None:
            test = r.data[plotflag]
            velocity[np.where(test >= 0)] = 1
            velocity[np.where(test == 4)] = -1
            color_clipped = np.clip(r.data.VELO[::sub], -1, 1).squeeze()
            offset = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)
            cmap = colors.ListedColormap(['red', 'wheat'])
            plt.title(f'{sitename} {ptype} {plotflag} Fail\n{time}')

            qargs = dict(cmap=cmap, scale=scale, headwidth=headwidth,
                         headlength=headlength, headaxislength=headaxislength)
            qargs['transform'] = ccrs.PlateCarree()
            qargs['norm'] = offset

            # plot arrows over pcolor
            h = ax.quiver(lon[::sub], lat[::sub], u[::sub], v[::sub], color_clipped, **qargs)
            plt.savefig(save_path + '/' + fname + '_' + plotflag + '_fail')
            plt.close('all')

        elif redblue:
            cmap = 'bwr'
            # velocity_temp = velocity.where(velocity > 0, other=-1)  # Going away from radar
            velocity[np.where(velocity < 0)] = -1
            velocity[np.where(velocity >= 0)] = 1
            # We will create a temporary variable of velocities that sets any velocity less than 0 to 1
            # color_clipped = velocity_temp.where(velocity < 0, other=1)  # Going towards radar

            # Define arrow colors. Limited by velocity_min and velocity_max
            color_clipped = np.clip(r.data.VELO[::sub], -1, 1).squeeze()
            offset = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)
            plt.title(f'{sitename} {ptype}\n{time}')

            qargs = dict(cmap=cmap, scale=scale, headwidth=headwidth,
                         headlength=headlength, headaxislength=headaxislength)
            qargs['transform'] = ccrs.PlateCarree()
            qargs['norm'] = offset

            # plot arrows over pcolor
            h = ax.quiver(lon[::sub], lat[::sub], u[::sub], v[::sub], color_clipped, **qargs)
            # map_features(ax, extent, LAND, edgecolor, landcolor, state_lines)
            plt.savefig(save_path + '/' + fname + '_rb')
            plt.close('all')

        else:
            plt.title(f'{sitename} {ptype}\n{time}')
            cmap = cmocean.cm.balance

            # Colorbar options
            velocity_min = vlims[0]  # The minimum speed that should be displayed on the colorbar
            velocity_max = vlims[1]  # The maximum speed that should be displayed on the colorbar
            cbar_step = 10  # The step between each colorbar tick

            offset = Normalize(vmin=velocity_min, vmax=velocity_max, clip=True)

            # Define arrow colors. Limited by velocity_min and velocity_max
            color_clipped = np.clip(r.data.VELO[::sub], velocity_min, velocity_max).squeeze()
            ticks = np.append(np.arange(velocity_min, velocity_max, cbar_step), velocity_max)

            qargs = dict(cmap=cmap, scale=scale, headwidth=headwidth,
                         headlength=headlength, headaxislength=headaxislength)
            qargs['transform'] = ccrs.PlateCarree()
            qargs['norm'] = offset

            # plot arrows over pcolor
            h = ax.quiver(lon[::sub], lat[::sub], u[::sub], v[::sub], color_clipped, **qargs)
            # map_features(ax, extent, LAND, edgecolor, landcolor, state_lines)

            # generate colorbar
            divider = make_axes_locatable(ax)
            cax = divider.new_horizontal(size='5%', pad=0.05, axes_class=plt.Axes)
            fig.add_axes(cax)

            cb = plt.colorbar(h, cax=cax, ticks=ticks)
            cb.ax.set_yticklabels([f'{s:d}' for s in ticks])
            cb.set_label('cm/s')

            plt.savefig(save_path + '/' + fname)
            plt.close('all')
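# Hypothetical examples of calling plot_ruv().  The file path and output
# directory are placeholders, and the 'QC07' column name is only assumed to
# exist after running the QARTOD tests shown (the code-to-test mapping is not
# spelled out here, so treat the flag name as illustrative).
def _example_plot_ruv():
    figures = '/tmp/figures'  # hypothetical output directory (must already exist)
    r = Radial('/tmp/RDLi_SEAB_2019_01_01_0000.ruv')  # hypothetical input radial

    # color-coded speed with the red/blue scheme
    plot_ruv(r, save_path=figures, speed_display='color', redblue=True)

    # run a few QARTOD tests so a QC flag column exists, then highlight its
    # fail/suspect/not-evaluated vectors with arrow length showing speed
    r.initialize_qc()
    r.qc_qartod_radial_count()
    r.qc_qartod_valid_location()
    r.qc_qartod_maximum_velocity()
    r.qc_qartod_spatial_median()
    plot_ruv(r, save_path=figures, speed_display='arrowlength', plotflag='QC07')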