Example #1
def test_convert_AZFP():
    # Read in the dataset that will be used to confirm working conversions. Generated from MATLAB code.
    ds_test = xr.open_dataset(azfp_test_path)

    # Unpacking data
    # tmp = ConvertAZFP(azfp_01a_path, azfp_xml_path)
    # tmp.parse_raw()
    tmp = Convert(azfp_01a_path, azfp_xml_path)
    tmp.raw2nc()

    # Test beam group
    with xr.open_dataset(tmp.nc_path, group='Beam') as ds_beam:
        # Test frequency
        assert np.array_equal(ds_test.frequency, ds_beam.frequency)
        # Test sea absorption
        # assert np.array_equal(ds_test.sea_abs, ds_beam.sea_abs)
        # Test ping time
        assert np.array_equal(ds_test.ping_time, ds_beam.ping_time)
        # Test tilt x and y
        assert np.array_equal(ds_test.tilt_x, ds_beam.tilt_x)
        assert np.array_equal(ds_test.tilt_y, ds_beam.tilt_y)
        # Test backscatter_r
        assert np.array_equal(ds_test.backscatter, ds_beam.backscatter_r)

    # Test environment group
    with xr.open_dataset(tmp.nc_path, group='Environment') as ds_env:
        # Test temperature
        assert np.array_equal(ds_test.temperature, ds_env.temperature)
        # Test sound speed. Only one value is checked because sound speed is
        # identical across frequencies
        # assert ds_test.sound_speed == ds_env.sound_speed_indicative.values[0]

    ds_test.close()
    os.remove(tmp.nc_path)
    del tmp
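
A minimal sketch of inspecting a converted file, following the pattern above; the path is a placeholder, and only the 'Beam' and 'Environment' groups are confirmed by these tests:

import xarray as xr

nc_path = 'converted.nc'  # placeholder path to a file produced by raw2nc()
for group in ['Beam', 'Environment']:  # the groups opened by the tests above
    with xr.open_dataset(nc_path, group=group) as ds:
        print(group, list(ds.data_vars))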
Example #2
def test_model_AZFP():
    # Read in the dataset that will be used to confirm working conversions. Generated from MATLAB code.
    Sv_test = xr.open_dataset(azfp_test_Sv_path)
    TS_test = xr.open_dataset(azfp_test_TS_path)

    # Convert to .nc file
    tmp_convert = Convert(azfp_01a_path, azfp_xml_path)
    tmp_convert.raw2nc()

    tmp_echo = EchoData(tmp_convert.nc_path)
    tmp_echo.calibrate(save=True)
    tmp_echo.calibrate_TS(save=True)
    tmp_echo.get_MVBS()

    # Check setters
    tmp_echo.pressure = 10
    tmp_echo.salinity = 20
    tmp_echo.temperature = 12

    # Test Sv data
    with xr.open_dataset(tmp_echo.Sv_path) as ds_Sv:
        assert np.allclose(Sv_test.Sv, ds_Sv.Sv, atol=1e-15)

    # Test TS data
    with xr.open_dataset(tmp_echo.TS_path) as ds_TS:
        assert np.allclose(TS_test.TS, ds_TS.TS, atol=1e-15)

    Sv_test.close()
    TS_test.close()
    os.remove(tmp_echo.Sv_path)
    os.remove(tmp_echo.TS_path)
    os.remove(tmp_convert.nc_path)
    del tmp_convert
    del tmp_echo
Example #3
def test_model_AZFP():
    # Read in the dataset that will be used to confirm working conversions. Generated from MATLAB code.
    Sv_test = xr.open_dataset(azfp_test_Sv_path)
    TS_test = xr.open_dataset(azfp_test_TS_path)

    # Convert to .nc file
    tmp_convert = Convert(azfp_01a_path, azfp_xml_path)
    tmp_convert.raw2nc()

    tmp_echo = EchoData(tmp_convert.nc_path)
    tmp_echo.calibrate(save=True)
    tmp_echo.calibrate_TS(save=True)
    tmp_echo.get_MVBS()

    # TODO: atol=1e-3 is a large number, need to track down which part
    #  of the calculation contributes to this large discrepancy.
    # Test Sv data
    with xr.open_dataset(tmp_echo.Sv_path) as ds_Sv:
        assert np.allclose(Sv_test.Sv, ds_Sv.Sv, atol=1e-3)

    # Test TS data
    with xr.open_dataset(tmp_echo.TS_path) as ds_TS:
        assert np.allclose(TS_test.TS, ds_TS.TS, atol=1e-3)

    Sv_test.close()
    TS_test.close()
    os.remove(tmp_echo.Sv_path)
    os.remove(tmp_echo.TS_path)
    os.remove(tmp_convert.nc_path)
    del tmp_convert
    del tmp_echo
Example #4
def test_2in1_ek80_conversion():
    file = Path(
        "./echopype/test_data/ek80/Green2.Survey2.FM.short.slow.-D20191004-T211557.raw"
    ).resolve()
    nc_path = file.parent.joinpath(file.stem + ".nc")
    tmp = Convert(str(file), model="EK80")
    tmp.raw2nc()
    del tmp
    nc_path.unlink()
Example #5
def test_calibrate_ek80_cw():
    """Check noise estimation and noise removal using xarray and brute force using numpy.
    """
    ek80_raw_path = ek80_path.joinpath('D20190822-T161221.raw')
    # Unpack data and convert to .nc file
    tmp = Convert(ek80_raw_path, model="EK80")
    tmp.raw2nc()

    # Read .nc file into a Process object and calibrate
    e_data = Process(tmp.nc_path)
    e_data.calibrate(save=True)
    os.remove(e_data.Sv_path)
Example #6
def process_azfp(site, data_directory, xml_file, output_directory, dates,
                 tilt_correction):
    """
    Use echopype to convert and process the ASL AZFP bio-acoustic sonar data
    (in *.01A files) to generate echograms for use by the community.

    :param site: OOI site name
    :param data_directory: directory with the raw *.01A files to process
    :param xml_file: instrument XML file with the deployment configuration
    :param output_directory: directory to save the converted NetCDF files to
    :param dates: starting and ending dates bounding the files to process
    :param tilt_correction: tilt correction angle to apply, if any
    :return data: concatenated Sv dataset covering the requested dates
    """
    # generate a list of data files given the input dates
    file_list = azfp_file_list(data_directory, dates)

    # flatten the nested file list into a single list
    file_list = [file for sub in file_list for file in sub]
    if not file_list:
        # if there are no files to process, exit cleanly
        return None

    # make sure the data output directory exists
    output_directory = os.path.join(output_directory,
                                    dates[0] + '-' + dates[1])
    if not os.path.isdir(output_directory):
        os.mkdir(output_directory)

    # convert the list of .01A files using echopype and save the output as NetCDF files
    dc = Convert(file_list, xml_file)
    dc.platform_name = site  # OOI site name
    dc.platform_type = 'Mooring'  # ICES platform type
    dc.platform_code_ICES = '48'  # ICES code: tethered collection of instruments at a fixed location that may
    # include seafloor, mid-water or surface components
    dc.raw2nc(save_path=output_directory)

    # process the data, calculating the volume acoustic backscatter strength and the vertical range
    echo = []
    nc_files = glob.glob(output_directory + '/[12]???????.nc')
    for nc in nc_files:
        tmp_echo = Process(nc)
        tmp_echo.calibrate()  # calculate Sv
        data = tmp_echo.Sv  # extract the Sv dataset
        echo.append(data.sortby('ping_time'))  # append to the echogram list

    # concatenate the data into a single dataset
    data = xr.concat(echo, dim='ping_time', join='outer')
    data = data.sortby(['frequency', 'ping_time'])
    data['frequency'] = data['frequency'].astype(np.float32)
    data['range_bin'] = data['range_bin'].astype(np.int32)
    data['range'] = data['range'].sel(ping_time=data.ping_time[0], drop=True)
    data = data.set_coords('range')

    if tilt_correction:
        range_correction(data, tilt_correction)  # apply a tilt correction, if applicable

    # pass the Sv data back for further processing
    return data
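
A hypothetical invocation of process_azfp is sketched below; the site code, paths, and dates are placeholders rather than values from the source.

# Hypothetical invocation; the site code, paths, and dates are placeholders
data = process_azfp(
    site='CE01ISSM',                           # assumed OOI site code
    data_directory='/data/azfp/raw',           # placeholder raw-data directory
    xml_file='/data/azfp/instrument.XML',      # placeholder instrument XML file
    output_directory='/data/azfp/processed',   # placeholder output directory
    dates=['20170801', '20170831'],            # assumed start/end date strings
    tilt_correction=15,                        # example tilt angle in degrees
)
if data is not None:  # None means no files matched the requested dates
    print(data)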
Example #7
def test_process_AZFP_matlab():
    # Read in the dataset that will be used to confirm working conversions. Generated from MATLAB code.
    Sv_test = loadmat(
        str(azfp_path.joinpath('from_matlab/17082117_matlab_Output_Sv.mat')))
    TS_test = loadmat(
        str(azfp_path.joinpath('from_matlab/17082117_matlab_Output_TS.mat')))

    # Convert to .nc file
    tmp_convert = Convert(str(azfp_path.joinpath('17082117.01A')),
                          str(azfp_path.joinpath('17041823.XML')))
    tmp_convert.raw2nc()

    tmp_echo = Process(tmp_convert.nc_path,
                       salinity=27.9,
                       pressure=59,
                       temperature=None)
    tmp_echo.calibrate(save=True)
    tmp_echo.calibrate_TS(save=True)
    tmp_echo.remove_noise()
    tmp_echo.get_MVBS()

    # Tolerance is relaxed because temperature is not averaged here, as it is
    # in the MATLAB code
    def check_output(ds_base, ds_cmp, cal_type):
        for fidx in range(4):  # loop through all freq
            assert np.alltrue(
                ds_cmp.range.isel(frequency=fidx).values == ds_base['Output']
                [0]['Range'][fidx])
            assert np.allclose(ds_cmp[cal_type].isel(frequency=fidx).values,
                               ds_base['Output'][0][cal_type][fidx],
                               atol=1e-13,
                               rtol=0)

    # Check Sv
    check_output(ds_base=Sv_test, ds_cmp=tmp_echo.Sv, cal_type='Sv')

    # Check TS
    check_output(ds_base=TS_test, ds_cmp=tmp_echo.TS, cal_type='TS')

    os.remove(tmp_echo.Sv_path)
    os.remove(tmp_echo.TS_path)
    del tmp_echo
    os.remove(tmp_convert.nc_path)
Example #8
def test_convert_ek60():
    """Test converting """
    # Unpacking data
    # tmp = ConvertEK60(ek60_raw_path)
    # tmp.load_ek60_raw()

    # # Convert to .nc file
    # tmp.raw2nc()
    tmp = Convert(ek60_raw_path)

    # Test saving zarr file
    tmp.raw2zarr()
    shutil.rmtree(tmp.zarr_path, ignore_errors=True)  # delete non-empty folder
    # consider an os.walk()-based alternative if OS-specific errors occur

    # Test saving nc file and perform checks
    tmp.raw2nc()

    # Read .nc file into an xarray DataArray
    ds_beam = xr.open_dataset(tmp.nc_path, group='Beam')

    # Check if backscatter data from all channels are identical to those directly unpacked
    for idx in tmp.config_datagram['transceivers'].keys():
        # idx is the channel index assigned by the instrument, starting from 1;
        # idx - 1 is used because power_dict_split[0] is a 0-indexed numpy array
        assert np.any(
            tmp.power_dict_split[0][idx - 1, :, :] ==
            ds_beam.backscatter_r.sel(
                frequency=tmp.config_datagram['transceivers'][idx]
                ['frequency']).data)
    ds_beam.close()
    os.remove(tmp.nc_path)
    del tmp
Example #9
def test_calibration_ek60_echoview():
    ek60_raw_path = str(ek60_path.joinpath(
        'DY1801_EK60-D20180211-T164025.raw'))  # constant range_bin
    ek60_echoview_path = ek60_path.joinpath('from_echoview')

    tmp = Convert(ek60_raw_path)
    tmp.raw2nc(overwrite=True)

    # Read .nc file into a Process object and calibrate
    e_data = Process(tmp.nc_path)
    e_data.calibrate(save=True)

    # Compare with EchoView outputs
    channels = []
    for freq in [18, 38, 70, 120, 200]:
        fname = str(
            ek60_echoview_path.joinpath(
                'DY1801_EK60-D20180211-T164025-Sv%d.csv' % freq))
        channels.append(
            pd.read_csv(fname, header=None, skiprows=[0]).iloc[:, 13:])
    test_Sv = np.stack(channels)
    # The EchoView data is missing 1 range bin, and the first few range bins are handled differently
    assert np.allclose(test_Sv[:, :, 7:], e_data.Sv.Sv[:, :10, 8:], atol=1e-8)
Example #10
def test_convert_ek60():
    """Test converting """
    # Unpacking data
    # tmp = ConvertEK60(ek60_raw_path)
    # tmp.load_ek60_raw()

    # # Convert to .nc file
    # tmp.raw2nc()
    tmp = Convert(ek60_raw_path)
    tmp.raw2nc()

    # Read .nc file into an xarray DataArray
    ds_beam = xr.open_dataset(tmp.nc_path, group='Beam')

    # Check if backscatter data from all channels are identical to those directly unpacked
    for idx in tmp.config_datagram['transceivers'].keys():
        # idx is channel index starting from 0
        assert np.any(
            tmp.power_dict_split[0][idx] == ds_beam.backscatter_r.sel(
                frequency=tmp.config_datagram['transceivers'][idx]
                ['frequency']).data)
    ds_beam.close()
    os.remove(tmp.nc_path)
    del tmp
Example #11
def process_ek60(site, data_directory, output_directory, dates,
                 tilt_correction):
    """
    Use echopype to convert and process EK60 bio-acoustic sonar data (in *.raw
    files) to generate echograms for use by the community.

    :param site: OOI site name
    :param data_directory: directory with the raw *.raw files to process
    :param output_directory: directory to save the converted NetCDF files to
    :param dates: starting and ending dates bounding the files to process
    :param tilt_correction: tilt correction angle to apply, if any
    :return data: concatenated Sv dataset covering the requested dates
    """
    # generate a list of data files given the input dates
    file_list = ek60_file_list(data_directory, dates)

    # flatten the nested file list into a single list
    file_list = [file for sub in file_list for file in sub]
    if not file_list:
        # if there are no files to process, exit cleanly
        return None

    # make sure the data output directory exists
    output_directory = os.path.join(output_directory,
                                    dates[0] + '-' + dates[1])
    if not os.path.isdir(output_directory):
        os.mkdir(output_directory)

    # convert the list of .raw files using echopype and save the output as NetCDF files
    dc = Convert(file_list)
    dc.platform_name = site  # OOI site name
    if site == 'CE02SHBP':
        dc.platform_type = 'Fixed Benthic Node'  # ICES platform type
        dc.platform_code_ICES = '11'  # ICES code
    else:
        dc.platform_type = 'Mooring'  # ICES platform type
        dc.platform_code_ICES = '48'  # ICES code: tethered collection of instruments at a fixed location that may
        # include seafloor, mid-water or surface components
    dc.raw2nc(save_path=output_directory)

    # process the data, calculating the volume acoustic backscatter strength and the vertical range
    echo = []
    sample_thickness = []
    tvg_correction_factor = []
    nc_files = glob.glob(output_directory + '/*OOI-D*.nc')
    for nc in nc_files:
        tmp_echo = Process(nc)
        tmp_echo.calibrate()  # calculate Sv
        data = tmp_echo.Sv  # extract the Sv dataset
        echo.append(data.sortby('ping_time'))  # append to the echogram list
        sample_thickness.append(tmp_echo.sample_thickness.values)
        tvg_correction_factor.append(tmp_echo.tvg_correction_factor)

    # concatenate the data into a single dataset
    data = xr.concat(echo, dim='ping_time', join='outer')
    data = data.sortby(['frequency', 'ping_time'])
    data['range_bin'] = data['range_bin'].astype(np.int32)
    data['range'] = data['range'].sel(ping_time=data.ping_time[0], drop=True)
    data = data.set_coords('range')

    # recalculate the range to deal with some discrepancies caused by the xarray concat
    thickness = np.max(np.array(sample_thickness), 0)
    correction_factor = np.max(tvg_correction_factor)
    range_meter = calc_range(data, thickness, correction_factor)
    data['range'] = data['range'].fillna(range_meter)

    if tilt_correction:
        range_correction(data, tilt_correction)  # apply a tilt correction, if applicable

    # pass the Sv data back for further processing
    return data
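
As with process_azfp, a hypothetical invocation of process_ek60 is sketched below; the paths and dates are placeholders ('CE02SHBP' is the site named in the branch above).

# Hypothetical invocation; the paths and dates are placeholders
data = process_ek60(
    site='CE02SHBP',                          # site handled by the branch above
    data_directory='/data/ek60/raw',          # placeholder raw-data directory
    output_directory='/data/ek60/processed',  # placeholder output directory
    dates=['20180101', '20180131'],           # assumed start/end date strings
    tilt_correction=None,                     # a falsy value skips the correction
)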
Example #12
def test_noise_estimates_removal():
    """Check noise estimation and noise removal using xarray and brute force using numpy.
    """

    # Noise estimation via EchoData method =========
    # Unpack data and convert to .nc file
    tmp = Convert(ek60_raw_path)
    tmp.raw2nc()

    # Read .nc file into an EchoData object and calibrate
    e_data = EchoData(nc_path)
    e_data.calibrate(save=True)
    noise_est = e_data.noise_estimates()

    with xr.open_dataset(ek60_test_path) as ds_test:
        ds_Sv = ds_test.Sv

    assert np.allclose(
        ds_Sv.values, e_data.Sv['Sv'].values,
        atol=1e-10)  # TODO: currently only matches the MATLAB output to 1e-5
    # assert np.allclose(ds_TS.values, e_data.TS.TS.values, atol=1e-10)
    # Noise estimation via numpy brute force =======
    proc_data = xr.open_dataset(Sv_path)

    # Get tile indexing parameters
    e_data.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
        e_data.get_tile_params(r_data_sz=proc_data.range_bin.size,
                               p_data_sz=proc_data.ping_time.size,
                               r_tile_sz=e_data.noise_est_range_bin_size,
                               p_tile_sz=e_data.noise_est_ping_size,
                               sample_thickness=e_data.sample_thickness)

    range_bin_tile_bin_edge = np.unique(range_bin_tile_bin_edge)

    range_meter = e_data.range
    TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
    ABS = 2 * e_data.seawater_absorption * range_meter
    power_cal_test = (10**((proc_data.Sv - ABS - TVG) / 10)).values

    num_ping_bins = ping_tile_bin_edge.size - 1
    num_range_bins = range_bin_tile_bin_edge.size - 1
    noise_est_tmp = np.empty(
        (proc_data.frequency.size, num_range_bins, num_ping_bins))  # all tiles
    noise_est_test = np.empty(
        (proc_data.frequency.size, num_ping_bins))  # all columns
    p_sz = e_data.noise_est_ping_size
    p_idx = np.arange(p_sz, dtype=int)
    r_sz = (e_data.noise_est_range_bin_size.max() /
            e_data.sample_thickness[0].values).astype(int).values
    r_idx = np.arange(r_sz, dtype=int)

    # Get noise estimates manually
    for f_seq in np.arange(proc_data.frequency.size):
        for p_seq in np.arange(num_ping_bins):
            for r_seq in np.arange(num_range_bins):
                if p_idx[-1] + p_sz * p_seq < power_cal_test.shape[1]:
                    pp_idx = p_idx + p_sz * p_seq
                else:
                    pp_idx = np.arange(p_sz * p_seq, power_cal_test.shape[1])
                if r_idx[-1] + r_sz * r_seq < power_cal_test.shape[2]:
                    rr_idx = r_idx + r_sz * r_seq
                else:
                    rr_idx = np.arange(r_sz * r_seq, power_cal_test.shape[2])
                nn = power_cal_test[f_seq, :, :][np.ix_(pp_idx, rr_idx)]
                noise_est_tmp[f_seq, r_seq, p_seq] = 10 * np.log10(nn.mean())
            noise_est_test[f_seq, p_seq] = noise_est_tmp[f_seq, :, p_seq].min()

    # Check xarray and numpy noise estimates
    assert np.all(np.isclose(noise_est_test, noise_est.noise_est.values))

    # Remove noise using .remove_noise()
    e_data.remove_noise()

    # Remove noise manually
    Sv_clean_test = np.empty(proc_data.Sv.shape)
    for ff, freq in enumerate(proc_data.frequency.values):
        for pp in np.arange(num_ping_bins):
            if pp == num_ping_bins - 1:  # if the last ping bin
                pp_idx = np.arange(p_sz * pp, power_cal_test.shape[1])
            else:  # all other ping bins
                pp_idx = p_idx + p_sz * pp
            ss_tmp = proc_data['Sv'].sel(
                frequency=freq).values[pp_idx, :]  # all data in this ping bin
            nn_tmp = (
                noise_est['noise_est'].sel(frequency=freq).isel(ping_time=pp) +
                ABS.sel(frequency=freq) + TVG.sel(frequency=freq)).values
            Sv_clean_tmp = ss_tmp.copy()
            Sv_clean_tmp[Sv_clean_tmp <= nn_tmp] = np.nan
            Sv_clean_test[ff, pp_idx, :] = Sv_clean_tmp

    # Check xarray and numpy noise removal
    assert ~np.any(
        e_data.Sv_clean['Sv'].values[~np.isnan(e_data.Sv_clean['Sv'].values)]
        != Sv_clean_test[~np.isnan(Sv_clean_test)])

    proc_data.close()
    del tmp
    del e_data
    os.remove(nc_path)
    os.remove(Sv_path)
Example #13
def test_noise_estimates_removal():
    """Check noise estimation and noise removal using xarray and brute force using numpy.
    """

    # Noise estimation via EchoData method =========
    # Unpack data and convert to .nc file
    tmp = Convert(ek60_raw_path)
    tmp.raw2nc()

    # Read .nc file into an EchoData object and calibrate
    e_data = EchoData(nc_path)
    e_data.calibrate(save=True)
    noise_est = e_data.noise_estimates()
    e_data.remove_noise()

    # Noise estimation via numpy brute force =======
    proc_data = xr.open_dataset(Sv_path)

    # Get tile indexing parameters
    e_data.noise_est_range_bin_size, add_idx, range_bin_tile_bin_edge = \
        e_data.get_tile_params(r_data_sz=proc_data.range_bin.size,
                               p_data_sz=proc_data.ping_time.size,
                               r_tile_sz=e_data.noise_est_range_bin_size,
                               p_tile_sz=e_data.noise_est_ping_size,
                               sample_thickness=e_data.sample_thickness)

    power_cal_test = (10 ** ((proc_data.Sv - e_data.ABS - e_data.TVG) / 10)).values

    num_ping_bins = np.unique(add_idx).size
    num_range_bins = range_bin_tile_bin_edge.size - 1
    noise_est_tmp = np.empty((proc_data.frequency.size, num_range_bins, num_ping_bins))  # all tiles
    noise_est_test = np.empty((proc_data.frequency.size, num_ping_bins))  # all columns
    p_sz = e_data.noise_est_ping_size
    p_idx = np.arange(p_sz, dtype=int)
    r_sz = (e_data.noise_est_range_bin_size.max() / e_data.sample_thickness[0].values).astype(int)
    r_idx = np.arange(r_sz, dtype=int)

    # Get noise estimates manually
    for f_seq in np.arange(proc_data.frequency.size):
        for p_seq in np.arange(num_ping_bins):
            for r_seq in np.arange(num_range_bins):
                if p_idx[-1] + p_sz * p_seq < power_cal_test.shape[1]:
                    pp_idx = p_idx + p_sz * p_seq
                else:
                    pp_idx = np.arange(p_sz * p_seq, power_cal_test.shape[1])
                if r_idx[-1] + r_sz * r_seq < power_cal_test.shape[2]:
                    rr_idx = r_idx + r_sz * r_seq
                else:
                    rr_idx = np.arange(r_sz * r_seq, power_cal_test.shape[2])
                nn = power_cal_test[f_seq, :, :][np.ix_(pp_idx, rr_idx)]
                noise_est_tmp[f_seq, r_seq, p_seq] = 10 * np.log10(nn.mean())
            noise_est_test[f_seq, p_seq] = noise_est_tmp[f_seq, :, p_seq].min()

    # Check xarray and numpy noise estimates
    assert np.all(np.isclose(noise_est_test, noise_est.noise_est.values))

    # Remove noise manually
    Sv_clean_test = np.empty(proc_data.Sv.shape)
    for f_seq in np.arange(proc_data.frequency.size):
        for p_seq in np.arange(num_ping_bins):
            if p_idx[-1] + p_sz * p_seq < power_cal_test.shape[1]:
                pp_idx = p_idx + p_sz * p_seq
            else:
                pp_idx = np.arange(p_sz * p_seq, power_cal_test.shape[1])
            ss_tmp = proc_data.Sv.values[f_seq, pp_idx, :]
            nn_tmp = (noise_est_test[f_seq, p_seq] +
                      e_data.ABS.isel(frequency=f_seq) + e_data.TVG.isel(frequency=f_seq)).values
            Sv_clean_tmp = ss_tmp.copy()
            Sv_clean_tmp[Sv_clean_tmp < nn_tmp] = np.nan
            Sv_clean_test[f_seq, pp_idx, :] = Sv_clean_tmp

    # Check xarray and numpy noise removal
    assert ~np.any(e_data.Sv_clean.Sv_clean.values[~np.isnan(e_data.Sv_clean.Sv_clean.values)]
                   != Sv_clean_test[~np.isnan(Sv_clean_test)])  

    proc_data.close()
    del tmp
    del e_data
    os.remove(nc_path)
    os.remove(Sv_path)
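
The brute-force loops in the last two examples reduce to a simple idea: average the calibrated, linear-domain power over tiles, then take the minimum tile mean within each ping bin as the noise estimate. A self-contained numpy sketch on synthetic data follows; the tile sizes and array shapes are illustrative, not taken from the tests.

import numpy as np

# Synthetic calibrated power in the linear domain: (n_ping, n_range)
rng = np.random.default_rng(0)
power = rng.uniform(1e-10, 1e-7, size=(100, 1000))

p_sz, r_sz = 20, 100  # illustrative ping and range tile sizes
num_ping_bins = int(np.ceil(power.shape[0] / p_sz))
num_range_bins = int(np.ceil(power.shape[1] / r_sz))

noise_est = np.empty(num_ping_bins)
for p in range(num_ping_bins):
    # Mean power over each range tile within this ping bin
    tile_means = [power[p * p_sz:(p + 1) * p_sz,
                        r * r_sz:(r + 1) * r_sz].mean()
                  for r in range(num_range_bins)]
    # Noise estimate for the ping bin: the quietest tile, converted to dB
    noise_est[p] = 10 * np.log10(min(tile_means))

In the tests, the same minimum-over-tiles logic runs per frequency, and noise removal then adds the estimate back to the ABS and TVG terms before masking Sv samples that fall below the combined threshold.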