Example No. 1
def test_model_AZFP():
    """End-to-end AZFP test: convert raw data, calibrate Sv/TS, and
    compare the results against MATLAB-generated reference datasets.
    """
    # Read in the dataset that will be used to confirm working conversions. Generated from MATLAB code.
    Sv_test = xr.open_dataset(azfp_test_Sv_path)
    TS_test = xr.open_dataset(azfp_test_TS_path)

    # Convert to .nc file
    tmp_convert = Convert(azfp_01a_path, azfp_xml_path)
    tmp_convert.raw2nc()

    tmp_echo = EchoData(tmp_convert.nc_path)
    tmp_echo.calibrate(save=True)     # writes Sv to tmp_echo.Sv_path
    tmp_echo.calibrate_TS(save=True)  # writes TS to tmp_echo.TS_path
    tmp_echo.get_MVBS()

    try:
        # TODO: atol=1e-3 is a large number, need to track down which part
        #  of the calculation contributes to this large discrepancy.
        # Test Sv data
        with xr.open_dataset(tmp_echo.Sv_path) as ds_Sv:
            assert np.allclose(Sv_test.Sv, ds_Sv.Sv, atol=1e-3)

        # Test TS data
        with xr.open_dataset(tmp_echo.TS_path) as ds_TS:
            assert np.allclose(TS_test.TS, ds_TS.TS, atol=1e-3)
    finally:
        # Clean up even when an assertion fails, so stale intermediate
        # files cannot poison subsequent test runs.
        Sv_test.close()
        TS_test.close()
        os.remove(tmp_echo.Sv_path)
        os.remove(tmp_echo.TS_path)
        os.remove(tmp_convert.nc_path)
        del tmp_convert
        del tmp_echo
Example No. 2
def get_processed_ek60(temp_file, clean_up=True):
    """Convert an EK60 ``.raw`` file and run the full processing chain
    (calibration, noise removal, MVBS), saving each product to disk.

    Parameters
    ----------
    temp_file : str
        Path to the EK60 ``.raw`` file.
    clean_up : bool
        If True, delete the intermediate .nc products once MVBS exists.

    Returns
    -------
    str or None
        Path to the MVBS .nc file, or None if it was not produced.
    """
    converted = temp_file.replace(".raw", ".nc")
    calibrated = temp_file.replace(".raw", "_Sv.nc")
    calibrated_cleaned = temp_file.replace(".raw", "_Sv_clean.nc")
    mvbs = temp_file.replace(".raw", "_MVBS.nc")

    data_tmp = ConvertEK60(temp_file)
    data_tmp.raw2nc()

    data = EchoData(converted)

    # Calibration and echo-integration
    if os.path.exists(calibrated):
        os.unlink(calibrated)
    data.calibrate(save=True)

    # Denoising
    if os.path.exists(calibrated_cleaned):
        os.unlink(calibrated_cleaned)
    data.remove_noise(save=True)

    # Mean Volume Backscatter Strength
    if os.path.exists(mvbs):
        os.unlink(mvbs)
    data.get_MVBS(save=True)

    if os.path.exists(mvbs):
        if clean_up:
            # Remove the intermediate products now that MVBS exists;
            # reuse the path variables instead of re-deriving them.
            for intermediate in (converted, calibrated, calibrated_cleaned):
                print(
                    f"{datetime.datetime.now().strftime('%H:%M:%S')}  cleaning up: {intermediate}"
                )
                os.unlink(intermediate)
        return mvbs
    # Preserved from the original: fall through to None when get_MVBS did
    # not write the file — callers should check for this.
    return None
Example No. 3
def test_model_AZFP():
    """End-to-end AZFP test that also exercises the environment setters.

    Converts the raw file, calibrates Sv/TS, computes MVBS, assigns
    pressure/salinity/temperature, then compares Sv/TS against
    MATLAB-generated reference datasets.
    """
    # Read in the dataset that will be used to confirm working conversions. Generated from MATLAB code.
    Sv_test = xr.open_dataset(azfp_test_Sv_path)
    TS_test = xr.open_dataset(azfp_test_TS_path)

    # Convert to .nc file
    tmp_convert = Convert(azfp_01a_path, azfp_xml_path)
    tmp_convert.raw2nc()

    tmp_echo = EchoData(tmp_convert.nc_path)
    tmp_echo.calibrate(save=True)     # writes Sv to tmp_echo.Sv_path
    tmp_echo.calibrate_TS(save=True)  # writes TS to tmp_echo.TS_path
    tmp_echo.get_MVBS()

    # Check setters. NOTE(review): these run AFTER calibration, so they
    # only verify that assignment works — they cannot affect the saved
    # Sv/TS files compared below.
    tmp_echo.pressure = 10
    tmp_echo.salinity = 20
    tmp_echo.temperature = 12

    try:
        # Test Sv data
        with xr.open_dataset(tmp_echo.Sv_path) as ds_Sv:
            assert np.allclose(Sv_test.Sv, ds_Sv.Sv, atol=1e-15)

        # Test TS data
        with xr.open_dataset(tmp_echo.TS_path) as ds_TS:
            assert np.allclose(TS_test.TS, ds_TS.TS, atol=1e-15)
    finally:
        # Clean up even when an assertion fails, so stale intermediate
        # files cannot poison subsequent test runs.
        Sv_test.close()
        TS_test.close()
        os.remove(tmp_echo.Sv_path)
        os.remove(tmp_echo.TS_path)
        os.remove(tmp_convert.nc_path)
        del tmp_convert
        del tmp_echo
Example No. 4
def azfp_calibrate(path):
    """Calibrate every converted AZFP .nc file directly under *path*,
    then denoise and compute MVBS, saving results to disk.

    Absorption coefficients (passed for T=5, S=32, D=80; should be
    improved in Echopype) are selected by the deployment directory name,
    i.e. the last '/'-separated component of *path*.

    Raises
    ------
    ValueError
        If .nc files are present but the deployment name is not one of
        '55139' / '55140'. (The original if/elif left ``abs_coeff``
        unbound in that case, failing later with NameError.)
    """
    # Per-deployment absorption coefficients, one value per channel.
    abs_coeff_by_deployment = {
        '55139': np.array([.009778, .019828, .030685, .042934]),
        '55140': np.array([.042934, .108859, .256768]),
    }
    deployment = path.split('/')[-1]

    # Calibrate the data
    nc_filenames = glob.glob(path + '/*.nc')
    for filename in nc_filenames:
        print('calibrate ' + filename)
        data = EchoData(filename)
        data.calibrate()

        if deployment not in abs_coeff_by_deployment:
            raise ValueError('Unknown deployment directory: %r' % deployment)

        # Hoist calc_range(): the original called it three times per file.
        range_da = data.calc_range()
        # Broadcast the per-frequency coefficients onto the range grid.
        abs_coeff = range_da.frequency * 0 + abs_coeff_by_deployment[deployment]

        data.ABS = 2 * abs_coeff * range_da
        data.TVG = 20 * np.log10(range_da)

        data.remove_noise(noise_est_range_bin_size=5, noise_est_ping_size=20, save=True)
        data.get_MVBS(source='Sv_clean', MVBS_range_bin_size=5, MVBS_ping_size=12, save=True)
Example No. 5
def test_noise_estimates_removal():
    """Check noise estimation and noise removal using xarray and brute force using numpy.

    Runs EchoData.noise_estimates() / remove_noise(), then recomputes the
    same quantities by hand with plain numpy tile indexing and asserts
    that both paths agree.
    """

    # Noise estimation via EchoData method =========
    # Unpack data and convert to .nc file
    tmp = Convert(ek60_raw_path)
    tmp.raw2nc()

    # Read .nc file into an EchoData object and calibrate
    e_data = EchoData(nc_path)
    e_data.calibrate(save=True)  # save=True: writes Sv so it can be re-read below
    noise_est = e_data.noise_estimates()

    with xr.open_dataset(ek60_test_path) as ds_test:
        ds_Sv = ds_test.Sv

    # Sanity check: calibrated Sv matches the stored reference dataset.
    assert np.allclose(
        ds_Sv.values, e_data.Sv['Sv'].values,
        atol=1e-10)  # TODO: now identical to 1e-5 with matlab output
    # assert np.allclose(ds_TS.values, e_data.TS.TS.values, atol=1e-10)
    # Noise estimation via numpy brute force =======
    proc_data = xr.open_dataset(Sv_path)

    # Get tile indexing parameters
    # NOTE: this also overwrites e_data.noise_est_range_bin_size with the
    # (possibly adjusted) value returned by get_tile_params.
    e_data.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
        e_data.get_tile_params(r_data_sz=proc_data.range_bin.size,
                               p_data_sz=proc_data.ping_time.size,
                               r_tile_sz=e_data.noise_est_range_bin_size,
                               p_tile_sz=e_data.noise_est_ping_size,
                               sample_thickness=e_data.sample_thickness)

    range_bin_tile_bin_edge = np.unique(range_bin_tile_bin_edge)

    # Undo the calibration offsets to recover linear-domain power.
    # TVG clamps range to >= 1 m so log10 never sees values < 1.
    range_meter = e_data.range
    TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
    ABS = 2 * e_data.seawater_absorption * range_meter
    power_cal_test = (10**((proc_data.Sv - ABS - TVG) / 10)).values

    num_ping_bins = ping_tile_bin_edge.size - 1
    num_range_bins = range_bin_tile_bin_edge.size - 1
    noise_est_tmp = np.empty(
        (proc_data.frequency.size, num_range_bins, num_ping_bins))  # all tiles
    noise_est_test = np.empty(
        (proc_data.frequency.size, num_ping_bins))  # all columns
    p_sz = e_data.noise_est_ping_size  # pings per tile
    p_idx = np.arange(p_sz, dtype=int)
    # Range samples per tile, derived from bin size / sample thickness.
    r_sz = (e_data.noise_est_range_bin_size.max() /
            e_data.sample_thickness[0].values).astype(int).values
    r_idx = np.arange(r_sz, dtype=int)

    # Get noise estimates manually: mean power per tile, then the minimum
    # across range tiles in each ping column is the noise estimate.
    for f_seq in np.arange(proc_data.frequency.size):
        for p_seq in np.arange(num_ping_bins):
            for r_seq in np.arange(num_range_bins):
                # The last tile in each dimension may be partial: clip
                # the index ranges to the data extent.
                if p_idx[-1] + p_sz * p_seq < power_cal_test.shape[1]:
                    pp_idx = p_idx + p_sz * p_seq
                else:
                    pp_idx = np.arange(p_sz * p_seq, power_cal_test.shape[1])
                if r_idx[-1] + r_sz * r_seq < power_cal_test.shape[2]:
                    rr_idx = r_idx + r_sz * r_seq
                else:
                    rr_idx = np.arange(r_sz * r_seq, power_cal_test.shape[2])
                nn = power_cal_test[f_seq, :, :][np.ix_(pp_idx, rr_idx)]
                noise_est_tmp[f_seq, r_seq, p_seq] = 10 * np.log10(nn.mean())
            noise_est_test[f_seq, p_seq] = noise_est_tmp[f_seq, :, p_seq].min()

    # Check xarray and numpy noise estimates
    assert np.all(np.isclose(noise_est_test, noise_est.noise_est.values))

    # Remove noise using .remove_noise()
    e_data.remove_noise()

    # Remove noise manually: Sv samples at or below the recalibrated
    # noise level (noise estimate + ABS + TVG) are replaced with NaN.
    Sv_clean_test = np.empty(proc_data.Sv.shape)
    for ff, freq in enumerate(proc_data.frequency.values):
        for pp in np.arange(num_ping_bins):
            if pp == num_ping_bins - 1:  # if the last ping bin
                pp_idx = np.arange(p_sz * pp, power_cal_test.shape[1])
            else:  # all other ping bins
                pp_idx = p_idx + p_sz * pp
            ss_tmp = proc_data['Sv'].sel(
                frequency=freq).values[pp_idx, :]  # all data in this ping bin
            nn_tmp = (
                noise_est['noise_est'].sel(frequency=freq).isel(ping_time=pp) +
                ABS.sel(frequency=freq) + TVG.sel(frequency=freq)).values
            Sv_clean_tmp = ss_tmp.copy()
            Sv_clean_tmp[Sv_clean_tmp <= nn_tmp] = np.nan
            Sv_clean_test[ff, pp_idx, :] = Sv_clean_tmp

    # Check xarray and numpy noise removal: compare only non-NaN entries.
    assert ~np.any(
        e_data.Sv_clean['Sv'].values[~np.isnan(e_data.Sv_clean['Sv'].values)]
        != Sv_clean_test[~np.isnan(Sv_clean_test)])

    # Clean up intermediate files.
    proc_data.close()
    del tmp
    del e_data
    os.remove(nc_path)
    os.remove(Sv_path)
Example No. 6
# Find all raw AZFP files (.01A) under `path`.
filenames = glob.glob(path + '/*.01A')

# convert the data file-by-file
import echopype as ep
for filename in filenames:
    print('convert ' + filename)
    data_tmp = ep.convert.ConvertAZFP(filename, xml_path)
    data_tmp.raw2nc()


# Calibrate the data
nc_filenames = glob.glob(path + '/*.nc')
from echopype.model import EchoData
for filename in nc_filenames:
    print('calibrate ' + filename)
    data = EchoData(filename)
    data.calibrate()

    # Now we pass coeff for T=5, S=32, D=80 (should be improved in Echopype)
    # Hoist calc_range(): the original called it three times per file.
    range_da = data.calc_range()
    abs_coeff = range_da.frequency * 0 + np.array([.009778, .019828, .030685, .042934])
    data.ABS = 2 * abs_coeff * range_da
    data.TVG = 20 * np.log10(range_da)

    data.remove_noise(noise_est_range_bin_size=5, noise_est_ping_size=20, save=True)
    data.get_MVBS(source='Sv_clean', MVBS_range_bin_size=5, MVBS_ping_size=12, save=True)

# NOTE(review): a stray MATLAB `keyboard` statement followed here in the
# original script; it is not valid Python (NameError at runtime) and has
# been removed.

# Open entire dataset
Example No. 7
def test_noise_estimates_removal():
    """Check noise estimation and noise removal using xarray and brute force using numpy.

    Variant that runs remove_noise() up front and uses the precomputed
    e_data.ABS / e_data.TVG attributes in the manual comparison.
    """

    # Noise estimation via EchoData method =========
    # Unpack data and convert to .nc file
    tmp = Convert(ek60_raw_path)
    tmp.raw2nc()

    # Read .nc file into an EchoData object and calibrate
    e_data = EchoData(nc_path)
    e_data.calibrate(save=True)  # save=True: writes Sv so it can be re-read below
    noise_est = e_data.noise_estimates()
    e_data.remove_noise()

    # Noise estimation via numpy brute force =======
    proc_data = xr.open_dataset(Sv_path)

    # Get tile indexing parameters
    # NOTE(review): the second return value is unpacked as `add_idx` and
    # the third as `range_bin_tile_bin_edge` — confirm this matches the
    # get_tile_params return order before relying on it.
    e_data.noise_est_range_bin_size, add_idx, range_bin_tile_bin_edge = \
        e_data.get_tile_params(r_data_sz=proc_data.range_bin.size,
                               p_data_sz=proc_data.ping_time.size,
                               r_tile_sz=e_data.noise_est_range_bin_size,
                               p_tile_sz=e_data.noise_est_ping_size,
                               sample_thickness=e_data.sample_thickness)

    # Undo the calibration offsets to recover linear-domain power.
    power_cal_test = (10 ** ((proc_data.Sv - e_data.ABS - e_data.TVG) / 10)).values

    num_ping_bins = np.unique(add_idx).size  # one tile index per ping bin
    num_range_bins = range_bin_tile_bin_edge.size - 1
    noise_est_tmp = np.empty((proc_data.frequency.size, num_range_bins, num_ping_bins))  # all tiles
    noise_est_test = np.empty((proc_data.frequency.size, num_ping_bins))  # all columns
    p_sz = e_data.noise_est_ping_size  # pings per tile
    p_idx = np.arange(p_sz, dtype=int)
    # Range samples per tile, derived from bin size / sample thickness.
    r_sz = (e_data.noise_est_range_bin_size.max() / e_data.sample_thickness[0].values).astype(int)
    r_idx = np.arange(r_sz, dtype=int)

    # Get noise estimates manually: mean power per tile, then the minimum
    # across range tiles in each ping column is the noise estimate.
    for f, f_seq in enumerate(np.arange(proc_data.frequency.size)):
        for p, p_seq in enumerate(np.arange(num_ping_bins)):
            for r, r_seq in enumerate(np.arange(num_range_bins)):
                # The last tile in each dimension may be partial: clip
                # the index ranges to the data extent.
                if p_idx[-1] + p_sz * p_seq < power_cal_test.shape[1]:
                    pp_idx = p_idx + p_sz * p_seq
                else:
                    pp_idx = np.arange(p_sz * p_seq, power_cal_test.shape[1])
                if r_idx[-1] + r_sz * r_seq < power_cal_test.shape[2]:
                    rr_idx = r_idx + r_sz * r_seq
                else:
                    rr_idx = np.arange(r_sz * r_seq, power_cal_test.shape[2])
                nn = power_cal_test[f_seq, :, :][np.ix_(pp_idx, rr_idx)]
                noise_est_tmp[f_seq, r_seq, p_seq] = 10 * np.log10(nn.mean())
            noise_est_test[f_seq, p_seq] = noise_est_tmp[f_seq, :, p_seq].min()

    # Check xarray and numpy noise estimates
    assert np.all(np.isclose(noise_est_test, noise_est.noise_est.values))

    # Remove noise manually: Sv samples strictly below the recalibrated
    # noise level (noise estimate + ABS + TVG) become NaN.
    # NOTE(review): this variant uses `<` where the sibling test uses `<=`;
    # confirm which threshold remove_noise() actually applies.
    Sv_clean_test = np.empty(proc_data.Sv.shape)
    for f, f_seq in enumerate(np.arange(proc_data.frequency.size)):
        for p, p_seq in enumerate(np.arange(num_ping_bins)):
            if p_idx[-1] + p_sz * p_seq < power_cal_test.shape[1]:
                pp_idx = p_idx + p_sz * p_seq
            else:
                pp_idx = np.arange(p_sz * p_seq, power_cal_test.shape[1])
            ss_tmp = proc_data.Sv.values[f_seq, pp_idx, :]
            nn_tmp = (noise_est_test[f_seq, p_seq] +
                      e_data.ABS.isel(frequency=f_seq) + e_data.TVG.isel(frequency=f_seq)).values
            Sv_clean_tmp = ss_tmp.copy()
            Sv_clean_tmp[Sv_clean_tmp < nn_tmp] = np.nan
            Sv_clean_test[f_seq, pp_idx, :] = Sv_clean_tmp

    # Check xarray and numpy noise removal: compare only non-NaN entries.
    assert ~np.any(e_data.Sv_clean.Sv_clean.values[~np.isnan(e_data.Sv_clean.Sv_clean.values)]
                   != Sv_clean_test[~np.isnan(Sv_clean_test)])

    # Clean up intermediate files.
    proc_data.close()
    del tmp
    del e_data
    os.remove(nc_path)
    os.remove(Sv_path)