Example 1
def reject_err_data_2_dims(y, y_bad_beams, y_fill, r, sv):
    n_nan = np.sum(np.isnan(y))  # count nans in data
    y[y == y_fill] = np.nan  # replace fill_values by nans in data
    y[y == -9999] = np.nan  # replace -9999 by nans in data
    n_fv = np.sum(np.isnan(y)) - n_nan  # re-count nans in data
    y[y < -1e10] = np.nan  # replace extreme values by nans in data
    y[y > 1e10] = np.nan
    n_ev = np.sum(np.isnan(y)) - n_fv - n_nan  # re-count nans in data
    if isinstance(y_bad_beams, dict):  # if it's a dictionary, it's actually the percent of good beams
        for k in list(y_bad_beams.keys()):
            y[y_bad_beams[k] < 75] = np.nan
    else:
        y[y_bad_beams > 25] = np.nan  # replace bad beams by nans in data
    n_bb = np.sum(np.isnan(y)) - n_ev - n_fv - n_nan  # re-count nans in data

    [g_min, g_max] = cf.get_global_ranges(r, sv)  # get global ranges
    if g_min is not None and g_max is not None:
        y[y < g_min] = np.nan  # replace extreme values by nans in data
        y[y > g_max] = np.nan
        n_grange = np.sum(
            np.isnan(y)) - n_bb - n_ev - n_fv - n_nan  # re-count nans in data
    else:
        n_grange = np.nan

    return y, n_nan, n_fv, n_ev, n_bb, n_grange, g_min, g_max
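
A minimal usage sketch for reject_err_data_2_dims (hedged: the fill value, reference designator, and variable name below are placeholders, and cf.get_global_ranges is assumed to resolve global ranges from the OOI data review database):

import numpy as np

y2d = np.array([[3.1, -9999.0, 2.9], [np.nan, 1.0e12, 3.0]])  # float dtype so nans can be assigned
pct_bad_beams = np.array([[0, 10, 80], [5, 0, 0]])  # percent of bad beams per bin
y_clean, n_nan, n_fv, n_ev, n_bb, n_grange, g_min, g_max = reject_err_data_2_dims(
    y2d, pct_bad_beams, y_fill=-9999999.0,
    r='CP01CNSM-MFD35-01-ADCPTF000', sv='eastward_seawater_velocity')
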
def index_dataset_2d(refdes, var_name, var_data, fv):
    [g_min, g_max] = cf.get_global_ranges(refdes, var_name)
    fdata = dict()
    n_nan = []
    n_fv = []
    n_grange = []
    for i in range(len(var_data)):
        vd = var_data[i]
        n_nani = np.sum(np.isnan(vd))

        # convert fill values to nans
        vd[vd == fv] = np.nan
        n_fvi = np.sum(np.isnan(vd)) - n_nani

        if g_min is not None and g_max is not None:
            vd[vd < g_min] = np.nan
            vd[vd > g_max] = np.nan
            n_grangei = np.sum(np.isnan(vd)) - n_fvi - n_nani
        else:
            n_grangei = 'no global ranges'

        fdata.update({i: vd})
        n_nan.append(int(n_nani))
        n_fv.append(int(n_fvi))
        if isinstance(n_grangei, str):
            n_grange.append(n_grangei)  # keep the 'no global ranges' flag as a string
        else:
            n_grange.append(int(n_grangei))

    return [fdata, g_min, g_max, n_nan, n_fv, n_grange]
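
A hedged sketch of calling index_dataset_2d; var_data is assumed to be a 2-D array (e.g. ADCP bins along axis 0), and the reference designator, variable name, and fill value are placeholders:

import numpy as np

var_data = np.array([[2.0, -9999999.0, 1.5], [0.5, 2.5e3, np.nan]])
fdata, g_min, g_max, n_nan, n_fv, n_grange = index_dataset_2d(
    'GP03FLMA-RIM01-02-ADCPSL007', 'northward_seawater_velocity', var_data, fv=-9999999.0)
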
def reject_err_data_1_dims(y, y_fill, r, sv, n=None):
    n_nan = np.sum(np.isnan(y)) # count nans in data
    n_nan = n_nan.item()
    y = np.where(y != y_fill, y, np.nan) # replace fill_values by nans in data
    y = np.where(y != -9999, y, np.nan) # replace -9999 by nans in data
    n_fv = np.sum(np.isnan(y)) - n_nan  # re-count nans in data
    n_fv = n_fv.item()
    y = np.where(y > -1e10, y, np.nan) # replace extreme values by nans in data
    y = np.where(y < 1e10, y, np.nan)
    n_ev = np.sum(np.isnan(y)) - n_fv - n_nan # re-count nans in data
    n_ev = n_ev.item()

    g_min, g_max = cf.get_global_ranges(r, sv) # get global ranges:
    if g_min is not None and g_max is not None:
        y = np.where(y >= g_min, y, np.nan) # replace extreme values by nans in data
        y = np.where(y <= g_max, y, np.nan)
        n_grange = np.sum(np.isnan(y)) - n_ev - n_fv - n_nan # re-count nans in data
        n_grange = n_grange.item()
    else:
        n_grange = np.nan

    stdev = np.nanstd(y)
    if stdev > 0.0:
        y = np.where(abs(y - np.nanmean(y)) < n * stdev, y, np.nan)  # replace values beyond n standard deviations with nans
        if np.isnan(n_grange):
            n_std = np.sum(np.isnan(y)) - n_ev - n_fv - n_nan  # re-count nans in data
        else:
            n_std = np.sum(np.isnan(y)) - n_grange - n_ev - n_fv - n_nan  # re-count nans in data
        n_std = n_std.item()
    else:
        n_std = 0

    err_count = pd.DataFrame({'n_nan':[n_nan],
                             'n_fv':[n_fv],
                             'n_ev':[n_ev],
                             'n_grange':[n_grange],
                             'g_min':[g_min],
                             'g_max':[g_max],
                             'n_std':[n_std]}, index=[0])
    return y, err_count
def reject_err_data_1_dims(y, y_fill, r, sv, n=None):
    n_nan = np.sum(np.isnan(y))  # count nans in data
    n_nan = n_nan.item()
    y = np.where(y != y_fill, y, np.nan)  # replace fill_values by nans in data
    y = np.where(y != -9999, y, np.nan)  # replace -9999 by nans in data
    n_fv = np.sum(np.isnan(y)) - n_nan  # re-count nans in data
    n_fv = n_fv.item()
    y = np.where(y > -1e10, y, np.nan)  # replace extreme values by nans in data
    y = np.where(y < 1e10, y, np.nan)
    n_ev = np.sum(np.isnan(y)) - n_fv - n_nan  # re-count nans in data
    n_ev = n_ev.item()

    g_min, g_max = cf.get_global_ranges(r, sv)  # get global ranges:
    if g_min is not None and g_max is not None:
        y = np.where(y >= g_min, y, np.nan)  # replace extreme values by nans in data
        y = np.where(y <= g_max, y, np.nan)
        n_grange = np.sum(np.isnan(y)) - n_ev - n_fv - n_nan  # re-count nans in data
        n_grange = n_grange.item()
    else:
        n_grange = np.nan

    stdev = np.nanstd(y)
    if stdev > 0.0:
        y = np.where(abs(y - np.nanmean(y)) < n * stdev, y, np.nan)  # replace values beyond n standard deviations with nans
        if np.isnan(n_grange):
            n_std = np.sum(np.isnan(y)) - n_ev - n_fv - n_nan  # re-count nans in data
        else:
            n_std = np.sum(np.isnan(y)) - n_grange - n_ev - n_fv - n_nan  # re-count nans in data
        n_std = n_std.item()
    else:
        n_std = 0

    return y, n_nan, n_fv, n_ev, n_grange, g_min, g_max, n_std
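
The two definitions of reject_err_data_1_dims above share a name, so in a single module only the later one (returning a tuple) would survive. A minimal sketch of calling it, with placeholder reference designator and variable name; note that n must be supplied, since the default None would fail at the standard-deviation step:

import numpy as np

y1d = np.array([3.2, -9999.0, np.nan, 3.1, 250.0, 3.3])
y_clean, n_nan, n_fv, n_ev, n_grange, g_min, g_max, n_std = reject_err_data_1_dims(
    y1d, y_fill=-9999999.0, r='GI01SUMO-RID16-03-CTDBPF000', sv='sea_water_temperature', n=5)
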
Example 5
def reject_erroneous_data(r, v, t, y, z, d, fz):
    """
    :param r: reference designator
    :param v: data parameter name
    :param t: time array
    :param y: pressure array
    :param z: data values
    :param d: deployment number
    :param fz: fill values defined in the data file
    :return: data with fill values, NaNs, extreme values (|1e7|), and values outside global ranges removed
    """

    # reject fill values
    fv_ind = z != fz
    y_nofv = y[fv_ind]
    t_nofv = t[fv_ind]
    z_nofv = z[fv_ind]
    d_nofv = d[fv_ind]
    print(len(z) - len(z_nofv), ' fill values')

    # reject NaNs
    nan_ind = ~np.isnan(z_nofv)
    t_nofv_nonan = t_nofv[nan_ind]
    y_nofv_nonan = y_nofv[nan_ind]
    z_nofv_nonan = z_nofv[nan_ind]
    d_nofv_nonan = d_nofv[nan_ind]
    print(len(z_nofv) - len(z_nofv_nonan), ' NaNs')

    # reject extreme values
    ev_ind = cf.reject_extreme_values(z_nofv_nonan)
    t_nofv_nonan_noev = t_nofv_nonan[ev_ind]
    y_nofv_nonan_noev = y_nofv_nonan[ev_ind]
    z_nofv_nonan_noev = z_nofv_nonan[ev_ind]
    d_nofv_nonan_noev = d_nofv_nonan[ev_ind]
    print(
        len(z_nofv_nonan) - len(z_nofv_nonan_noev), ' Extreme Values', '|1e7|')

    # reject values outside global ranges:
    global_min, global_max = cf.get_global_ranges(r, v)
    if isinstance(global_min,
                  (int, float)) and isinstance(global_max, (int, float)):
        gr_ind = cf.reject_global_ranges(z_nofv_nonan_noev, global_min,
                                         global_max)
        dtime = t_nofv_nonan_noev[gr_ind]
        zpressure = y_nofv_nonan_noev[gr_ind]
        ndata = z_nofv_nonan_noev[gr_ind]
        ndeploy = d_nofv_nonan_noev[gr_ind]
    else:
        gr_ind = []
        dtime = t_nofv_nonan_noev
        zpressure = y_nofv_nonan_noev
        ndata = z_nofv_nonan_noev
        ndeploy = d_nofv_nonan_noev

    print('{} global ranges [{} - {}]'.format(
        len(z_nofv_nonan_noev) - len(ndata), global_min, global_max))

    return dtime, zpressure, ndata, ndeploy
Example 6
def add_global_ranges(ax, data):
    try:
        global_ranges = get_global_ranges(data['info']['platform'], data['info']['node'], data['info']['sensor'], data['info']['var'])
        ax.set_autoscale_on(False)
        ax.axhline(global_ranges[0], color='g', linestyle='--', label=r'Global $\min$')
        ax.axhline(global_ranges[1], color='g', linestyle='--', label=r'Global $\max$')
    except (IndexError, ValueError):
        print('No global ranges exist for this reference designator yet.')
        global_ranges = [None, None]
    return global_ranges
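
A hedged usage sketch for add_global_ranges; the metadata dictionary mirrors the keys the function indexes, the platform/node/sensor/variable values are placeholders, and get_global_ranges is assumed to be importable in the same module:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
fig, ax = plt.subplots()
ax.plot(np.arange(100), rng.normal(10.0, 1.0, 100), '.')
data = {'info': {'platform': 'CP01CNSM', 'node': 'RID27',
                 'sensor': '03-CTDBPC000', 'var': 'temperature'}}
gr = add_global_ranges(ax, data)  # draws dashed min/max lines if ranges exist
ax.legend(loc='best')
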
def index_dataset(refdes, var_name, var_data, fv):
    n_nan = np.sum(np.isnan(var_data))
    n_fv = np.sum(var_data == fv)
    [g_min, g_max] = cf.get_global_ranges(refdes, var_name)
    if g_min is not None and g_max is not None:
        dataind = (~np.isnan(var_data)) & (var_data != fv) & (
            var_data >= g_min) & (var_data <= g_max)
        n_grange = np.sum((var_data < g_min) | (var_data > g_max))
    else:
        dataind = (~np.isnan(var_data)) & (var_data != fv)
        n_grange = 'no global ranges'

    return [dataind, g_min, g_max, n_nan, n_fv, n_grange]
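
A minimal sketch of applying the boolean index returned by index_dataset (refdes, variable name, and fill value are placeholders):

import numpy as np

var_data = np.array([7.9, -9999999.0, np.nan, 8.1, 1.0e8])
dataind, g_min, g_max, n_nan, n_fv, n_grange = index_dataset(
    'CE02SHSM-RID27-02-FLORTD000', 'fluorometric_chlorophyll_a', var_data, fv=-9999999.0)
good = var_data[dataind]  # values passing the NaN, fill-value, and global-range checks
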
Example 8
def index_dataset_2d(refdes, var_name, var_data, fv):
    [g_min, g_max] = cf.get_global_ranges(refdes, var_name)
    fdata = dict()
    for i in range(len(var_data)):
        vd = var_data[i]

        # convert fill values to nans
        vd[vd == fv] = np.nan

        if g_min is not None and g_max is not None:
            vd[vd < g_min] = np.nan
            vd[vd > g_max] = np.nan

        fdata.update({i: vd})

    return [fdata, g_min, g_max]
def reject_err_data(y, y_fill, r, sv):

    y = np.where(y != y_fill, y, np.nan)  # replace fill values with nans
    y = np.where(y > -1e10, y, np.nan)  # replace extreme values with nans
    y = np.where(y < 1e10, y, np.nan)

    # reject values outside global ranges:
    global_min, global_max = cf.get_global_ranges(r, sv)
    print(sv, ': ', global_min, global_max)
    if global_min is not None and global_max is not None:
        y = np.where(y >= global_min, y, np.nan)
        y = np.where(y <= global_max, y, np.nan)

    stdev = np.nanstd(y)
    if stdev > 0.0:
        y = np.where(abs(y - np.nanmean(y)) < 5 * stdev, y, np.nan)  # reject values more than 5 standard deviations from the mean

    return y
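
A hedged sketch of reject_err_data on a 1-D series (placeholders throughout; the function returns the same-shaped array with rejected values set to nan):

import numpy as np

y = np.array([12.1, -9999999.0, 11.9, 5.0e11, 12.0])
y_clean = reject_err_data(y, y_fill=-9999999.0, r='CE09OSSM-RID27-04-DOSTAD000', sv='dissolved_oxygen')
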
Example 10
def reject_err_data_2_dims(y, y_bad_beams, y_fill, r, sv):
    n_nan = np.sum(np.isnan(y))  # count nans in data
    y[y == y_fill] = np.nan  # replace fill_values by nans in data
    y[y == -9999] = np.nan  # replace -9999 by nans in data
    n_fv = np.sum(np.isnan(y)) - n_nan  # re-count nans in data
    y[y < -1e10] = np.nan  # replace extreme values by nans in data
    y[y > 1e10] = np.nan
    n_ev = np.sum(np.isnan(y)) - n_fv - n_nan  # re-count nans in data
    y[y_bad_beams > 25] = np.nan  # replace bad beams by nans in data
    n_bb = np.sum(np.isnan(y)) - n_ev - n_fv - n_nan  # re-count nans in data

    [g_min, g_max] = cf.get_global_ranges(r, sv)  # get global ranges
    if g_min is not None and g_max is not None:
        y[y < g_min] = np.nan  # replace extreme values by nans in data
        y[y > g_max] = np.nan
        n_grange = np.sum(
            np.isnan(y)) - n_bb - n_ev - n_fv - n_nan  # re-count nans in data
    else:
        n_grange = np.nan

    return y, n_nan, n_fv, n_ev, n_bb, n_grange, g_min, g_max
def main(sDir, url_list, start_time, end_time, deployment_num):
    rd_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        if rd not in rd_list and 'OPTAA' in rd:
            rd_list.append(rd)

    for r in rd_list:
        print('\n{}'.format(r))
        datasets = []
        deployments = []
        for u in url_list:
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join((splitter[1], splitter[2], splitter[3], splitter[4]))
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])
                for ud in udatasets:  # filter out collocated data files
                    if 'OPTAA' in ud.split('/')[-1]:
                        datasets.append(ud)
                    if ud.split('/')[-1].split('_')[0] not in deployments:
                        deployments.append(ud.split('/')[-1].split('_')[0])
        deployments.sort()

        fdatasets = np.unique(datasets).tolist()
        for deploy in deployments:
            if deployment_num is not None:
                if int(deploy[-4:]) != deployment_num:
                    print('\nskipping {}'.format(deploy))
                    continue

            rdatasets = [s for s in fdatasets if deploy in s]
            if len(rdatasets) == 0:  # nothing to plot for this deployment
                continue
            if len(rdatasets) > 0:
                sci_vars_dict = {'optical_absorption': dict(atts=dict(fv=[], units=[])),
                                 'beam_attenuation': dict(atts=dict(fv=[], units=[]))}
                for i in range(len(rdatasets)):
                #for i in range(0, 2):  ##### for testing
                    ds = xr.open_dataset(rdatasets[i], mask_and_scale=False)
                    ds = ds.swap_dims({'obs': 'time'})

                    if start_time is not None and end_time is not None:
                        ds = ds.sel(time=slice(start_time, end_time))
                        if len(ds['time'].values) == 0:
                            print('No data to plot for specified time range: ({} to {})'.format(start_time, end_time))
                            continue

                    if i == 0:
                        fname, subsite, refdes, method, stream, deployment = cf.nc_attributes(rdatasets[0])
                        array = subsite[0:2]
                        filename = '_'.join(fname.split('_')[:-1])
                        save_dir = os.path.join(sDir, array, subsite, refdes, 'timeseries_plots', deployment)
                        cf.create_dir(save_dir)

                    for k in sci_vars_dict.keys():
                        print('\nAppending data from {}: {}'.format(deploy, k))
                        vv = ds[k]
                        fv = vv._FillValue
                        vvunits = vv.units
                        if fv not in sci_vars_dict[k]['atts']['fv']:
                            sci_vars_dict[k]['atts']['fv'].append(fv)
                        if vvunits not in sci_vars_dict[k]['atts']['units']:
                            sci_vars_dict[k]['atts']['units'].append(vvunits)
                        if k == 'optical_absorption':
                            wavelengths = ds['wavelength_a'].values
                        elif k == 'beam_attenuation':
                            wavelengths = ds['wavelength_c'].values
                        for j in range(len(wavelengths)):
                            if (wavelengths[j] > 671.) and (wavelengths[j] < 679.):
                                wv = str(wavelengths[j])
                                try:
                                    sci_vars_dict[k][wv]
                                except KeyError:
                                    sci_vars_dict[k].update({wv: dict(values=np.array([]), time=np.array([], dtype=np.datetime64))})

                                v = vv.sel(wavelength=j).values
                                sci_vars_dict[k][wv]['values'] = np.append(sci_vars_dict[k][wv]['values'], v)
                                sci_vars_dict[k][wv]['time'] = np.append(sci_vars_dict[k][wv]['time'], ds['time'].values)

            title = ' '.join((deployment, refdes, method))

            colors = ['purple', 'green', 'orange']
            t0_array = np.array([], dtype=np.datetime64)
            t1_array = np.array([], dtype=np.datetime64)
            for var in sci_vars_dict.keys():
                print('Plotting {}'.format(var))
                plotting = []  # keep track if anything is plotted
                fig1, ax1 = plt.subplots()
                fig2, ax2 = plt.subplots()
                [g_min, g_max] = cf.get_global_ranges(r, var)
                for idk, dk in enumerate(sci_vars_dict[var]):
                    if dk != 'atts':
                        v = sci_vars_dict[var][dk]['values']
                        n_all = len(sci_vars_dict[var][dk]['values'])
                        n_nan = np.sum(np.isnan(v))

                        # convert fill values to nans
                        v[v == sci_vars_dict[var]['atts']['fv'][0]] = np.nan
                        n_fv = np.sum(np.isnan(v)) - n_nan

                        if n_nan + n_fv < n_all:
                            # plot before global ranges are removed
                            plotting.append('yes')
                            tm = sci_vars_dict[var][dk]['time']
                            t0_array = np.append(t0_array, tm.min())
                            t1_array = np.append(t1_array, tm.max())

                            ax1.scatter(tm, v, c=colors[idk - 1], label='{} nm'.format(dk), marker='.', s=1)

                            # reject data outside of global ranges
                            if g_min is not None and g_max is not None:
                                v[v < g_min] = np.nan
                                v[v > g_max] = np.nan
                                n_grange = np.sum(np.isnan(v)) - n_fv - n_nan
                            else:
                                n_grange = 'no global ranges'

                            # plot after global ranges are removed

                            ax2.scatter(tm, v, c=colors[idk - 1], label='{} nm: rm {} GR'.format(dk, n_grange),
                                        marker='.', s=1)

                if len(plotting) > 0:
                    t0 = pd.to_datetime(t0_array.min()).strftime('%Y-%m-%dT%H:%M:%S')
                    t1 = pd.to_datetime(t1_array.max()).strftime('%Y-%m-%dT%H:%M:%S')
                    ax1.grid()
                    pf.format_date_axis(ax1, fig1)
                    ax1.legend(loc='best', fontsize=7)
                    ax1.set_ylabel((var + " (" + sci_vars_dict[var]['atts']['units'][0] + ")"), fontsize=9)
                    ax1.set_title((title + '\n' + t0 + ' - ' + t1), fontsize=9)
                    sfile = '-'.join((filename, var, t0[:10]))
                    save_file = os.path.join(save_dir, sfile)
                    fig1.savefig(str(save_file), dpi=150)

                    ax2.grid()
                    pf.format_date_axis(ax2, fig2)
                    ax2.legend(loc='best', fontsize=7)
                    ax2.set_ylabel((var + " (" + sci_vars_dict[var]['atts']['units'][0] + ")"), fontsize=9)
                    title_gr = 'GR: global ranges'
                    ax2.set_title((title + '\n' + t0 + ' - ' + t1 + '\n' + title_gr), fontsize=9)
                    sfile2 = '-'.join((filename, var, t0[:10], 'rmgr'))
                    save_file2 = os.path.join(save_dir, sfile2)
                    fig2.savefig(str(save_file2), dpi=150)

                plt.close('all')
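
A hypothetical invocation of the OPTAA plotting main above; the output directory and THREDDS catalog URL are placeholders following the deployment/refdes pattern the URL parser expects:

sDir = '/Users/me/ooi/plots'
url_list = ['https://opendap.oceanobservatories.org/thredds/catalog/ooi/example/'
            '20190101T000000-CP01CNSM-RID27-01-OPTAAD000-recovered_host-optaa_dj_dcl_instrument_recovered/catalog.html']
main(sDir, url_list, start_time=None, end_time=None, deployment_num=1)
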
Example 12
def main(sDir, url_list, start_time, end_time, preferred_only):
    rd_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        if rd not in rd_list:
            rd_list.append(rd)

    for r in rd_list:
        print('\n{}'.format(r))
        datasets = []
        for u in url_list:
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join((splitter[1], splitter[2], splitter[3], splitter[4]))
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])
                datasets.append(udatasets)
        datasets = list(itertools.chain(*datasets))
        fdatasets = []
        if preferred_only == 'yes':
            # get the preferred stream information
            ps_df, n_streams = cf.get_preferred_stream_info(r)
            for index, row in ps_df.iterrows():
                for ii in range(n_streams):
                    rms = '-'.join((r, row[ii]))
                    for dd in datasets:
                        spl = dd.split('/')[-2].split('-')
                        catalog_rms = '-'.join((spl[1], spl[2], spl[3], spl[4], spl[5], spl[6]))
                        fdeploy = dd.split('/')[-1].split('_')[0]
                        if rms == catalog_rms and fdeploy == row['deployment']:
                            fdatasets.append(dd)
        else:
            fdatasets = datasets

        for fd in fdatasets:
            with xr.open_dataset(fd, mask_and_scale=False) as ds:
                ds = ds.swap_dims({'obs': 'time'})

                if start_time is not None and end_time is not None:
                    ds = ds.sel(time=slice(start_time, end_time))
                    if len(ds['time'].values) == 0:
                        print('No data to plot for specified time range: ({} to {})'.format(start_time, end_time))
                        continue

                fname, subsite, refdes, method, stream, deployment = cf.nc_attributes(fd)
                print('\nPlotting {} {}'.format(r, deployment))
                array = subsite[0:2]
                save_dir = os.path.join(sDir, array, subsite, refdes, 'ts_plots')
                cf.create_dir(save_dir)

                tme = ds['time'].values
                t0 = pd.to_datetime(tme.min()).strftime('%Y-%m-%dT%H:%M:%S')
                t1 = pd.to_datetime(tme.max()).strftime('%Y-%m-%dT%H:%M:%S')
                title = ' '.join((deployment, refdes, method))
                filename = '-'.join(('_'.join(fname.split('_')[:-1]), 'ts', t0[:10]))

                ds_vars = list(ds.data_vars.keys())
                raw_vars = cf.return_raw_vars(ds_vars)

                xvar = return_var(ds, raw_vars, 'salinity', 'Practical Salinity')
                sal = ds[xvar].values
                sal_fv = ds[xvar]._FillValue

                yvar = return_var(ds, raw_vars, 'temp', 'Seawater Temperature')
                temp = ds[yvar].values
                temp_fv = ds[yvar]._FillValue

                press = pf.pressure_var(ds, list(ds.coords.keys()))
                if press is None:
                    press = pf.pressure_var(ds, list(ds.data_vars.keys()))
                p = ds[press].values

                # get rid of nans, 0.0s, fill values
                sind1 = (~np.isnan(sal)) & (sal != 0.0) & (sal != sal_fv)
                sal = sal[sind1]
                temp = temp[sind1]
                tme = tme[sind1]
                p = p[sind1]
                tind1 = (~np.isnan(temp)) & (temp != 0.0) & (temp != temp_fv)
                sal = sal[tind1]
                temp = temp[tind1]
                tme = tme[tind1]
                p = p[tind1]

                # reject values outside global ranges:
                global_min, global_max = cf.get_global_ranges(r, xvar)
                if global_min is not None and global_max is not None:
                    sgr_ind = cf.reject_global_ranges(sal, global_min, global_max)
                    sal = sal[sgr_ind]
                    temp = temp[sgr_ind]
                    tme = tme[sgr_ind]
                    p = p[sgr_ind]

                global_min, global_max = cf.get_global_ranges(r, yvar)
                if global_min is not None and global_max is not None:
                    tgr_ind = cf.reject_global_ranges(temp, global_min, global_max)
                    sal = sal[tgr_ind]
                    temp = temp[tgr_ind]
                    tme = tme[tgr_ind]
                    p = p[tgr_ind]

                # get rid of outliers
                soind = cf.reject_outliers(sal, 5)
                sal = sal[soind]
                temp = temp[soind]
                tme = tme[soind]
                p = p[soind]

                toind = cf.reject_outliers(temp, 5)
                sal = sal[toind]
                temp = temp[toind]
                tme = tme[toind]
                p = p[toind]

                if len(sal) > 0:  # if there are any data to plot

                    colors = cm.rainbow(np.linspace(0, 1, len(tme)))

                    # Figure out boundaries (mins and maxes)
                    #smin = sal.min() - (0.01 * sal.min())
                    #smax = sal.max() + (0.01 * sal.max())
                    if sal.max() - sal.min() < 0.2:
                        smin = sal.min() - (0.0005 * sal.min())
                        smax = sal.max() + (0.0005 * sal.max())
                    else:
                        smin = sal.min() - (0.001 * sal.min())
                        smax = sal.max() + (0.001 * sal.max())

                    if temp.max() - temp.min() <= 1:
                        tmin = temp.min() - (0.01 * temp.min())
                        tmax = temp.max() + (0.01 * temp.max())
                    elif 1 < temp.max() - temp.min() < 1.5:
                        tmin = temp.min() - (0.05 * temp.min())
                        tmax = temp.max() + (0.05 * temp.max())
                    else:
                        tmin = temp.min() - (0.1 * temp.min())
                        tmax = temp.max() + (0.1 * temp.max())

                    # Calculate how many gridcells are needed in the x and y directions and
                    # Create temp and sal vectors of appropriate dimensions
                    xdim = int(round((smax-smin)/0.1 + 1, 0))
                    if xdim == 1:
                        xdim = 2
                    si = np.linspace(0, xdim - 1, xdim) * 0.1 + smin

                    if 1.1 <= temp.max() - temp.min() < 1.7:  # if the diff between min and max temp is small
                        ydim = int(round((tmax-tmin)/0.75 + 1, 0))
                        ti = np.linspace(0, ydim - 1, ydim) * 0.75 + tmin
                    elif temp.max() - temp.min() < 1.1:
                        ydim = int(round((tmax - tmin) / 0.1 + 1, 0))
                        ti = np.linspace(0, ydim - 1, ydim) * 0.1 + tmin
                    else:
                        ydim = int(round((tmax - tmin) + 1, 0))
                        ti = np.linspace(0, ydim - 1, ydim) + tmin

                    # Create empty grid of zeros
                    mdens = np.zeros((ydim, xdim))

                    # Loop to fill in grid with densities
                    for j in range(0, ydim):
                        for i in range(0, xdim):
                            mdens[j, i] = gsw.density.rho(si[i], ti[j], np.median(p))  # calculate density using median pressure value

                    fig, ax = pf.plot_ts(si, ti, mdens, sal, temp, colors)

                    ax.set_title((title + '\n' + t0 + ' - ' + t1 + '\ncolors = time (cooler: earlier)'), fontsize=9)
                    leg_text = ('Removed {} values (SD=5)'.format(len(ds[xvar].values) - len(sal)),)
                    ax.legend(leg_text, loc='best', fontsize=6)
                    pf.save_fig(save_dir, filename)
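
Similarly, a hypothetical call for the T-S plotting main (placeholder path and catalog URL; preferred_only='no' plots every dataset found rather than only the preferred streams):

sDir = '/Users/me/ooi/plots'
url_list = ['https://opendap.oceanobservatories.org/thredds/catalog/ooi/example/'
            '20190101T000000-GI01SUMO-RID16-03-CTDBPF000-recovered_host-ctdbp_cdef_dcl_instrument_recovered/catalog.html']
main(sDir, url_list, start_time=None, end_time=None, preferred_only='no')
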
def main(sDir, url_list, deployment_num):
    reviewlist = pd.read_csv(
        'https://raw.githubusercontent.com/ooi-data-lab/data-review-prep/master/review_list/data_review_list.csv')

    rd_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        if rd not in rd_list:
            rd_list.append(rd)

    json_file_list = []
    for r in rd_list:
        dependencies = []
        print('\n{}'.format(r))
        data = OrderedDict(deployments=OrderedDict())
        save_dir = os.path.join(sDir, r.split('-')[0], r)
        cf.create_dir(save_dir)

        # Deployment location test
        deploy_loc_test = cf.deploy_location_check(r)
        data['location_comparison'] = deploy_loc_test

        for u in url_list:
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join((splitter[1], splitter[2], splitter[3], splitter[4]))
            catalog_rms = '-'.join((r, splitter[-2], splitter[-1]))

            # complete the analysis by reference designator
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])

                # check for the OOI 1.0 datasets for review
                rl_filtered = reviewlist.loc[
                    (reviewlist['Reference Designator'] == r) & (reviewlist['status'] == 'for review')]
                review_deployments = rl_filtered['deploymentNumber'].tolist()
                review_deployments_int = ['deployment%04d' % int(x) for x in review_deployments]

                for rev_dep in review_deployments_int:
                    if deployment_num is not None:
                        if int(rev_dep[-4:]) != deployment_num:
                            print('\nskipping {}'.format(rev_dep))
                            continue

                    rdatasets = [s for s in udatasets if rev_dep in s]
                    rdatasets.sort()
                    if len(rdatasets) > 0:
                        datasets = []
                        for dss in rdatasets:  # filter out collocated data files
                            if catalog_rms == dss.split('/')[-1].split('_20')[0][15:]:
                                datasets.append(dss)
                            else:
                                drd = dss.split('/')[-1].split('_20')[0][15:42]
                                if drd not in dependencies and drd != r:
                                    dependencies.append(drd)

                        notes = []
                        time_ascending = ''
                        sci_vars_dict = {}
                        #datasets = datasets[0:2]  #### for testing
                        for i in range(len(datasets)):
                            ds = xr.open_dataset(datasets[i], mask_and_scale=False)
                            ds = ds.swap_dims({'obs': 'time'})
                            print('\nAppending data from {}: file {} of {}'.format(rev_dep, i+1, len(datasets)))

                            # when opening multiple datasets, don't check that the timestamps are in ascending order
                            time_ascending = 'not_tested'

                            if i == 0:
                                fname, subsite, refdes, method, data_stream, deployment = cf.nc_attributes(datasets[0])
                                fname = fname.split('_20')[0]

                                # Get info from the data review database
                                dr_data = cf.refdes_datareview_json(refdes)
                                stream_vars = cf.return_stream_vars(data_stream)
                                sci_vars = cf.return_science_vars(data_stream)
                                node = refdes.split('-')[1]
                                if 'cspp' in data_stream or 'WFP' in node:
                                    sci_vars.append('int_ctd_pressure')

                                # Add pressure to the list of science variables
                                press = pf.pressure_var(ds, list(ds.coords.keys()))
                                if press is None:
                                    press = pf.pressure_var(ds, list(ds.data_vars.keys()))
                                if press is not None:
                                    sci_vars.append(press)
                                sci_vars.append('time')
                                sci_vars = list(np.unique(sci_vars))
                                if 'ADCP' in r:
                                    sci_vars = [x for x in sci_vars if 'beam' not in x]

                                for sci_var in sci_vars:
                                    if sci_var == 'time':
                                        sci_vars_dict.update(
                                            {sci_var: dict(values=np.array([], dtype=np.datetime64), units=[], fv=[])})
                                    else:
                                        sci_vars_dict.update({sci_var: dict(values=np.array([]), units=[], fv=[])})

                                deploy_info = get_deployment_information(dr_data, int(deployment[-4:]))

                                # Grab deployment Variables
                                deploy_start = str(deploy_info['start_date'])
                                deploy_stop = str(deploy_info['stop_date'])
                                deploy_lon = deploy_info['longitude']
                                deploy_lat = deploy_info['latitude']
                                deploy_depth = deploy_info['deployment_depth']

                                # Calculate days deployed
                                if deploy_stop != 'None':
                                    r_deploy_start = pd.to_datetime(deploy_start).replace(hour=0, minute=0, second=0)
                                    if deploy_stop.split('T')[1] == '00:00:00':
                                        r_deploy_stop = pd.to_datetime(deploy_stop)
                                    else:
                                        r_deploy_stop = (pd.to_datetime(deploy_stop) + timedelta(days=1)).replace(hour=0, minute=0, second=0)
                                    n_days_deployed = (r_deploy_stop - r_deploy_start).days
                                else:
                                    n_days_deployed = None

                                # Add reference designator to dictionary
                                try:
                                    data['refdes']
                                except KeyError:
                                    data['refdes'] = refdes

                            # append data for the deployment into a dictionary
                            for s_v in sci_vars_dict.keys():
                                vv = ds[s_v]
                                try:
                                    if vv.units not in sci_vars_dict[s_v]['units']:
                                        sci_vars_dict[s_v]['units'].append(vv.units)
                                except AttributeError:
                                    print('')
                                try:
                                    if vv._FillValue not in sci_vars_dict[s_v]['fv']:
                                        sci_vars_dict[s_v]['fv'].append(vv._FillValue)
                                except AttributeError:
                                    print('')
                                if len(vv.dims) == 1:
                                    if s_v in ['wavelength_a', 'wavelength_c']:
                                        # if the array is not same as the array that was already appended for these
                                        # two OPTAA variables, append. if it's already there, don't append
                                        if np.sum(vv.values == sci_vars_dict[s_v]['values']) != len(vv.values):
                                            sci_vars_dict[s_v]['values'] = np.append(sci_vars_dict[s_v]['values'],
                                                                                     vv.values)
                                    else:
                                        sci_vars_dict[s_v]['values'] = np.append(sci_vars_dict[s_v]['values'], vv.values)

                                elif len(vv.dims) == 2:  # appending 2D datasets
                                    vD = vv.values.T
                                    if len(sci_vars_dict[s_v]['values']) == 0:
                                        sci_vars_dict[s_v]['values'] = vD
                                    else:
                                        sci_vars_dict[s_v]['values'] = np.concatenate((sci_vars_dict[s_v]['values'], vD), axis=1)

                        deployments = data['deployments'].keys()
                        data_start = pd.to_datetime(min(sci_vars_dict['time']['values'])).strftime('%Y-%m-%dT%H:%M:%S')
                        data_stop = pd.to_datetime(max(sci_vars_dict['time']['values'])).strftime('%Y-%m-%dT%H:%M:%S')

                        # Add deployment and info to dictionary and initialize delivery method sub-dictionary
                        if deployment not in deployments:
                            data['deployments'][deployment] = OrderedDict(deploy_start=deploy_start,
                                                                          deploy_stop=deploy_stop,
                                                                          n_days_deployed=n_days_deployed,
                                                                          lon=deploy_lon,
                                                                          lat=deploy_lat,
                                                                          deploy_depth=deploy_depth,
                                                                          method=OrderedDict())

                        # Add delivery methods to dictionary and initialize stream sub-dictionary
                        methods = data['deployments'][deployment]['method'].keys()
                        if method not in methods:
                            data['deployments'][deployment]['method'][method] = OrderedDict(
                                stream=OrderedDict())

                        # Add streams to dictionary and initialize file sub-dictionary
                        streams = data['deployments'][deployment]['method'][method]['stream'].keys()

                        if data_stream not in streams:
                            data['deployments'][deployment]['method'][method]['stream'][
                                data_stream] = OrderedDict(file=OrderedDict())

                        # Get a list of data gaps >1 day
                        time_df = pd.DataFrame(sci_vars_dict['time']['values'], columns=['time'])
                        time_df = time_df.sort_values(by=['time'])
                        gap_list = cf.timestamp_gap_test(time_df)

                        # Calculate the sampling rate to the nearest second
                        time_df['diff'] = time_df['time'].diff().astype('timedelta64[s]')
                        rates_df = time_df.groupby(['diff']).agg(['count'])
                        n_diff_calc = len(time_df) - 1
                        rates = dict(n_unique_rates=len(rates_df), common_sampling_rates=dict())
                        for i, row in rates_df.iterrows():
                            percent = (float(row['time']['count']) / float(n_diff_calc))
                            if percent > 0.1:
                                rates['common_sampling_rates'].update({int(i): '{:.2%}'.format(percent)})

                        sampling_rt_sec = None
                        for k, v in rates['common_sampling_rates'].items():
                            if float(v.strip('%')) > 50.00:
                                sampling_rt_sec = k

                        if not sampling_rt_sec:
                            sampling_rt_sec = 'no consistent sampling rate: {}'.format(rates['common_sampling_rates'])

                        # Don't do : Check that the timestamps in the file are unique
                        time_test = ''

                        # Count the number of days for which there is at least 1 timestamp
                        n_days = len(np.unique(sci_vars_dict['time']['values'].astype('datetime64[D]')))

                        # Compare variables in file to variables in Data Review Database
                        ds_variables = list(ds.data_vars.keys()) + list(ds.coords.keys())
                        ds_variables = eliminate_common_variables(ds_variables)
                        ds_variables = [x for x in ds_variables if 'qc' not in x]
                        [_, unmatch1] = compare_lists(stream_vars, ds_variables)
                        [_, unmatch2] = compare_lists(ds_variables, stream_vars)

                        # calculate mean pressure from data, excluding outliers +/- 3 SD
                        try:
                            pressure = sci_vars_dict[press]
                            if len(pressure['values']) > 1:
                                # reject NaNs
                                p_nonan = pressure['values'][~np.isnan(pressure['values'])]

                                # reject fill values
                                p_nonan_nofv = p_nonan[p_nonan != pressure['fv'][0]]

                                # reject data outside of global ranges
                                [pg_min, pg_max] = cf.get_global_ranges(r, press)
                                if pg_min is not None and pg_max is not None:
                                    pgr_ind = cf.reject_global_ranges(p_nonan_nofv, pg_min, pg_max)
                                    p_nonan_nofv_gr = p_nonan_nofv[pgr_ind]
                                else:
                                    p_nonan_nofv_gr = p_nonan_nofv

                                if (len(p_nonan_nofv_gr) > 0):
                                    [press_outliers, pressure_mean, _, pressure_max, _, _] = cf.variable_statistics(p_nonan_nofv_gr, 3)
                                    pressure_mean = round(pressure_mean, 2)
                                    pressure_max = round(pressure_max, 2)
                                else:
                                    press_outliers = None
                                    pressure_mean = None
                                    pressure_max = None
                                    if len(pressure['values']) > 0 and len(p_nonan) == 0:
                                        notes.append('Pressure variable all NaNs')
                                    elif len(pressure['values']) > 0 and len(p_nonan) > 0 and len(p_nonan_nofv) == 0:
                                        notes.append('Pressure variable all fill values')
                                    elif len(pressure['values']) > 0 and len(p_nonan) > 0 and len(p_nonan_nofv) > 0 and len(p_nonan_nofv_gr) == 0:
                                        notes.append('Pressure variable outside of global ranges')

                            else:  # if there is only 1 data point
                                press_outliers = 0
                                pressure_mean = round(ds[press].values.tolist()[0], 2)
                                pressure_max = round(ds[press].values.tolist()[0], 2)

                            try:
                                pressure_units = pressure['units'][0]
                            except (AttributeError, IndexError):
                                pressure_units = 'no units attribute for pressure'

                            if pressure_mean is not None:
                                if 'SF' in node:
                                    pressure_compare = int(round(pressure_max))
                                else:
                                    pressure_compare = int(round(pressure_mean))

                                if pressure_units == '0.001 dbar':
                                    pressure_max = round((pressure_max / 1000), 2)
                                    pressure_mean = round((pressure_mean / 1000), 2)
                                    pressure_compare = round((pressure_compare / 1000), 2)
                                    notes.append('Pressure converted from 0.001 dbar to dbar for pressure comparison')

                                elif pressure_units == 'daPa':
                                    pressure_max = round((pressure_max / 1000), 2)
                                    pressure_mean = round((pressure_mean / 1000), 2)
                                    pressure_compare = round((pressure_compare / 1000), 2)
                                    notes.append('Pressure converted from daPa to dbar for pressure comparison')

                            else:
                                pressure_compare = None

                            if (deploy_depth is None) or (pressure_compare is None):
                                pressure_diff = None
                            else:
                                pressure_diff = pressure_compare - deploy_depth

                        except KeyError:
                            press = 'no seawater pressure in file'
                            pressure_diff = None
                            pressure_mean = None
                            pressure_max = None
                            pressure_compare = None
                            press_outliers = None
                            pressure_units = None

                        # Add files and info to dictionary
                        filenames = data['deployments'][deployment]['method'][method]['stream'][data_stream][
                            'file'].keys()

                        if fname not in filenames:
                            data['deployments'][deployment]['method'][method]['stream'][data_stream]['file'][
                                fname] = OrderedDict(
                                file_downloaded=pd.to_datetime(splitter[0][0:15]).strftime('%Y-%m-%dT%H:%M:%S'),
                                file_coordinates=list(ds.coords.keys()),
                                sampling_rate_seconds=sampling_rt_sec,
                                sampling_rate_details=rates,
                                data_start=data_start,
                                data_stop=data_stop,
                                time_gaps=gap_list,
                                unique_timestamps=time_test,
                                n_timestamps=len(sci_vars_dict['time']['values']),
                                n_days=n_days,
                                notes=notes,
                                ascending_timestamps=time_ascending,
                                pressure_comparison=dict(pressure_mean=pressure_mean, units=pressure_units,
                                                         num_outliers=press_outliers, diff=pressure_diff,
                                                         pressure_max=pressure_max, variable=press,
                                                         pressure_compare=pressure_compare),
                                vars_in_file=ds_variables,
                                vars_not_in_file=[x for x in unmatch1 if 'time' not in x],
                                vars_not_in_db=unmatch2,
                                sci_var_stats=OrderedDict())

                        # calculate statistics for science variables, excluding outliers +/- 5 SD
                        for sv in sci_vars_dict.keys():
                            if sv != 't_max':  # for ADCP
                                if sv != 'time':
                                    print(sv)
                                    var = sci_vars_dict[sv]
                                    vD = var['values']
                                    var_units = var['units']
                                    #if 'timedelta' not in str(vD.dtype):
                                    vnum_dims = len(np.shape(vD))
                                    # for OPTAA wavelengths, print the array
                                    if sv == 'wavelength_a' or sv == 'wavelength_c':
                                        [g_min, g_max] = cf.get_global_ranges(r, sv)
                                        n_all = len(vD)  # number of wavelength values
                                        mean = list(vD)
                                        num_outliers = None
                                        vmin = None
                                        vmax = None
                                        sd = None
                                        n_stats = 'not calculated'
                                        n_nan = None
                                        n_fv = None
                                        n_grange = 'no global ranges'
                                        fv = var['fv'][0]
                                    else:
                                        if vnum_dims > 2:
                                            print('variable has more than 2 dimensions')
                                            num_outliers = None
                                            mean = None
                                            vmin = None
                                            vmax = None
                                            sd = None
                                            n_stats = 'variable has more than 2 dimensions'
                                            n_nan = None
                                            n_fv = None
                                            n_grange = None
                                            fv = None
                                            n_all = None
                                        else:
                                            if vnum_dims > 1:
                                                n_all = [len(vD), len(vD.flatten())]
                                            else:
                                                n_all = len(vD)
                                            n_nan = int(np.sum(np.isnan(vD)))
                                            fv = var['fv'][0]
                                            vD[vD == fv] = np.nan  # turn fill values to nans
                                            n_fv = int(np.sum(np.isnan(vD))) - n_nan

                                            [g_min, g_max] = cf.get_global_ranges(r, sv)
                                            if list(np.unique(np.isnan(vD))) != [True]:
                                                # reject data outside of global ranges
                                                if g_min is not None and g_max is not None:
                                                    # turn data outside of global ranges to nans
                                                    #var_gr = var_nofv.where((var_nofv >= g_min) & (var_nofv <= g_max))
                                                    vD[vD < g_min] = np.nan
                                                    vD[vD > g_max] = np.nan
                                                    n_grange = int(np.sum(np.isnan(vD)) - n_fv - n_nan)
                                                else:
                                                    n_grange = 'no global ranges'

                                                if list(np.unique(np.isnan(vD))) != [True]:
                                                    if sv == 'spkir_abj_cspp_downwelling_vector':
                                                        # don't remove outliers from dataset
                                                        [num_outliers, mean, vmin, vmax, sd, n_stats] = cf.variable_statistics_spkir(vD)
                                                    else:
                                                        if vnum_dims > 1:
                                                            var_gr = vD.flatten()
                                                        else:
                                                            var_gr = vD
                                                        # drop nans before calculating stats
                                                        var_gr = var_gr[~np.isnan(var_gr)]
                                                        [num_outliers, mean, vmin, vmax, sd, n_stats] = cf.variable_statistics(var_gr, 5)
                                                else:
                                                    num_outliers = None
                                                    mean = None
                                                    vmin = None
                                                    vmax = None
                                                    sd = None
                                                    n_stats = 0
                                                    n_grange = None
                                            else:
                                                num_outliers = None
                                                mean = None
                                                vmin = None
                                                vmax = None
                                                sd = None
                                                n_stats = 0
                                                n_grange = None

                                    if vnum_dims > 1:
                                        sv = '{} (dims: {})'.format(sv, list(np.shape(var['values'])))
                                    #if 'timedelta' not in str(var.values.dtype):
                                    data['deployments'][deployment]['method'][method]['stream'][data_stream]['file'][
                                        fname]['sci_var_stats'][sv] = dict(n_outliers=num_outliers, mean=mean, min=vmin,
                                                                           max=vmax, stdev=sd, n_stats=n_stats, units=var_units,
                                                                           n_nans=n_nan, n_fillvalues=n_fv, fill_value=str(fv),
                                                                           global_ranges=[g_min, g_max], n_grange=n_grange,
                                                                           n_all=n_all)

                    sfile = os.path.join(save_dir, '{}-{}-file_analysis.json'.format(rev_dep, r))
                    with open(sfile, 'w') as outfile:
                        json.dump(data, outfile)
                    json_file_list.append(str(sfile))

        depfile = os.path.join(save_dir, '{}-dependencies.txt'.format(r))
        with open(depfile, 'w') as depf:
            depf.write(str(dependencies))

    return json_file_list
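
A hypothetical invocation of the file-analysis main above (placeholder path and catalog URL; passing deployment_num=None analyzes every deployment flagged 'for review'):

sDir = '/Users/me/ooi/data_review'
url_list = ['https://opendap.oceanobservatories.org/thredds/catalog/ooi/example/'
            '20190101T000000-GI01SUMO-RID16-03-CTDBPF000-recovered_host-ctdbp_cdef_dcl_instrument_recovered/catalog.html']
json_files = main(sDir, url_list, deployment_num=None)
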
def main(sDir, url_list):
    reviewlist = pd.read_csv(
        'https://raw.githubusercontent.com/ooi-data-lab/data-review-prep/master/review_list/data_review_list.csv'
    )

    rd_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        if rd not in rd_list:
            rd_list.append(rd)

    json_file_list = []
    for r in rd_list:
        dependencies = []
        print('\n{}'.format(r))
        data = OrderedDict(deployments=OrderedDict())
        save_dir = os.path.join(sDir, r.split('-')[0], r)
        cf.create_dir(save_dir)

        # Deployment location test
        deploy_loc_test = cf.deploy_location_check(r)
        data['location_comparison'] = deploy_loc_test

        for u in url_list:
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join(
                (splitter[1], splitter[2], splitter[3], splitter[4]))
            catalog_rms = '-'.join((r, splitter[-2], splitter[-1]))

            # complete the analysis by reference designator
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])

                # check for the OOI 1.0 datasets for review
                rl_filtered = reviewlist.loc[
                    (reviewlist['Reference Designator'] == r)
                    & (reviewlist['status'] == 'for review')]
                review_deployments = rl_filtered['deploymentNumber'].tolist()
                review_deployments_int = [
                    'deployment%04d' % int(x) for x in review_deployments
                ]
                for rev_dep in review_deployments_int:
                    rdatasets = [s for s in udatasets if rev_dep in s]
                    if len(rdatasets) > 0:
                        datasets = []
                        for dss in rdatasets:  # filter out collocated data files
                            if catalog_rms == dss.split('/')[-1].split(
                                    '_20')[0][15:]:
                                datasets.append(dss)
                            else:
                                drd = dss.split('/')[-1].split('_20')[0][15:42]
                                if drd not in dependencies and drd != r:
                                    dependencies.append(drd)

                        notes = []
                        time_ascending = ''
                        if len(datasets) == 1:
                            try:
                                ds = xr.open_dataset(datasets[0],
                                                     mask_and_scale=False)
                                ds = ds.swap_dims({'obs': 'time'})
                                fname, subsite, refdes, method, data_stream, deployment = cf.nc_attributes(
                                    datasets[0])
                            except OSError:
                                print('OSError - skipping file {}'.format(
                                    datasets[0]))
                                continue
                        elif len(datasets) > 1:
                            ds = xr.open_mfdataset(datasets,
                                                   mask_and_scale=False)
                            ds = ds.swap_dims({'obs': 'time'})
                            #ds = ds.chunk({'time': 100})
                            fname, subsite, refdes, method, data_stream, deployment = cf.nc_attributes(
                                datasets[0])
                            fname = fname.split('_20')[0]
                            notes.append('multiple deployment .nc files')
                            # when opening multiple datasets, don't check that the timestamps are in ascending order
                            time_ascending = 'not_tested'
                        else:
                            continue

                        print('\nAnalyzing file: {}'.format(fname))

                        # Get info from the data review database
                        dr_data = cf.refdes_datareview_json(refdes)
                        stream_vars = cf.return_stream_vars(data_stream)
                        sci_vars = cf.return_science_vars(data_stream)
                        node = refdes.split('-')[1]
                        if 'cspp' in data_stream or 'WFP' in node:
                            sci_vars.append('int_ctd_pressure')

                        # if 'FDCHP' in refdes:
                        #     remove_vars = ['fdchp_wind_x', 'fdchp_wind_y', 'fdchp_wind_z', 'fdchp_speed_of_sound_sonic',
                        #                    'fdchp_x_accel_g', 'fdchp_y_accel_g', 'fdchp_z_accel_g']
                        #     rv_regex = re.compile('|'.join(remove_vars))
                        #     rv_sci_vars = [nn for nn in sci_vars if not rv_regex.search(nn)]
                        #     sci_vars = rv_sci_vars

                        deploy_info = get_deployment_information(
                            dr_data, int(deployment[-4:]))

                        # Grab deployment Variables
                        deploy_start = str(deploy_info['start_date'])
                        deploy_stop = str(deploy_info['stop_date'])
                        deploy_lon = deploy_info['longitude']
                        deploy_lat = deploy_info['latitude']
                        deploy_depth = deploy_info['deployment_depth']

                        # Calculate days deployed
                        if deploy_stop != 'None':
                            r_deploy_start = pd.to_datetime(deploy_start).replace(hour=0, minute=0, second=0)
                            if deploy_stop.split('T')[1] == '00:00:00':
                                r_deploy_stop = pd.to_datetime(deploy_stop)
                            else:
                                r_deploy_stop = (pd.to_datetime(deploy_stop) +
                                                 timedelta(days=1)).replace(hour=0, minute=0, second=0)
                            n_days_deployed = (r_deploy_stop - r_deploy_start).days
                        else:
                            n_days_deployed = None

                        # Add reference designator to dictionary
                        if 'refdes' not in data:
                            data['refdes'] = refdes

                        deployments = data['deployments'].keys()
                        data_start = pd.to_datetime(min(
                            ds['time'].values)).strftime('%Y-%m-%dT%H:%M:%S')
                        data_stop = pd.to_datetime(max(
                            ds['time'].values)).strftime('%Y-%m-%dT%H:%M:%S')

                        # Add deployment and info to dictionary and initialize delivery method sub-dictionary
                        if deployment not in deployments:
                            data['deployments'][deployment] = OrderedDict(
                                deploy_start=deploy_start,
                                deploy_stop=deploy_stop,
                                n_days_deployed=n_days_deployed,
                                lon=deploy_lon,
                                lat=deploy_lat,
                                deploy_depth=deploy_depth,
                                method=OrderedDict())

                        # Add delivery methods to dictionary and initialize stream sub-dictionary
                        methods = data['deployments'][deployment][
                            'method'].keys()
                        if method not in methods:
                            data['deployments'][deployment]['method'][
                                method] = OrderedDict(stream=OrderedDict())

                        # Add streams to dictionary and initialize file sub-dictionary
                        streams = data['deployments'][deployment]['method'][
                            method]['stream'].keys()

                        if data_stream not in streams:
                            data['deployments'][deployment]['method'][method][
                                'stream'][data_stream] = OrderedDict(
                                    file=OrderedDict())

                        # Get a list of data gaps >1 day
                        time_df = pd.DataFrame(ds['time'].values,
                                               columns=['time'])
                        gap_list = cf.timestamp_gap_test(time_df)

                        # Calculate the sampling rate to the nearest second
                        time_df['diff'] = time_df['time'].diff().astype(
                            'timedelta64[s]')
                        rates_df = time_df.groupby(['diff']).agg(['count'])
                        n_diff_calc = len(time_df) - 1
                        rates = dict(n_unique_rates=len(rates_df),
                                     common_sampling_rates=dict())
                        for i, row in rates_df.iterrows():
                            percent = (float(row['time']['count']) /
                                       float(n_diff_calc))
                            if percent > 0.1:
                                rates['common_sampling_rates'].update(
                                    {int(i): '{:.2%}'.format(percent)})

                        sampling_rt_sec = None
                        for k, v in rates['common_sampling_rates'].items():
                            if float(v.strip('%')) > 50.00:
                                sampling_rt_sec = k

                        if not sampling_rt_sec:
                            sampling_rt_sec = 'no consistent sampling rate: {}'.format(
                                rates['common_sampling_rates'])
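                        # e.g. rates = {'n_unique_rates': 5, 'common_sampling_rates': {1: '85.20%', 2: '12.40%'}}
                        # gives sampling_rt_sec = 1, the only interval covering more than 50% of the time diffs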

                        # Check that the timestamps in the file are unique
                        time = ds['time']
                        len_time = len(time)
                        len_time_unique = len(np.unique(time))
                        if len_time == len_time_unique:
                            time_test = 'pass'
                        else:
                            time_test = 'fail'

                        # Check that the timestamps in the file are in ascending order
                        if time_ascending != 'not_tested':
                            # convert time to number
                            time_in = [
                                dt.datetime.utcfromtimestamp(
                                    np.datetime64(x).astype('O') / 1e9)
                                for x in ds['time'].values
                            ]
                            time_data = nc.date2num(
                                time_in, 'seconds since 1900-01-01')

                            # Create a list of True or False by iterating through the array of time and checking
                            # if every time stamp is increasing
                            result = [(time_data[k + 1] - time_data[k]) > 0
                                      for k in range(len(time_data) - 1)]

                            # Print outcome of the iteration with the list of indices when time is not increasing
                            if result.count(True) == len(time) - 1:
                                time_ascending = 'pass'
                            else:
                                ind_fail = {
                                    k: time_in[k]
                                    for k, v in enumerate(result) if v is False
                                }
                                time_ascending = 'fail: {}'.format(ind_fail)

                        # Count the number of days for which there is at least 1 timestamp
                        n_days = len(
                            np.unique(time.values.astype('datetime64[D]')))

                        # Compare variables in file to variables in Data Review Database
                        ds_variables = list(ds.data_vars.keys()) + list(
                            ds.coords.keys())
                        #ds_variables = [k for k in ds]
                        ds_variables = eliminate_common_variables(ds_variables)
                        ds_variables = [
                            x for x in ds_variables if 'qc' not in x
                        ]
                        _, unmatch1 = compare_lists(stream_vars, ds_variables)
                        _, unmatch2 = compare_lists(ds_variables, stream_vars)

                        # Check deployment pressure from asset management against pressure variable in file
                        press = pf.pressure_var(ds, list(ds.coords.keys()))
                        if press is None:
                            press = pf.pressure_var(ds,
                                                    list(ds.data_vars.keys()))

                        # calculate mean pressure from data, excluding outliers +/- 3 SD
                        try:
                            pressure = ds[press]
                            num_dims = len(pressure.dims)
                            if len(pressure) > 1:
                                # if the pressure variable is an array of all zeros (as in the case of
                                # pressure_depth for OPTAAs on surface-piercing profilers), try int_ctd_pressure instead
                                if (len(np.unique(pressure)) == 1) & (np.unique(pressure)[0] == 0.0):
                                    try:
                                        pressure = ds['int_ctd_pressure']
                                        press = 'int_ctd_pressure'
                                    except KeyError:
                                        pass  # no int_ctd_pressure available; keep the all-zero variable

                                # reject NaNs
                                p_nonan = pressure.values[~np.isnan(pressure.values)]

                                # reject fill values
                                p_nonan_nofv = p_nonan[p_nonan != pressure._FillValue]

                                # reject data outside of global ranges
                                pg_min, pg_max = cf.get_global_ranges(r, press)
                                if pg_min is not None and pg_max is not None:
                                    pgr_ind = cf.reject_global_ranges(p_nonan_nofv, pg_min, pg_max)
                                    p_nonan_nofv_gr = p_nonan_nofv[pgr_ind]
                                else:
                                    p_nonan_nofv_gr = p_nonan_nofv

                                if len(p_nonan_nofv_gr) > 0 and num_dims == 1:
                                    press_outliers, pressure_mean, _, pressure_max, _, _ = \
                                        cf.variable_statistics(p_nonan_nofv_gr, 3)
                                    pressure_mean = round(pressure_mean, 2)
                                    pressure_max = round(pressure_max, 2)
                                elif len(p_nonan_nofv_gr) > 0 and num_dims > 1:
                                    print('variable has more than 1 dimension')
                                    press_outliers = 'not calculated: variable has more than 1 dimension'
                                    pressure_mean = round(np.nanmean(p_nonan_nofv_gr), 2)
                                    pressure_max = round(np.nanmax(p_nonan_nofv_gr), 2)
                                else:
                                    press_outliers = None
                                    pressure_mean = None
                                    pressure_max = None
                                    if len(pressure) > 0 and len(p_nonan) == 0:
                                        notes.append('Pressure variable all NaNs')
                                    elif len(pressure) > 0 and len(p_nonan) > 0 and len(p_nonan_nofv) == 0:
                                        notes.append('Pressure variable all fill values')
                                    elif len(pressure) > 0 and len(p_nonan) > 0 and len(p_nonan_nofv) > 0 and len(p_nonan_nofv_gr) == 0:
                                        notes.append('Pressure variable outside of global ranges')

                            else:  # if there is only 1 data point
                                press_outliers = 0
                                pressure_mean = round(ds[press].values.tolist()[0], 2)
                                pressure_max = round(ds[press].values.tolist()[0], 2)

                            try:
                                pressure_units = pressure.units
                            except AttributeError:
                                pressure_units = 'no units attribute for pressure'

                            if pressure_mean:
                                if ('WFP' in node) or ('MOAS' in subsite) or (
                                        'SP' in node):
                                    pressure_compare = int(round(pressure_max))
                                else:
                                    pressure_compare = int(
                                        round(pressure_mean))

                                if pressure_units == '0.001 dbar':
                                    pressure_max = round(pressure_max / 1000, 2)
                                    pressure_mean = round(pressure_mean / 1000, 2)
                                    pressure_compare = round(pressure_compare / 1000, 2)
                                    notes.append('Pressure converted from 0.001 dbar to dbar for pressure comparison')

                                elif pressure_units == 'daPa':
                                    pressure_max = round(pressure_max / 1000, 2)
                                    pressure_mean = round(pressure_mean / 1000, 2)
                                    pressure_compare = round(pressure_compare / 1000, 2)
                                    notes.append('Pressure converted from daPa to dbar for pressure comparison')

                            else:
                                pressure_compare = None

                            if (not deploy_depth) or (not pressure_mean):
                                pressure_diff = None
                            else:
                                pressure_diff = pressure_compare - deploy_depth

                        except KeyError:
                            press = 'no seawater pressure in file'
                            pressure_diff = None
                            pressure_mean = None
                            pressure_max = None
                            pressure_compare = None
                            press_outliers = None
                            pressure_units = None

                        # Add files and info to dictionary
                        filenames = data['deployments'][deployment]['method'][
                            method]['stream'][data_stream]['file'].keys()

                        if fname not in filenames:
                            data['deployments'][deployment]['method'][method][
                                'stream'][data_stream]['file'][
                                    fname] = OrderedDict(
                                        file_downloaded=pd.to_datetime(
                                            splitter[0][0:15]).strftime(
                                                '%Y-%m-%dT%H:%M:%S'),
                                        file_coordinates=list(
                                            ds.coords.keys()),
                                        sampling_rate_seconds=sampling_rt_sec,
                                        sampling_rate_details=rates,
                                        data_start=data_start,
                                        data_stop=data_stop,
                                        time_gaps=gap_list,
                                        unique_timestamps=time_test,
                                        n_timestamps=len_time,
                                        n_days=n_days,
                                        notes=notes,
                                        ascending_timestamps=time_ascending,
                                        pressure_comparison=dict(
                                            pressure_mean=pressure_mean,
                                            units=pressure_units,
                                            num_outliers=press_outliers,
                                            diff=pressure_diff,
                                            pressure_max=pressure_max,
                                            variable=press,
                                            pressure_compare=pressure_compare),
                                        vars_in_file=ds_variables,
                                        vars_not_in_file=[
                                            x for x in unmatch1
                                            if 'time' not in x
                                        ],
                                        vars_not_in_db=unmatch2,
                                        sci_var_stats=OrderedDict())
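
                        # resulting JSON structure (abridged):
                        # data['deployments'][dep]['method'][m]['stream'][s]['file'][fname] = {
                        #     'file_downloaded': ..., 'data_start': ..., 'time_gaps': [...],
                        #     'pressure_comparison': {...}, 'sci_var_stats': {...}, ...}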

                        # calculate statistics for science variables, excluding outliers +/- 5 SD
                        for sv in sci_vars:
                            if sv != 't_max':  # for ADCP
                                if sv != 'wavss_a_buoymotion_time':
                                    print(sv)
                                    try:
                                        var = ds[sv]
                                        # need to round SPKIR values to 1 decimal place to match the global ranges.
                                        # otherwise, values that round to zero (e.g. 1.55294e-05) will be excluded by
                                        # the global range test
                                        # if 'spkir' in sv:
                                        #     vD = np.round(var.values, 1)
                                        # else:
                                        #     vD = var.values
                                        vD = var.values
                                        if 'timedelta' not in str(var.values.dtype):
                                            # for OPTAA wavelengths: when multiple files are opened with xr.open_mfdataset
                                            # xarray automatically forces all variables to have the same number of
                                            # dimensions. So in this case wavelength_a and wavelength_c have 1 dimension
                                            # in the individual files, so I'm forcing the analysis to treat them like
                                            # they have 1 dimension (when there are multiple files for 1 deployment)
                                            if sv == 'wavelength_a' or sv == 'wavelength_c':
                                                g_min, g_max = cf.get_global_ranges(r, sv)
                                                vnum_dims = len(var.dims)
                                                if vnum_dims == 1:
                                                    n_all = len(var)
                                                    mean = list(vD)
                                                else:
                                                    vnum_dims = 1
                                                    n_all = len(vD[0])
                                                    mean = list(vD[0])
                                                num_outliers = None
                                                vmin = None
                                                vmax = None
                                                sd = None
                                                n_stats = 'not calculated'
                                                var_units = var.units
                                                n_nan = None
                                                n_fv = None
                                                n_grange = 'no global ranges'
                                                fv = var._FillValue

                                            else:
                                                vnum_dims = len(var.dims)
                                                if vnum_dims > 2:
                                                    print('variable has more than 2 dimensions')
                                                    num_outliers = None
                                                    mean = None
                                                    vmin = None
                                                    vmax = None
                                                    sd = None
                                                    n_stats = 'variable has more than 2 dimensions'
                                                    var_units = var.units
                                                    n_nan = None
                                                    n_fv = None
                                                    n_grange = None
                                                    fv = None
                                                    n_all = None
                                                else:
                                                    if vnum_dims > 1:
                                                        n_all = [len(vD), len(vD.flatten())]
                                                    else:
                                                        n_all = len(vD)
                                                    n_nan = int(np.sum(np.isnan(vD)))
                                                    fv = var._FillValue
                                                    var_nofv = var.where(var != fv)
                                                    n_fv = int(np.sum(np.isnan(var_nofv.values))) - n_nan

                                                    try:
                                                        var_units = var.units
                                                    except AttributeError:
                                                        var_units = 'no_units'
                                                    g_min, g_max = cf.get_global_ranges(r, sv)
                                                    if not np.all(np.isnan(var_nofv.values)):
                                                        # reject data outside of global ranges
                                                        if g_min is not None and g_max is not None:
                                                            var_gr = var_nofv.where((var_nofv >= g_min) & (var_nofv <= g_max))
                                                            n_grange = int(np.sum(np.isnan(var_gr)) - n_fv - n_nan)
                                                        else:
                                                            n_grange = 'no global ranges'
                                                            var_gr = var_nofv

                                                        if not np.all(np.isnan(var_gr)):
                                                            if sv == 'spkir_abj_cspp_downwelling_vector':
                                                                # don't remove outliers from dataset
                                                                num_outliers, mean, vmin, vmax, sd, n_stats = cf.variable_statistics_spkir(var_gr)
                                                            else:
                                                                if vnum_dims > 1:
                                                                    var_gr = var_gr.values.flatten()
                                                                # drop nans before calculating stats
                                                                var_gr = var_gr[~np.isnan(var_gr)]
                                                                num_outliers, mean, vmin, vmax, sd, n_stats = cf.variable_statistics(var_gr, 5)
                                                        else:
                                                            num_outliers = None
                                                            mean = None
                                                            vmin = None
                                                            vmax = None
                                                            sd = None
                                                            n_stats = 0
                                                            n_grange = None
                                                    else:
                                                        num_outliers = None
                                                        mean = None
                                                        vmin = None
                                                        vmax = None
                                                        sd = None
                                                        n_stats = 0
                                                        n_grange = None

                                    except KeyError:
                                        if sv == 'int_ctd_pressure':
                                            continue
                                        # variable not in file: record an empty-stats entry and move on
                                        # (the original fell through to checks that referenced an
                                        # undefined 'var' when the variable was missing)
                                        data['deployments'][deployment]['method'][method]['stream'][
                                            data_stream]['file'][fname]['sci_var_stats'][sv] = dict(
                                                n_outliers=None, mean=None, min=None, max=None,
                                                stdev=None, n_stats='variable not found in file',
                                                units=None, n_nans=None, n_fillvalues=None,
                                                fill_value='None', global_ranges=[None, None],
                                                n_grange=None, n_all=None)
                                        continue

                                    if 'timedelta' in str(var.values.dtype):
                                        continue  # timedelta variables are not summarized
                                    if vnum_dims > 1:
                                        sv = '{} (dims: {})'.format(sv, list(var.dims))
                                    data['deployments'][deployment]['method'][method]['stream'][
                                        data_stream]['file'][fname]['sci_var_stats'][sv] = dict(
                                            n_outliers=num_outliers,
                                            mean=mean,
                                            min=vmin,
                                            max=vmax,
                                            stdev=sd,
                                            n_stats=n_stats,
                                            units=var_units,
                                            n_nans=n_nan,
                                            n_fillvalues=n_fv,
                                            fill_value=str(fv),
                                            global_ranges=[g_min, g_max],
                                            n_grange=n_grange,
                                            n_all=n_all)

        sfile = os.path.join(save_dir, '{}-file_analysis.json'.format(r))
        with open(sfile, 'w') as outfile:
            json.dump(data, outfile)

        depfile = os.path.join(save_dir, '{}-dependencies.txt'.format(r))
        with open(depfile, 'w') as depf:
            depf.write(str(dependencies))

        json_file_list.append(str(sfile))

    return json_file_list
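# A minimal sketch (illustrative, not part of the original) of reading the JSON
# summaries written by the function above:
def load_file_analysis(json_file_list):
    summaries = {}
    for jf in json_file_list:
        with open(jf) as f:
            d = json.load(f)  # 'json' is already imported at module level for json.dump above
        summaries[d.get('refdes', jf)] = d.get('deployments', {})
    return summaries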
def main(sDir, ncdir):
    rd_list = [ncdir.split('/')[-2]]

    for r in rd_list:
        print('\n{}'.format(r))
        subsite = r.split('-')[0]
        array = subsite[0:2]

        ps_df, n_streams = cf.get_preferred_stream_info(r)

        # get end times of deployments
        dr_data = cf.refdes_datareview_json(r)
        deployments = []
        end_times = []
        for index, row in ps_df.iterrows():
            deploy = row['deployment']
            deploy_info = get_deployment_information(dr_data, int(deploy[-4:]))
            deployments.append(int(deploy[-4:]))
            end_times.append(pd.to_datetime(deploy_info['stop_date']))

        # filter datasets
        fdatasets = []
        for root, dirs, files in os.walk(ncdir):
            for f in files:
                if f.endswith('.nc'):
                    fdatasets.append(f)
        # for u in url_list:
        #     splitter = u.split('/')[-2].split('-')
        #     rd_check = '-'.join((splitter[1], splitter[2], splitter[3], splitter[4]))
        #     if rd_check == r:
        #         udatasets = cf.get_nc_urls([u])
        #         datasets.append(udatasets)
        # datasets = list(itertools.chain(*datasets))
        # main_sensor = r.split('-')[-1]
        # fdatasets = cf.filter_collocated_instruments(main_sensor, datasets)
        methodstream = []
        for f in fdatasets:
            strm = '_'.join((f.split('-')[-2].split('_')[0], f.split('-')[-2].split('_')[1]))
            methodstream.append('-'.join((f.split('-')[-3], strm)))

        for ms in np.unique(methodstream):
            fdatasets_sel = [x for x in fdatasets if ms in x]
            save_dir = os.path.join(sDir, array, subsite, r, 'timeseries_plots_all')
            cf.create_dir(save_dir)

            stream_sci_vars_dict = dict()
            for x in dr_data['instrument']['data_streams']:
                dr_ms = '-'.join((x['method'], x['stream_name']))
                if ms == dr_ms:
                    stream_sci_vars_dict[dr_ms] = dict(vars=dict())
                    sci_vars = dict()
                    for y in x['stream']['parameters']:
                        if y['data_product_type'] == 'Science Data':
                            sci_vars.update({y['name']: dict(db_units=y['unit'])})
                    if len(sci_vars) > 0:
                        stream_sci_vars_dict[dr_ms]['vars'] = sci_vars

            sci_vars_dict = cd.initialize_empty_arrays(stream_sci_vars_dict, ms)
            print('\nAppending data from files: {}'.format(ms))
            for fd in fdatasets_sel:
                ds = xr.open_dataset(os.path.join(ncdir, fd), mask_and_scale=False)
                for var in list(sci_vars_dict[ms]['vars'].keys()):
                    sh = sci_vars_dict[ms]['vars'][var]
                    if ds[var].units == sh['db_units']:
                        if ds[var]._FillValue not in sh['fv']:
                            sh['fv'].append(ds[var]._FillValue)
                        if ds[var].units not in sh['units']:
                            sh['units'].append(ds[var].units)
                        tD = ds['time'].values
                        varD = ds[var].values
                        sh['t'] = np.append(sh['t'], tD)
                        sh['values'] = np.append(sh['values'], varD)

            print('\nPlotting data')
            for m, n in sci_vars_dict.items():
                for sv, vinfo in n['vars'].items():
                    print(sv)
                    if len(vinfo['t']) < 1:
                        print('no variable data to plot')
                    else:
                        sv_units = vinfo['units'][0]
                        t0 = pd.to_datetime(min(vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                        t1 = pd.to_datetime(max(vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                        x = vinfo['t']
                        y = vinfo['values']

                        # reject NaNs
                        nan_ind = ~np.isnan(y)
                        x_nonan = x[nan_ind]
                        y_nonan = y[nan_ind]

                        # reject fill values
                        fv_ind = y_nonan != vinfo['fv'][0]
                        x_nonan_nofv = x_nonan[fv_ind]
                        y_nonan_nofv = y_nonan[fv_ind]

                        # reject extreme values
                        Ev_ind = cf.reject_extreme_values(y_nonan_nofv)
                        y_nonan_nofv_nE = y_nonan_nofv[Ev_ind]
                        x_nonan_nofv_nE = x_nonan_nofv[Ev_ind]

                        # reject values outside global ranges:
                        global_min, global_max = cf.get_global_ranges(r, sv)
                        if global_min is not None and global_max is not None:
                            gr_ind = cf.reject_global_ranges(y_nonan_nofv_nE, global_min, global_max)
                            y_nonan_nofv_nE_nogr = y_nonan_nofv_nE[gr_ind]
                            x_nonan_nofv_nE_nogr = x_nonan_nofv_nE[gr_ind]
                        else:
                            y_nonan_nofv_nE_nogr = y_nonan_nofv_nE
                            x_nonan_nofv_nE_nogr = x_nonan_nofv_nE

                        title = ' '.join((r, ms.split('-')[0]))

                        if len(y_nonan_nofv) > 0:
                            if m == 'common_stream_placeholder':
                                sname = '-'.join((r, sv))
                            else:
                                sname = '-'.join((r, m, sv))

                            # Plot all data
                            fig, ax = pf.plot_timeseries_all(x_nonan_nofv, y_nonan_nofv, sv, sv_units, stdev=None)
                            ax.set_title((title + '\nDeployments: ' + str(sorted(deployments)) + '\n' + t0 + ' - ' + t1),
                                         fontsize=8)
                            for etimes in end_times:
                                ax.axvline(x=etimes,  color='b', linestyle='--', linewidth=.6)

                            # if global_min is not None and global_max is not None:
                            #     ax.axhline(y=global_min, color='r', linestyle='--', linewidth=.6)
                            #     ax.axhline(y=global_max, color='r', linestyle='--', linewidth=.6)

                            pf.save_fig(save_dir, sname)

                            # Plot data with extreme values, data outside global ranges and outliers removed
                            fig, ax = pf.plot_timeseries_all(x_nonan_nofv_nE_nogr, y_nonan_nofv_nE_nogr, sv, sv_units, stdev=5)
                            ax.set_title((title + '\nDeployments: ' + str(sorted(deployments)) + '\n' + t0 + ' - ' + t1),
                                         fontsize=8)
                            for etimes in end_times:
                                ax.axvline(x=etimes,  color='b', linestyle='--', linewidth=.6)

                            # if global_min is not None and global_max is not None:
                            #     ax.axhline(y=global_min, color='r', linestyle='--', linewidth=.6)
                            #     ax.axhline(y=global_max, color='r', linestyle='--', linewidth=.6)

                            sfile = '_'.join((sname, 'rmoutliers'))
                            pf.save_fig(save_dir, sfile)
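# Illustrative invocation (placeholder paths); ncdir must end with the reference
# designator directory, since rd_list is parsed from it:
# main('/path/to/save_dir', '/path/to/nc_files/GA01SUMO-RID16-04-VELPTA000/')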
Example n. 16
# imports inferred from usage below; 'functions.common' / 'functions.plotting'
# are assumed paths for the project-local helper modules
import datetime as dt
import itertools
import os
import numpy as np
import pandas as pd
import xarray as xr
import functions.common as cf
import functions.plotting as pf
def main(sDir, url_list, start_time, end_time, preferred_only):
    rd_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        if rd not in rd_list:
            rd_list.append(rd)

    for r in rd_list:
        print('\n{}'.format(r))
        datasets = []
        for u in url_list:
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join(
                (splitter[1], splitter[2], splitter[3], splitter[4]))
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])
                datasets.append(udatasets)
        datasets = list(itertools.chain(*datasets))
        fdatasets = []
        if preferred_only == 'yes':
            # get the preferred stream information
            ps_df, n_streams = cf.get_preferred_stream_info(r)
            for index, row in ps_df.iterrows():
                for ii in range(n_streams):
                    try:
                        rms = '-'.join((r, row[ii]))
                    except TypeError:
                        continue
                    for dd in datasets:
                        spl = dd.split('/')[-2].split('-')
                        catalog_rms = '-'.join(
                            (spl[1], spl[2], spl[3], spl[4], spl[5], spl[6]))
                        fdeploy = dd.split('/')[-1].split('_')[0]
                        if rms == catalog_rms and fdeploy == row['deployment']:
                            fdatasets.append(dd)
        else:
            fdatasets = datasets

        fdatasets = np.unique(fdatasets).tolist()
        for fd in fdatasets:
            ds = xr.open_dataset(fd, mask_and_scale=False)
            ds = ds.swap_dims({'obs': 'time'})

            if start_time is not None and end_time is not None:
                ds = ds.sel(time=slice(start_time, end_time))
                if len(ds['time'].values) == 0:
                    print('No data to plot for specified time range: ({} to {})'.format(
                        start_time, end_time))
                    continue

            fname, subsite, refdes, method, stream, deployment = cf.nc_attributes(
                fd)
            sci_vars = cf.return_science_vars(stream)
            print('\nPlotting {} {}'.format(r, deployment))
            array = subsite[0:2]
            filename = '_'.join(fname.split('_')[:-1])
            save_dir = os.path.join(sDir, array, subsite, refdes,
                                    'timeseries_plots')
            cf.create_dir(save_dir)

            tm = ds['time'].values
            t0 = pd.to_datetime(tm.min()).strftime('%Y-%m-%dT%H:%M:%S')
            t1 = pd.to_datetime(tm.max()).strftime('%Y-%m-%dT%H:%M:%S')
            title = ' '.join((deployment, refdes, method))

            # -------- plot entire deployment --------

            for var in sci_vars:
                print(var)
                vv = ds[var]
                fv = vv._FillValue
                # need to round SPKIR values to 1 decimal place to match the global ranges. otherwise, values that
                # round to zero (e.g. 1.55294e-05) will be excluded by the global range test
                # v = np.round(vv.values.T, 1)  # .T = transpose 2D array
                v = vv.values.T
                n_nan = np.sum(np.isnan(v))

                # convert fill values to nans
                v[v == fv] = np.nan
                n_fv = np.sum(np.isnan(v)) - n_nan

                # plot before global ranges are removed
                fig, ax = pf.plot_spkir(tm, v, vv.name, vv.units)
                ax.set_title((title + '\n' + t0 + ' - ' + t1), fontsize=9)
                sfile = '-'.join((filename, var, t0[:10]))
                pf.save_fig(save_dir, sfile)

                # reject data outside of global ranges
                [g_min, g_max] = cf.get_global_ranges(r, var)
                if g_min is not None and g_max is not None:
                    v[v < g_min] = np.nan
                    v[v > g_max] = np.nan
                    n_grange = np.sum(np.isnan(v)) - n_fv - n_nan
                else:
                    n_grange = 'no global ranges'

                # plot after global ranges are removed
                fig, ax = pf.plot_spkir(tm, v, vv.name, vv.units)
                title2 = 'removed: {} global ranges [{}, {}]'.format(
                    n_grange, g_min, g_max)
                ax.set_title((title + '\n' + t0 + ' - ' + t1 + '\n' + title2),
                             fontsize=9)
                sfile = '-'.join((filename, var, t0[:10], 'rmgr'))
                pf.save_fig(save_dir, sfile)

            # -------- break the deployment into months and plot --------

            save_dir = os.path.join(sDir, array, subsite, refdes,
                                    'timeseries_plots', 'monthly')
            cf.create_dir(save_dir)

            # create list of start and end dates
            dt_start = dt.datetime.strptime(t0, '%Y-%m-%dT%H:%M:%S')
            dt_end = dt.datetime.strptime(t1, '%Y-%m-%dT%H:%M:%S')
            start_dates = [dt_start.strftime('%m-%d-%YT00:00:00')]
            end_dates = []
            ts1 = dt_start
            while ts1 <= dt_end:
                ts2 = ts1 + dt.timedelta(days=1)
                if ts2.month != ts1.month:
                    start_dates.append(ts2.strftime('%m-%d-%YT00:00:00'))
                    end_dates.append(ts1.strftime('%m-%d-%YT23:59:59'))
                ts1 = ts2
            end_dates.append(dt_end.strftime('%m-%d-%YT23:59:59'))
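            # e.g. a deployment from 2016-05-20 to 2016-07-08 yields
            # start_dates = ['05-20-2016T00:00:00', '06-01-2016T00:00:00', '07-01-2016T00:00:00']
            # end_dates = ['05-31-2016T23:59:59', '06-30-2016T23:59:59', '07-08-2016T23:59:59']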

            for sd, ed in zip(start_dates, end_dates):
                sd_format = dt.datetime.strptime(sd, '%m-%d-%YT%H:%M:%S')
                ed_format = dt.datetime.strptime(ed, '%m-%d-%YT%H:%M:%S')
                ds_month = ds.sel(time=slice(sd_format, ed_format))
                if len(ds_month['time'].values) == 0:
                    print('No data to plot for specified time range: ({} to {})'.format(sd, ed))
                    continue
                tm = ds_month['time'].values
                t0 = pd.to_datetime(tm.min()).strftime('%Y-%m-%dT%H:%M:%S')
                t1 = pd.to_datetime(tm.max()).strftime('%Y-%m-%dT%H:%M:%S')

                for var in sci_vars:
                    print(var)
                    vv = ds_month[var]
                    fv = vv._FillValue
                    v = vv.values.T  # transpose 2D array
                    n_nan = np.sum(np.isnan(v))

                    # convert fill values to nans
                    v[v == fv] = np.nan
                    n_fv = np.sum(np.isnan(v)) - n_nan

                    # reject data outside of global ranges
                    [g_min, g_max] = cf.get_global_ranges(r, var)
                    if g_min is not None and g_max is not None:
                        v[v < g_min] = np.nan
                        v[v > g_max] = np.nan
                        n_grange = np.sum(np.isnan(v)) - n_fv - n_nan
                    else:
                        n_grange = 'no global ranges'

                    # plot after global ranges are removed
                    fig, ax = pf.plot_spkir(tm, v, vv.name, vv.units)
                    title2 = 'removed: {} global ranges [{}, {}]'.format(
                        n_grange, g_min, g_max)
                    ax.set_title(
                        (title + '\n' + t0 + ' - ' + t1 + '\n' + title2),
                        fontsize=9)
                    sfile = '-'.join((filename, var, t0[:7], 'rmgr'))
                    pf.save_fig(save_dir, sfile)
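# Illustrative invocation (placeholder THREDDS catalog URLs in url_list):
# main('/path/to/save_dir', url_list, start_time=None, end_time=None, preferred_only='yes')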
Example n. 17
# imports inferred from usage below; 'functions.common' is an assumed path for
# the project-local helper module, and 'tools' is assumed to be a module-level
# Bokeh toolbar definition in the original source
import os
import re
import numpy as np
import xarray as xr
from bokeh.plotting import figure, output_file, save, show
from bokeh.io import reset_output
from bokeh.models import ColumnDataSource, BoxAnnotation
import functions.common as cf

tools = 'pan,box_zoom,wheel_zoom,reset,save'
def main(nc, save_dir, display=False):
    cf.create_dir(save_dir)

    with xr.open_dataset(nc, mask_and_scale=False) as ds:
        subsite = ds.subsite
        node = ds.node
        sensor = ds.sensor
        stream = ds.stream
        deployment = 'D0000{}'.format(str(np.unique(ds.deployment)[0]))
        t0 = ds.time_coverage_start
        t1 = ds.time_coverage_end
        sub_dir = os.path.join(save_dir, subsite, '{}-{}-{}'.format(subsite, node, sensor), stream, deployment)

        cf.create_dir(sub_dir)

        misc = ['quality', 'string', 'timestamp', 'deployment', 'id', 'provenance', 'qc', 'time', 'mission', 'obs',
                'volt', 'ref', 'sig', 'amp', 'rph', 'calphase', 'phase', 'therm']
        reg_ex = re.compile(r'\b(?:%s)\b' % '|'.join(misc))

        #  keep variables that are not in the regular expression
        vars = [s for s in ds.data_vars if not reg_ex.search(s)]

        x = ds['time'].data

        for v in vars:  # List of dataset variables
            # print v
            # Filter out variables that are strings, datetimes, or qc related
            if ds[v].dtype.kind == 'S' or ds[v].dtype == np.dtype('datetime64[ns]') or 'time' in v or 'qc_results' in v or 'qc_executed' in v:
                continue
            y = ds[v]
            try:
                y_units = y.units
            except AttributeError:
                y_units = None

            y_data = y.data

            if y_data.ndim > 1:
                continue

            source = ColumnDataSource(
                data=dict(
                    x=x,
                    y=y_data,
                )
            )
            gr = cf.get_global_ranges(subsite, node, sensor, v)

            output_file('{}/{}-{}-{}.html'.format(sub_dir, v, ds.time_coverage_start.replace(':', ''), ds.time_coverage_end.replace(':', '')))

            p = figure(width=1200,
                       height=800,
                       title='{}-{}-{}: {} - {} - {}, Stream: {}'.format(subsite, node, sensor, deployment, t0, t1, stream),
                       x_axis_label='Time (GMT)', y_axis_label='{} ({})'.format(v, y_units),
                       x_axis_type='datetime',
                       tools=tools)  # toolbar string defined at module level above
            p.line('x', 'y', legend=v, line_width=3, source=source)
            p.circle('x', 'y', fill_color='white', size=4, source=source)
            if gr:
                low_box = BoxAnnotation(top=gr[0], fill_alpha=0.05, fill_color='red')
                mid_box = BoxAnnotation(top=gr[1], bottom=gr[0], fill_alpha=0.1, fill_color='green')
                high_box = BoxAnnotation(bottom=gr[1], fill_alpha=0.05, fill_color='red')
                p.add_layout(low_box)
                p.add_layout(mid_box)
                p.add_layout(high_box)

            if display:
                show(p)
            else:
                save(p)
            reset_output()
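# Illustrative invocation (the .nc path is a placeholder):
# main('/path/to/deployment0001_GA01SUMO-RID16-04-VELPTA000-telemetered-velpt_ab_dcl_instrument_20160520T000000-20160708T000000.nc',
#      '/path/to/save_dir', display=False)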
Example n. 19
# imports inferred from usage below; 'functions.common' / 'functions.combine_datasets'
# are assumed paths for the project-local helper modules
import itertools
import os
import numpy as np
import pandas as pd
import xarray as xr
import functions.common as cf
import functions.combine_datasets as cd
def main(url_list, sDir, plot_type):
    """
    url_list : paths to instrument data by delivery method
    sDir : path to the directory on your machine to save files
    plot_type : folder name for a plot type
    """
    rd_list = []
    ms_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        ms = uu.split(rd + '-')[1].split('/')[0]
        if rd not in rd_list:
            rd_list.append(rd)
        if ms not in ms_list:
            ms_list.append(ms)
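    # e.g. a url ending in '.../20190101T000000-GA01SUMO-RID16-04-VELPTA000-telemetered-velpt_ab_dcl_instrument/catalog.html'
    # gives rd = 'GA01SUMO-RID16-04-VELPTA000' and ms = 'telemetered-velpt_ab_dcl_instrument'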
    # separate different instruments
    for r in rd_list:
        print('\n{}'.format(r))
        subsite = r.split('-')[0]
        array = subsite[0:2]
        main_sensor = r.split('-')[-1]

        ps_df, n_streams = cf.get_preferred_stream_info(r)

        # read in the analysis file
        dr_data = cf.refdes_datareview_json(r)


        # get end times of deployments
        deployments = []
        end_times = []
        for index, row in ps_df.iterrows():
            deploy = row['deployment']
            deploy_info = cf.get_deployment_information(
                dr_data, int(deploy[-4:]))
            deployments.append(int(deploy[-4:]))
            end_times.append(pd.to_datetime(deploy_info['stop_date']))

        # get the list of data files and filter out collocated instruments and other streams
        datasets = []
        for u in url_list:
            print(u)
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join(
                (splitter[1], splitter[2], splitter[3], splitter[4]))
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])
                datasets.append(udatasets)

        datasets = list(itertools.chain(*datasets))
        fdatasets = cf.filter_collocated_instruments(main_sensor, datasets)
        fdatasets = cf.filter_other_streams(r, ms_list, fdatasets)
        '''
        separate data files by methods
        '''
        for ms in ms_list:
            fdatasets_sel = [x for x in fdatasets if ms in x]

            # create a folder to save figures
            save_dir = os.path.join(sDir, array, subsite, r, plot_type,
                                    ms.split('-')[0])
            cf.create_dir(save_dir)

            # create a dictionary for science variables from analysis file
            stream_sci_vars_dict = dict()
            for x in dr_data['instrument']['data_streams']:
                dr_ms = '-'.join((x['method'], x['stream_name']))
                if ms == dr_ms:
                    stream_sci_vars_dict[dr_ms] = dict(vars=dict())
                    sci_vars = dict()
                    for y in x['stream']['parameters']:
                        if y['data_product_type'] == 'Science Data':
                            sci_vars.update(
                                {y['name']: dict(db_units=y['unit'])})
                    if len(sci_vars) > 0:
                        stream_sci_vars_dict[dr_ms]['vars'] = sci_vars

            # initialize an empty data array for science variables in dictionary
            sci_vars_dict = cd.initialize_empty_arrays(stream_sci_vars_dict,
                                                       ms)

            print('\nAppending data from files: {}'.format(ms))
            y_unit = []
            y_name = []
            for fd in fdatasets_sel:
                ds = xr.open_dataset(fd, mask_and_scale=False)
                print('\nAppending data file: {}'.format(fd.split('/')[-1]))
                for var in list(sci_vars_dict[ms]['vars'].keys()):
                    sh = sci_vars_dict[ms]['vars'][var]
                    if ds[var].units == sh['db_units']:
                        if ds[var]._FillValue not in sh['fv']:
                            sh['fv'].append(ds[var]._FillValue)
                        if ds[var].units not in sh['units']:
                            sh['units'].append(ds[var].units)

                        # time
                        t = ds['time'].values
                        t0 = pd.to_datetime(
                            t.min()).strftime('%Y-%m-%dT%H:%M:%S')
                        t1 = pd.to_datetime(
                            t.max()).strftime('%Y-%m-%dT%H:%M:%S')

                        # sci variable
                        z = ds[var].values
                        sh['t'] = np.append(sh['t'], t)
                        sh['values'] = np.append(sh['values'], z)

                        # add pressure to dictionary of sci vars
                        if 'MOAS' in subsite:
                            if 'CTD' in main_sensor:  # for glider CTDs, pressure is a coordinate
                                pressure = 'sci_water_pressure_dbar'
                                y = ds[pressure].values
                                if ds[pressure].units not in y_unit:
                                    y_unit.append(ds[pressure].units)
                                if ds[pressure].long_name not in y_name:
                                    y_name.append(ds[pressure].long_name)
                            else:
                                pressure = 'int_ctd_pressure'
                                y = ds[pressure].values
                                if ds[pressure].units not in y_unit:
                                    y_unit.append(ds[pressure].units)
                                if ds[pressure].long_name not in y_name:
                                    y_name.append(ds[pressure].long_name)
                        else:
                            pressure = pf.pressure_var(ds, ds.data_vars.keys())
                            y = ds[pressure].values

                        sh['pressure'] = np.append(sh['pressure'], y)

                        try:
                            ds[pressure].units
                            if ds[pressure].units not in y_unit:
                                y_unit.append(ds[pressure].units)
                        except AttributeError:
                            print('pressure attributes missing units')
                            if 'pressure unit missing' not in y_unit:
                                y_unit.append('pressure unit missing')

                        try:
                            ds[pressure].long_name
                            if ds[pressure].long_name not in y_name:
                                y_name.append(ds[pressure].long_name)
                        except AttributeError:
                            print('pressure attributes missing long_name')
                            if 'pressure long name missing' not in y_name:
                                y_name.append('pressure long name missing')

            # condense the pressure units/names collected across files
            if len(y_unit) != 1:
                print('pressure unit varies')
                if 'dbar' in y_unit:
                    y_unit = 'dbar'
                print(y_unit)
            else:
                y_unit = y_unit[0]

            if len(y_name) != 1:
                print('pressure long name varies')
                if 'Seawater Pressure' in y_name:
                    y_name = 'Seawater Pressure'
                print(y_name)
            else:
                y_name = y_name[0]

            # create a folder to save variable statistics
            mDir = '/Users/leila/Documents/NSFEduSupport/github/data-review-tools/data_review/final_stats'
            save_dir_stat = os.path.join(mDir, array, subsite)
            cf.create_dir(save_dir_stat)
            stat_df = pd.DataFrame()
            for m, n in sci_vars_dict.items():
                for sv, vinfo in n['vars'].items():
                    print(sv)
                    if len(vinfo['t']) < 1:
                        print('no variable data to plot')
                        continue

                    sv_units = vinfo['units'][0]
                    fv = vinfo['fv'][0]
                    t0 = pd.to_datetime(min(
                        vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                    t1 = pd.to_datetime(max(
                        vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                    t = vinfo['t']
                    z = vinfo['values']
                    y = vinfo['pressure']

                    title = ' '.join((r, ms))

                    # Check if the array is all NaNs
                    if sum(np.isnan(z)) == len(z):
                        print('Array of all NaNs - skipping plot.')
                        continue

                    # Check if the array is all fill values
                    if len(z[z != fv]) == 0:
                        print('Array of all fill values - skipping plot.')
                        continue

                    # reject fill values
                    fv_ind = z != fv
                    y_nofv = y[fv_ind]
                    t_nofv = t[fv_ind]
                    z_nofv = z[fv_ind]
                    print(np.sum(~fv_ind), ' fill values')

                    # reject NaNs
                    nan_ind = ~np.isnan(z_nofv)
                    t_nofv_nonan = t_nofv[nan_ind]
                    y_nofv_nonan = y_nofv[nan_ind]
                    z_nofv_nonan = z_nofv[nan_ind]
                    print(np.sum(~nan_ind), ' NaNs')

                    # reject extreme values
                    ev_ind = cf.reject_extreme_values(z_nofv_nonan)
                    t_nofv_nonan_noev = t_nofv_nonan[ev_ind]
                    y_nofv_nonan_noev = y_nofv_nonan[ev_ind]
                    z_nofv_nonan_noev = z_nofv_nonan[ev_ind]
                    print(np.sum(~ev_ind), ' Extreme Values', '|1e7|')

                    # reject values outside global ranges:
                    global_min, global_max = cf.get_global_ranges(r, sv)
                    # platform not in qc-table (parad_k_par)
                    # global_min = 0
                    # global_max = 2500
                    print('global ranges for : {}-{}  {} - {}'.format(
                        r, sv, global_min, global_max))
                    if isinstance(global_min, (int, float)) and isinstance(
                            global_max, (int, float)):
                        gr_ind = cf.reject_global_ranges(
                            z_nofv_nonan_noev, global_min, global_max)
                        t_nofv_nonan_noev_nogr = t_nofv_nonan_noev[gr_ind]
                        y_nofv_nonan_noev_nogr = y_nofv_nonan_noev[gr_ind]
                        z_nofv_nonan_noev_nogr = z_nofv_nonan_noev[gr_ind]
                    else:
                        t_nofv_nonan_noev_nogr = t_nofv_nonan_noev
                        y_nofv_nonan_noev_nogr = y_nofv_nonan_noev
                        z_nofv_nonan_noev_nogr = z_nofv_nonan_noev

                    if len(z_nofv_nonan_noev) == 0:
                        continue

                    if m == 'common_stream_placeholder':
                        sname = '-'.join((r, sv))
                    else:
                        sname = '-'.join((r, m, sv))
                    sname = '_'.join((sname, sv_units))

                    # group by depth range
                    # if sv != 'pressure':
                    #     columns = ['tsec', 'dbar', str(sv)]
                    #
                    #     # select depth bin size for the data group function
                    #     bin_size = 10
                    #     min_r = int(round(min(y_nofv_nonan_noev) - bin_size))
                    #     max_r = int(round(max(y_nofv_nonan_noev) + bin_size))
                    #     ranges = list(range(min_r, max_r, bin_size))
                    #     groups, d_groups = gt.group_by_depth_range(t_nofv_nonan_noev_nogr, y_nofv_nonan_noev_nogr,
                    #                                                z_nofv_nonan_noev_nogr, columns, ranges)

                    # if (ms.split('-')[0]) == (ps_df[0].values[0].split('-')[0]):
                    #     if 'pressure' not in sv:
                    #         print('final_stats_{}-{}-{}-{}'.format(r,
                    #                                                ms.split('-')[0],
                    #                                                ps_df[0].values[0].split('-')[0],
                    #                                                sv))
                    #         stat_data = groups.describe()[sv]
                    #         stat_data.insert(loc=0, column='parameter', value=sv, allow_duplicates=False)
                    #         stat_df = stat_df.append(stat_data)

                    # if sv == 'optical_backscatter':
                    #     less_ind = z_nofv_nonan_noev < 0.0004
                    #     print(sv, ' < 0.0004', len(less_ind))
                    #     more_ind = z_nofv_nonan_noev > 0.01
                    #     print(sv, ' > 0.01', len(more_ind))

                    # Plot all data
                    clabel = sv + " (" + sv_units + ")"
                    ylabel = y_name + " (" + y_unit + ")"

                    fig, ax = pf.plot_xsection(subsite,
                                               t_nofv_nonan_noev,
                                               y_nofv_nonan_noev,
                                               z_nofv_nonan_noev,
                                               clabel,
                                               ylabel,
                                               stdev=None)

                    ax.set_title((title + '\n' + t0 + ' - ' + t1), fontsize=9)

                    pf.save_fig(save_dir, sname)

                    # Plot data with outliers removed
                    fig, ax = pf.plot_xsection(subsite,
                                               t_nofv_nonan_noev_nogr,
                                               y_nofv_nonan_noev_nogr,
                                               z_nofv_nonan_noev_nogr,
                                               clabel,
                                               ylabel,
                                               stdev=5)
                    ax.set_title((title + '\n' + t0 + ' - ' + t1), fontsize=9)
                    sfile = '_'.join((sname, 'rmoutliers'))
                    pf.save_fig(save_dir, sfile)

                    # plot data with excluded time ranges removed
                    dr = pd.read_csv(
                        'https://datareview.marine.rutgers.edu/notes/export')
                    drn = dr.loc[dr.type == 'exclusion']
                    if len(drn) != 0:
                        subsite_node = '-'.join((subsite, r.split('-')[1]))
                        drne = drn.loc[drn.reference_designator.isin(
                            [subsite, subsite_node, r])]

                        t_ex = t_nofv_nonan_noev_nogr
                        y_ex = y_nofv_nonan_noev_nogr
                        z_ex = z_nofv_nonan_noev_nogr
                        for i, row in drne.iterrows():
                            sdate = cf.format_dates(row.start_date)
                            edate = cf.format_dates(row.end_date)
                            ts = np.datetime64(sdate)
                            te = np.datetime64(edate)
                            # keep only points outside the exclusion window
                            ind = (t_ex < ts) | (t_ex > te)
                            t_ex = t_ex[ind]
                            z_ex = z_ex[ind]
                            y_ex = y_ex[ind]

                        fig, ax = pf.plot_xsection(subsite,
                                                   t_ex,
                                                   y_ex,
                                                   z_ex,
                                                   clabel,
                                                   ylabel,
                                                   stdev=None)
                        ax.set_title((title + '\n' + t0 + ' - ' + t1),
                                     fontsize=9)

                        sfile = '_'.join((sname, 'rmsuspectdata'))
                        pf.save_fig(save_dir, sfile)
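
A hypothetical invocation of the main() above. The URL is a placeholder shaped like the catalog paths the parser expects (.../<timestamp>-<subsite>-<node>-<port>-<instrument>-<method>-<stream>/); sDir and plot_type are likewise made up:

url_list = [
    'https://opendap.example.edu/thredds/catalog/ooi/'
    '20190101T000000-CE06ISSM-RID16-03-DOSTAD000-recovered_host-'
    'dosta_abcdjm_ctdbp_dcl_instrument_recovered/',
]
sDir = '/tmp/plots'           # local output directory
plot_type = 'xsection_plots'  # subfolder name for this plot type

if __name__ == '__main__':
    main(url_list, sDir, plot_type)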
Example n. 20
def main(sDir, url_list):
    rd_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        if rd not in rd_list:
            rd_list.append(rd)

    for r in rd_list:
        print('\n{}'.format(r))
        subsite = r.split('-')[0]
        array = subsite[0:2]

        ps_df, n_streams = cf.get_preferred_stream_info(r)

        # get end times of deployments
        dr_data = cf.refdes_datareview_json(r)
        deployments = []
        end_times = []
        for index, row in ps_df.iterrows():
            deploy = row['deployment']
            deploy_info = get_deployment_information(dr_data, int(deploy[-4:]))
            deployments.append(int(deploy[-4:]))
            end_times.append(pd.to_datetime(deploy_info['stop_date']))

        # filter datasets
        datasets = []
        for u in url_list:
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join(
                (splitter[1], splitter[2], splitter[3], splitter[4]))
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])
                datasets.append(udatasets)
        datasets = list(itertools.chain(*datasets))
        main_sensor = r.split('-')[-1]
        fdatasets = cf.filter_collocated_instruments(main_sensor, datasets)
        methodstream = []
        for f in fdatasets:
            methodstream.append('-'.join((f.split('/')[-2].split('-')[-2],
                                          f.split('/')[-2].split('-')[-1])))

        for ms in np.unique(methodstream):
            fdatasets_sel = [x for x in fdatasets if ms in x]
            save_dir = os.path.join(sDir, array, subsite, r,
                                    'timeseries_daily_plots',
                                    ms.split('-')[0])
            cf.create_dir(save_dir)

            stream_sci_vars_dict = dict()
            for x in dr_data['instrument']['data_streams']:
                dr_ms = '-'.join((x['method'], x['stream_name']))
                if ms == dr_ms:
                    stream_sci_vars_dict[dr_ms] = dict(vars=dict())
                    sci_vars = dict()
                    for y in x['stream']['parameters']:
                        if y['data_product_type'] == 'Science Data':
                            sci_vars.update(
                                {y['name']: dict(db_units=y['unit'])})
                    if len(sci_vars) > 0:
                        stream_sci_vars_dict[dr_ms]['vars'] = sci_vars

            sci_vars_dict = cd.initialize_empty_arrays(stream_sci_vars_dict,
                                                       ms)
            print('\nAppending data from files: {}'.format(ms))
            for fd in fdatasets_sel:
                ds = xr.open_dataset(fd, mask_and_scale=False)
                for var in list(sci_vars_dict[ms]['vars'].keys()):
                    sh = sci_vars_dict[ms]['vars'][var]
                    if ds[var].units == sh['db_units']:
                        if ds[var]._FillValue not in sh['fv']:
                            sh['fv'].append(ds[var]._FillValue)
                        if ds[var].units not in sh['units']:
                            sh['units'].append(ds[var].units)
                        tD = ds['time'].values
                        varD = ds[var].values
                        sh['t'] = np.append(sh['t'], tD)
                        sh['values'] = np.append(sh['values'], varD)

            print('\nPlotting data')
            for m, n in sci_vars_dict.items():
                for sv, vinfo in n['vars'].items():
                    print(sv)
                    if len(vinfo['t']) < 1:
                        print('no variable data to plot')
                    else:
                        sv_units = vinfo['units'][0]
                        t0 = pd.to_datetime(min(
                            vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                        t1 = pd.to_datetime(max(
                            vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                        x = vinfo['t']
                        y = vinfo['values']

                        # reject NaNs
                        nan_ind = ~np.isnan(y)
                        x_nonan = x[nan_ind]
                        y_nonan = y[nan_ind]

                        # reject fill values
                        fv_ind = y_nonan != vinfo['fv'][0]
                        x_nonan_nofv = x_nonan[fv_ind]
                        y_nonan_nofv = y_nonan[fv_ind]

                        # reject extreme values
                        Ev_ind = cf.reject_extreme_values(y_nonan_nofv)
                        y_nonan_nofv_nE = y_nonan_nofv[Ev_ind]
                        x_nonan_nofv_nE = x_nonan_nofv[Ev_ind]

                        # reject values outside global ranges:
                        global_min, global_max = cf.get_global_ranges(r, sv)
                        gr_ind = cf.reject_global_ranges(
                            y_nonan_nofv_nE, global_min, global_max)
                        y_nonan_nofv_nE_nogr = y_nonan_nofv_nE[gr_ind]
                        x_nonan_nofv_nE_nogr = x_nonan_nofv_nE[gr_ind]

                        if len(y_nonan_nofv) > 0:
                            if m == 'common_stream_placeholder':
                                sname = '-'.join((r, sv))
                            else:
                                sname = '-'.join((r, m, sv))

                            # 1st group by year
                            ygroups, gy_data = gt.group_by_timerange(
                                x_nonan_nofv_nE_nogr, y_nonan_nofv_nE_nogr,
                                'A')

                            tn = 1
                            for n in range(len(ygroups)):
                                x_time = gy_data[n + tn].dropna(axis=0)
                                y_data = gy_data[n + (tn + 1)].dropna(axis=0)

                                # 2nd group by month
                                mgroups, gm_data = gt.group_by_timerange(
                                    x_time.values, y_data.values, 'M')

                                if len(x_time) == 0:
                                    continue

                                td = 1
                                for jj in range(len(mgroups)):
                                    x_time = gm_data[jj + td].dropna(axis=0)
                                    y_data = gm_data[jj +
                                                     (td + 1)].dropna(axis=0)

                                    if len(x_time) == 0:
                                        continue

                                    # 3rd group by day
                                    dgroups, gd_data = gt.group_by_timerange(
                                        x_time.values, y_data.values, 'D')

                                    x_year = x_time[0].year
                                    x_month = x_time[0].month
                                    month_name = calendar.month_abbr[x_month]
                                    print(x_year, x_month)

                                    sfile = '_'.join(
                                        (str(x_year), str(x_month), sname))

                                    # prepare plot layout

                                    fig, ax = pyplot.subplots(nrows=7,
                                                              ncols=5,
                                                              sharey=True)
                                    title_in = month_name + '-' + str(x_year) + \
                                                  ' calendar days \n Parameter: ' + \
                                                  sv + " (" + sv_units + ")"

                                    ax[0][2].text(0.5,
                                                  1.5,
                                                  title_in,
                                                  horizontalalignment='center',
                                                  fontsize=8,
                                                  transform=ax[0][2].transAxes)
                                    num_i = 0
                                    day_i = {}
                                    for kk in list(range(0, 7)):
                                        for ff in list(range(0, 5)):
                                            num_i += 1
                                            day_i[num_i] = [kk, ff]
                                            ax[kk][ff].tick_params(
                                                axis='both',
                                                which='both',
                                                color='r',
                                                labelsize=7,
                                                labelcolor='m',
                                                rotation=0)

                                            ax[kk][ff].text(
                                                0.1,
                                                0.75,
                                                str(num_i),
                                                horizontalalignment='center',
                                                fontsize=7,
                                                transform=ax[kk][ff].transAxes,
                                                bbox=dict(
                                                    boxstyle="round",
                                                    ec=(0., 0.5, 0.5),
                                                    fc=(1., 1., 1.),
                                                ))

                                            if kk != 6:
                                                ax[kk][ff].tick_params(
                                                    labelbottom=False)
                                            if ff != 0:
                                                ax[kk][ff].tick_params(
                                                    labelright=False)

                                            if kk == 6 and ff == 0:
                                                ax[kk][ff].set_xlabel(
                                                    'Hours',
                                                    rotation=0,
                                                    fontsize=8,
                                                    color='b')

                                            if kk == 6 and ff in range(1, 5):
                                                fig.delaxes(ax[kk][ff])

                                    tm = 1
                                    for mt in range(len(dgroups)):
                                        x_time = gd_data[mt +
                                                         tm].dropna(axis=0)
                                        y_DO = gd_data[mt +
                                                       (tm + 1)].dropna(axis=0)

                                        series_m = pd.DataFrame(
                                            columns=['DO_n'], index=x_time)
                                        series_m['DO_n'] = list(y_DO[:])

                                        if len(x_time) == 0:
                                            continue

                                        x_day = x_time[0].day

                                        print(x_time[0].year, x_time[0].month,
                                              x_day)

                                        i0 = day_i[x_day][0]
                                        i1 = day_i[x_day][1]

                                        # Plot data
                                        series_m.plot(ax=ax[i0][i1],
                                                      linestyle='None',
                                                      marker='.',
                                                      markersize=1)
                                        ax[i0][i1].legend().set_visible(False)

                                        ma = series_m.rolling('3600s').mean()
                                        mstd = series_m.rolling('3600s').std()

                                        ax[i0][i1].plot(ma.index,
                                                        ma.DO_n,
                                                        'b',
                                                        linewidth=0.25)
                                        ax[i0][i1].fill_between(
                                            mstd.index,
                                            ma.DO_n - 3 * mstd.DO_n,
                                            ma.DO_n + 3 * mstd.DO_n,
                                            color='b',
                                            alpha=0.2)

                                        # prepare the time axis parameters
                                        datemin = datetime.datetime(
                                            x_year, x_month, x_day, 0)
                                        datemax = datetime.datetime(
                                            x_year, x_month, x_day, 23)

                                        ax[i0][i1].set_xlim(datemin, datemax)
                                        xLocator = mdates.HourLocator(
                                            interval=4)  # every 4 hours
                                        myFmt = mdates.DateFormatter('%H')
                                        ax[i0][i1].xaxis.set_minor_locator(
                                            xLocator)
                                        ax[i0][i1].xaxis.set_minor_formatter(
                                            myFmt)
                                        ax[i0][i1].xaxis.set_major_locator(
                                            pyplot.NullLocator())
                                        ax[i0][i1].xaxis.set_major_formatter(
                                            pyplot.NullFormatter())
                                        yLocator = MaxNLocator(prune='both',
                                                               nbins=3)
                                        ax[i0][i1].yaxis.set_major_locator(
                                            yLocator)

                                        if x_day != 31:
                                            ax[i0][i1].tick_params(
                                                labelbottom=False)
                                            ax[i0][i1].set_xlabel(' ')
                                        else:
                                            ax[i0][i1].tick_params(
                                                which='both',
                                                color='r',
                                                labelsize=7,
                                                labelcolor='m',
                                                length=0.1,
                                                pad=0.1)
                                            ax[i0][i1].set_xlabel('Hours',
                                                                  rotation=0,
                                                                  fontsize=8,
                                                                  color='b')

                                        ymin, ymax = ax[i0][i1].get_ylim()
                                        dep = 1
                                        for etimes in end_times:
                                            ax[i0][i1].axvline(x=etimes,
                                                               color='b',
                                                               linestyle='--',
                                                               linewidth=.6)
                                            ax[i0][i1].text(
                                                etimes,
                                                ymin + 50,
                                                str(dep),
                                                fontsize=6,
                                                style='italic',
                                                bbox=dict(
                                                    boxstyle="round",
                                                    ec=(0., 0.5, 0.5),
                                                    fc=(1., 1., 1.),
                                                ))

                                            dep += 1
                                        tm += 1
                                    td += 1
                                    pf.save_fig(save_dir, sfile)
                                tn += 1
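
The num_i/day_i bookkeeping above maps day-of-month 1 through 31 onto a 7-row by 5-column grid of subplots, row-major. An equivalent direct computation (a sketch, not the code used above):

def day_to_cell(day):
    """Map day-of-month (1..31) to a (row, col) cell in a 7x5 grid, row-major."""
    return (day - 1) // 5, (day - 1) % 5

assert day_to_cell(1) == (0, 0)   # first day -> top-left
assert day_to_cell(5) == (0, 4)   # fifth day -> end of first row
assert day_to_cell(31) == (6, 0)  # day 31 -> the lone cell in the last row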
Example n. 21
def main(sDir, url_list, start_time, end_time, preferred_only):
    rd_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        if rd not in rd_list and 'OPTAA' in rd:
            rd_list.append(rd)

    for r in rd_list:
        print('\n{}'.format(r))
        datasets = []
        for u in url_list:
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join((splitter[1], splitter[2], splitter[3], splitter[4]))
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])
                for ud in udatasets:  # filter out collocated data files
                    if 'OPTAA' in ud.split('/')[-1]:
                        datasets.append(ud)
        fdatasets = []
        if preferred_only == 'yes':
            # get the preferred stream information
            ps_df, n_streams = cf.get_preferred_stream_info(r)
            for index, row in ps_df.iterrows():
                for ii in range(n_streams):
                    try:
                        rms = '-'.join((r, row[ii]))
                    except TypeError:
                        continue
                    for dd in datasets:
                        spl = dd.split('/')[-2].split('-')
                        catalog_rms = '-'.join((spl[1], spl[2], spl[3], spl[4], spl[5], spl[6]))
                        fdeploy = dd.split('/')[-1].split('_')[0]
                        if rms == catalog_rms and fdeploy == row['deployment']:
                            fdatasets.append(dd)
        else:
            fdatasets = datasets

        fdatasets = np.unique(fdatasets).tolist()
        for fd in fdatasets:
            ds = xr.open_dataset(fd, mask_and_scale=False)
            ds = ds.swap_dims({'obs': 'time'})

            if start_time is not None and end_time is not None:
                ds = ds.sel(time=slice(start_time, end_time))
                if len(ds['time'].values) == 0:
                    print('No data to plot for specified time range: ({} to {})'.format(start_time, end_time))
                    continue

            fname, subsite, refdes, method, stream, deployment = cf.nc_attributes(fd)
            #sci_vars = cf.return_science_vars(stream)
            sci_vars = ['optical_absorption', 'beam_attenuation']
            print('\nPlotting {} {}'.format(r, deployment))
            array = subsite[0:2]
            filename = '_'.join(fname.split('_')[:-1])
            save_dir = os.path.join(sDir, array, subsite, refdes, 'timeseries_plots')
            cf.create_dir(save_dir)

            tm = ds['time'].values
            t0 = pd.to_datetime(tm.min()).strftime('%Y-%m-%dT%H:%M:%S')
            t1 = pd.to_datetime(tm.max()).strftime('%Y-%m-%dT%H:%M:%S')
            title = ' '.join((deployment, refdes, method))

            # # add chl-a data from the collocated fluorometer
            # flor_url = [s for s in url_list if r.split('-')[0] in s and 'FLOR' in s]
            # if len(flor_url) == 1:
            #     flor_datasets = cf.get_nc_urls(flor_url)
            #     # filter out collocated datasets
            #     flor_dataset = [j for j in flor_datasets if ('FLOR' in j.split('/')[-1] and deployment in j.split('/')[-1])]
            #     if len(flor_dataset) > 0:
            #         ds_flor = xr.open_dataset(flor_dataset[0], mask_and_scale=False)
            #         ds_flor = ds_flor.swap_dims({'obs': 'time'})
            #         flor_t0 = dt.datetime.strptime(t0, '%Y-%m-%dT%H:%M:%S')
            #         flor_t1 = dt.datetime.strptime(t1, '%Y-%m-%dT%H:%M:%S')
            #         ds_flor = ds_flor.sel(time=slice(flor_t0, flor_t1))
            #         t_flor = ds_flor['time'].values
            #         flor_sci_vars = cf.return_science_vars(ds_flor.stream)
            #         for fsv in flor_sci_vars:
            #             if ds_flor[fsv].long_name == 'Chlorophyll-a Concentration':
            #                 chla = ds_flor[fsv]

            for var in sci_vars:
                print(var)
                if var == 'optical_absorption':
                    wv = ds['wavelength_a'].values
                else:
                    wv = ds['wavelength_c'].values
                vv = ds[var]
                fv = vv._FillValue
                fig1, ax1 = plt.subplots()
                fig2, ax2 = plt.subplots()
                plotting = []  # keep track if anything is plotted
                wavelengths = []
                iwavelengths = []
                for i in range(len(wv)):
                    if (wv[i] > 671.) and (wv[i] < 679.):
                        wavelengths.append(wv[i])
                        iwavelengths.append(i)

                colors = ['purple', 'green', 'orange']
                for iw in range(len(iwavelengths)):
                    v = vv.sel(wavelength=iwavelengths[iw]).values
                    n_all = len(v)
                    n_nan = np.sum(np.isnan(v))

                    # convert fill values to nans
                    v[v == fv] = np.nan
                    n_fv = np.sum(np.isnan(v)) - n_nan

                    if n_nan + n_fv < n_all:
                        # plot before global ranges are removed
                        #fig, ax = pf.plot_optaa(tm, v, vv.name, vv.units)
                        plotting.append('yes')
                        ax1.scatter(tm, v, c=colors[iw], label='{} nm'.format(wavelengths[iw]),
                                    marker='.', s=1)

                        # reject data outside of global ranges
                        [g_min, g_max] = cf.get_global_ranges(r, var)
                        if g_min is not None and g_max is not None:
                            v[v < g_min] = np.nan
                            v[v > g_max] = np.nan
                            n_grange = np.sum(np.isnan(v)) - n_fv - n_nan
                        else:
                            n_grange = 'no global ranges'

                        # plot after global ranges are removed

                        ax2.scatter(tm, v, c=colors[iw], label='{} nm: rm {} GR'.format(wavelengths[iw], n_grange),
                                    marker='.', s=1)
                        # if iw == len(wavelengths) - 1:
                        #     ax2a = ax2.twinx()
                        #     ax2a.scatter(t_flor, chla.values, c='lime', marker='.', s=1)
                        #     ax2a.set_ylabel('Fluorometric Chl-a ({})'.format(chla.units))

                if len(plotting) > 0:
                    ax1.grid()
                    pf.format_date_axis(ax1, fig1)
                    ax1.legend(loc='best', fontsize=7)
                    ax1.set_ylabel((var + " (" + vv.units + ")"), fontsize=9)
                    ax1.set_title((title + '\n' + t0 + ' - ' + t1), fontsize=9)
                    sfile = '-'.join((filename, var, t0[:10]))
                    save_file = os.path.join(save_dir, sfile)
                    fig1.savefig(str(save_file), dpi=150)

                    ax2.grid()
                    pf.format_date_axis(ax2, fig2)
                    ax2.legend(loc='best', fontsize=7)
                    ax2.set_ylabel((var + " (" + vv.units + ")"), fontsize=9)
                    title_gr = 'GR: global ranges'
                    ax2.set_title((title + '\n' + t0 + ' - ' + t1 + '\n' + title_gr), fontsize=9)
                    sfile2 = '-'.join((filename, var, t0[:10], 'rmgr'))
                    save_file2 = os.path.join(save_dir, sfile2)
                    fig2.savefig(str(save_file2), dpi=150)

            plt.close('all')
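
The wavelength loop above keeps only the OPTAA channels between 671 and 679 nm (around the ~676 nm chlorophyll absorption line) and converts fill values to NaNs before plotting. A compact NumPy sketch of the same selection, with invented wavelengths and fill value:

import numpy as np

wv = np.array([650.2, 672.4, 675.9, 678.3, 690.0])  # invented wavelengths (nm)
fv = -9999999.0                                      # invented fill value

# indices of channels strictly between 671 and 679 nm
iwavelengths = np.where((wv > 671.) & (wv < 679.))[0]

v = np.array([0.12, fv, 0.08, 0.11, 0.50])  # invented absorption values
v = v[iwavelengths]   # keep only the selected channels
v[v == fv] = np.nan   # mask fill values, as in the example
print(iwavelengths, v)  # [1 2 3] [ nan 0.08 0.11]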
def main(url_list, sDir, plot_type):
    """""
    URL : path to instrument data by methods
    sDir : path to the directory on your machine to save files
    plot_type: folder name for a plot type

    """ ""
    rd_list = []
    ms_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        ms = uu.split(rd + '-')[1].split('/')[0]
        if rd not in rd_list:
            rd_list.append(rd)
        if ms not in ms_list:
            ms_list.append(ms)
    ''' 
    separate different instruments
    '''
    for r in rd_list:
        print('\n{}'.format(r))
        subsite = r.split('-')[0]
        array = subsite[0:2]
        main_sensor = r.split('-')[-1]

        ps_df, n_streams = cf.get_preferred_stream_info(r)

        # read in the analysis file
        dr_data = cf.refdes_datareview_json(r)

        # get end times of deployments
        deployments = []
        end_times = []
        for index, row in ps_df.iterrows():
            deploy = row['deployment']
            deploy_info = get_deployment_information(dr_data, int(deploy[-4:]))
            deployments.append(int(deploy[-4:]))
            end_times.append(pd.to_datetime(deploy_info['stop_date']))

        # get the list of data files and filter out collocated instruments and other streams
        datasets = []
        for u in url_list:
            print(u)
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join(
                (splitter[1], splitter[2], splitter[3], splitter[4]))
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])
                datasets.append(udatasets)

        datasets = list(itertools.chain(*datasets))
        fdatasets = cf.filter_collocated_instruments(main_sensor, datasets)
        fdatasets = cf.filter_other_streams(r, ms_list, fdatasets)
        '''
        separate the data files by methods
        '''
        for ms in ms_list:  # np.unique(methodstream)
            fdatasets_sel = [x for x in fdatasets if ms in x]

            # create a folder to save figures
            save_dir = os.path.join(sDir, array, subsite, r, plot_type,
                                    ms.split('-')[0])
            cf.create_dir(save_dir)

            # create a dictionary for science variables from analysis file
            stream_sci_vars_dict = dict()
            for x in dr_data['instrument']['data_streams']:
                dr_ms = '-'.join((x['method'], x['stream_name']))
                if ms == dr_ms:
                    stream_sci_vars_dict[dr_ms] = dict(vars=dict())
                    sci_vars = dict()
                    for y in x['stream']['parameters']:
                        if y['data_product_type'] == 'Science Data':
                            sci_vars.update(
                                {y['name']: dict(db_units=y['unit'])})
                    if len(sci_vars) > 0:
                        stream_sci_vars_dict[dr_ms]['vars'] = sci_vars

            # initialize an empty data array for science variables in dictionary
            sci_vars_dict = cd.initialize_empty_arrays(stream_sci_vars_dict,
                                                       ms)

            y_unit = []
            y_name = []
            for fd in fdatasets_sel:
                ds = xr.open_dataset(fd, mask_and_scale=False)
                print('\nAppending data file: {}'.format(fd.split('/')[-1]))
                for var in list(sci_vars_dict[ms]['vars'].keys()):
                    sh = sci_vars_dict[ms]['vars'][var]
                    if ds[var].units == sh['db_units']:
                        if ds[var]._FillValue not in sh['fv']:
                            sh['fv'].append(ds[var]._FillValue)
                        if ds[var].units not in sh['units']:
                            sh['units'].append(ds[var].units)

                        # time
                        t = ds['time'].values
                        t0 = pd.to_datetime(
                            t.min()).strftime('%Y-%m-%dT%H:%M:%S')
                        t1 = pd.to_datetime(
                            t.max()).strftime('%Y-%m-%dT%H:%M:%S')

                        # sci variable
                        z = ds[var].values
                        sh['t'] = np.append(sh['t'], t)
                        sh['values'] = np.append(sh['values'], z)

                        # add pressure to dictionary of sci vars
                        if 'MOAS' in subsite:
                            if 'CTD' in main_sensor:  # for glider CTDs, pressure is a coordinate
                                pressure = 'sci_water_pressure_dbar'
                                y = ds[pressure].values
                                if ds[pressure].units not in y_unit:
                                    y_unit.append(ds[pressure].units)
                                if ds[pressure].long_name not in y_name:
                                    y_name.append(ds[pressure].long_name)
                            else:
                                pressure = 'int_ctd_pressure'
                                y = ds[pressure].values
                                if ds[pressure].units not in y_unit:
                                    y_unit.append(ds[pressure].units)
                                if ds[pressure].long_name not in y_name:
                                    y_name.append(ds[pressure].long_name)
                        else:
                            pressure = pf.pressure_var(ds, ds.data_vars.keys())
                            y = ds[pressure].values
                            if ds[pressure].units not in y_unit:
                                y_unit.append(ds[pressure].units)
                            if ds[pressure].long_name not in y_name:
                                y_name.append(ds[pressure].long_name)

                        sh['pressure'] = np.append(sh['pressure'], y)

            if len(y_unit) != 1:
                print('pressure unit varies!')
            else:
                y_unit = y_unit[0]

            if len(y_name) != 1:
                print('pressure long name varies!')
            else:
                y_name = y_name[0]

            for m, n in sci_vars_dict.items():
                for sv, vinfo in n['vars'].items():
                    print('\nWorking on variable: {}'.format(sv))
                    if len(vinfo['t']) < 1:
                        print('no variable data to plot')
                        continue

                    sv_units = vinfo['units'][0]
                    fv = vinfo['fv'][0]
                    t0 = pd.to_datetime(min(
                        vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                    t1 = pd.to_datetime(max(
                        vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                    t = vinfo['t']
                    x = vinfo['values']
                    y = vinfo['pressure']

                    # Check if the array is all NaNs
                    if sum(np.isnan(x)) == len(x):
                        print('Array of all NaNs - skipping plot.')
                        continue

                    # Check if the array is all fill values
                    elif len(x[x != fv]) == 0:
                        print('Array of all fill values - skipping plot.')
                        continue

                    else:
                        # reject fill values
                        fv_ind = x != fv
                        y_nofv = y[fv_ind]
                        t_nofv = t[fv_ind]
                        c_nofv = cm.rainbow(np.linspace(0, 1, len(t[fv_ind])))
                        x_nofv = x[fv_ind]
                        print(np.sum(~fv_ind), ' fill values')

                        # reject NaNs
                        nan_ind = ~np.isnan(x_nofv)
                        t_nofv_nonan = t_nofv[nan_ind]
                        c_nofv_nonan = c_nofv[nan_ind]
                        y_nofv_nonan = y_nofv[nan_ind]
                        x_nofv_nonan = x_nofv[nan_ind]
                        print(np.sum(~nan_ind), ' NaNs')

                        # reject extreme values
                        ev_ind = cf.reject_extreme_values(x_nofv_nonan)
                        t_nofv_nonan_noev = t_nofv_nonan[ev_ind]
                        c_nofv_nonan_noev = c_nofv_nonan[ev_ind]
                        y_nofv_nonan_noev = y_nofv_nonan[ev_ind]
                        x_nofv_nonan_noev = x_nofv_nonan[ev_ind]
                        print(np.sum(~ev_ind), ' Extreme Values', '|1e7|')

                        # reject values outside global ranges:
                        global_min, global_max = cf.get_global_ranges(r, sv)
                        # platform not in qc-table (parad_k_par)
                        # global_min = 0
                        # global_max = 2500
                        print('global ranges for : {}-{}  {} - {}'.format(
                            r, sv, global_min, global_max))
                        if isinstance(global_min, (int, float)) and isinstance(
                                global_max, (int, float)):
                            gr_ind = cf.reject_global_ranges(
                                x_nofv_nonan_noev, global_min, global_max)
                            t_nofv_nonan_noev_nogr = t_nofv_nonan_noev[gr_ind]
                            y_nofv_nonan_noev_nogr = y_nofv_nonan_noev[gr_ind]
                            x_nofv_nonan_noev_nogr = x_nofv_nonan_noev[gr_ind]
                        else:
                            t_nofv_nonan_noev_nogr = t_nofv_nonan_noev
                            y_nofv_nonan_noev_nogr = y_nofv_nonan_noev
                            x_nofv_nonan_noev_nogr = x_nofv_nonan_noev

                    if len(x_nofv_nonan_noev) == 0:
                        continue

                    if m == 'common_stream_placeholder':
                        sname = '-'.join((r, sv))
                    else:
                        sname = '-'.join((r, m, sv))

                    if sv == 'pressure':  # binning pressure against itself is not useful
                        continue

                    columns = ['tsec', 'dbar', str(sv)]
                    bin_size = 10
                    min_r = int(round(min(y_nofv_nonan_noev) - bin_size))
                    max_r = int(round(max(y_nofv_nonan_noev) + bin_size))
                    ranges = list(range(min_r, max_r, bin_size))
                    groups, d_groups = gt.group_by_depth_range(
                        t_nofv_nonan_noev_nogr, y_nofv_nonan_noev_nogr,
                        x_nofv_nonan_noev_nogr, columns, ranges)

                    y_avg, n_avg, n_min, n_max, n0_std, n1_std, l_arr = [], [], [], [], [], [], []
                    tm = 1
                    for ii in range(len(groups)):
                        nan_ind = d_groups[ii + tm].notnull()
                        xtime = d_groups[ii + tm][nan_ind]
                        colors = cm.rainbow(np.linspace(0, 1, len(xtime)))
                        ypres = d_groups[ii + tm + 1][nan_ind]
                        nval = d_groups[ii + tm + 2][nan_ind]
                        tm += 2

                        l_arr.append(len(
                            nval))  # count of data to filter out small groups
                        y_avg.append(ypres.mean())
                        n_avg.append(nval.mean())
                        n_min.append(nval.min())
                        n_max.append(nval.max())
                        n_std = 3
                        n0_std.append(nval.mean() + n_std * nval.std())
                        n1_std.append(nval.mean() - n_std * nval.std())

                    # Plot all data
                    ylabel = y_name + " (" + y_unit + ")"
                    xlabel = sv + " (" + sv_units + ")"
                    clabel = 'Time'

                    fig, ax = pf.plot_profiles(x_nofv_nonan_noev_nogr,
                                               y_nofv_nonan_noev_nogr,
                                               t_nofv_nonan_noev_nogr,
                                               ylabel,
                                               xlabel,
                                               clabel,
                                               end_times,
                                               deployments,
                                               stdev=None)

                    title_text = ' '.join((r, ms.split('-')[-1])) + '\n' \
                                 + t0 + ' - ' + t1 + '\n' + str(bin_size) +\
                                 ' m average and ' + str(n_std) + ' std shown'

                    ax.set_title(title_text, fontsize=9)
                    ax.plot(n_avg, y_avg, '-k')

                    ax.fill_betweenx(y_avg,
                                     n0_std,
                                     n1_std,
                                     color='m',
                                     alpha=0.2)
                    pf.save_fig(save_dir, sname)

                    # Plot data with outliers removed

                    fig, ax = pf.plot_profiles(x_nofv_nonan_noev_nogr,
                                               y_nofv_nonan_noev_nogr,
                                               t_nofv_nonan_noev_nogr,
                                               ylabel,
                                               xlabel,
                                               clabel,
                                               end_times,
                                               deployments,
                                               stdev=5)
                    ax.set_title(' '.join((r, ms.split('-')[-1])) + '\n' \
                                 + t0 + ' - ' + t1, fontsize=9)
                    sfile = '_'.join((sname, 'rmoutliers'))
                    pf.save_fig(save_dir, sfile)
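
gt.group_by_depth_range is a project helper; as an illustration of the idea behind the bin statistics above, plain pandas can bin a profile by depth and compute the per-bin mean and a 3-standard-deviation envelope (the data and column names here are invented):

import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
df = pd.DataFrame({
    'dbar': rng.uniform(0, 100, 500),   # invented pressures
    'value': rng.normal(10, 2, 500),    # invented science variable
})

bin_size = 10
df['depth_bin'] = pd.cut(df['dbar'], bins=range(0, 110, bin_size))

stats = df.groupby('depth_bin', observed=True)['value'].agg(['mean', 'std', 'count'])
stats['upper'] = stats['mean'] + 3 * stats['std']  # upper envelope
stats['lower'] = stats['mean'] - 3 * stats['std']  # lower envelope
print(stats.head())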
def main(sDir, url_list):
    rd_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        if rd not in rd_list:
            rd_list.append(rd)

    for r in rd_list:
        print('\n{}'.format(r))
        subsite = r.split('-')[0]
        array = subsite[0:2]

        ps_df, n_streams = cf.get_preferred_stream_info(r)

        # get end times of deployments
        dr_data = cf.refdes_datareview_json(r)
        deployments = []
        end_times = []
        for index, row in ps_df.iterrows():
            deploy = row['deployment']
            deploy_info = get_deployment_information(dr_data, int(deploy[-4:]))
            deployments.append(int(deploy[-4:]))
            end_times.append(pd.to_datetime(deploy_info['stop_date']))

        # filter datasets
        datasets = []
        for u in url_list:
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join((splitter[1], splitter[2], splitter[3], splitter[4]))
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])
                datasets.append(udatasets)
        datasets = list(itertools.chain(*datasets))
        main_sensor = r.split('-')[-1]
        fdatasets = cf.filter_collocated_instruments(main_sensor, datasets)
        methodstream = []
        for f in fdatasets:
            methodstream.append('-'.join((f.split('/')[-2].split('-')[-2], f.split('/')[-2].split('-')[-1])))

        for ms in np.unique(methodstream):
            fdatasets_sel = [x for x in fdatasets if ms in x]

            check_ms = ms.split('-')[1]
            if 'recovered' in check_ms:
                check_ms = check_ms.split('_recovered')

            save_dir = os.path.join(sDir, array, subsite, r, 'timeseries_monthly_plot',
                                    check_ms[0], ms.split('-')[0])
            cf.create_dir(save_dir)

            stream_sci_vars_dict = dict()
            for x in dr_data['instrument']['data_streams']:
                dr_ms = '-'.join((x['method'], x['stream_name']))
                if ms == dr_ms:
                    stream_sci_vars_dict[dr_ms] = dict(vars=dict())
                    sci_vars = dict()
                    for y in x['stream']['parameters']:
                        if y['data_product_type'] == 'Science Data':
                            sci_vars.update({y['name']: dict(db_units=y['unit'])})
                    if len(sci_vars) > 0:
                        stream_sci_vars_dict[dr_ms]['vars'] = sci_vars

            sci_vars_dict = cd.initialize_empty_arrays(stream_sci_vars_dict, ms)
            print('\nAppending data from files: {}'.format(ms))
            for fd in fdatasets_sel:
                ds = xr.open_dataset(fd, mask_and_scale=False)
                for var in list(sci_vars_dict[ms]['vars'].keys()):
                    sh = sci_vars_dict[ms]['vars'][var]
                    if ds[var].units == sh['db_units']:
                        if ds[var]._FillValue not in sh['fv']:
                            sh['fv'].append(ds[var]._FillValue)
                        if ds[var].units not in sh['units']:
                            sh['units'].append(ds[var].units)
                        tD = ds['time'].values
                        varD = ds[var].values
                        sh['t'] = np.append(sh['t'], tD)
                        sh['values'] = np.append(sh['values'], varD)

            print('\nPlotting data')
            for m, n in sci_vars_dict.items():
                for sv, vinfo in n['vars'].items():
                    print(sv)
                    if len(vinfo['t']) < 1:
                        print('no variable data to plot')
                    else:
                        sv_units = vinfo['units'][0]
                        t0 = pd.to_datetime(min(vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                        t1 = pd.to_datetime(max(vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                        x = vinfo['t']
                        y = vinfo['values']

                        # reject NaNs
                        nan_ind = ~np.isnan(y)
                        x_nonan = x[nan_ind]
                        y_nonan = y[nan_ind]

                        # reject fill values
                        fv_ind = y_nonan != vinfo['fv'][0]
                        x_nonan_nofv = x_nonan[fv_ind]
                        y_nonan_nofv = y_nonan[fv_ind]

                        # reject extreme values
                        Ev_ind = cf.reject_extreme_values(y_nonan_nofv)
                        y_nonan_nofv_nE = y_nonan_nofv[Ev_ind]
                        x_nonan_nofv_nE = x_nonan_nofv[Ev_ind]

                        # reject values outside global ranges:
                        global_min, global_max = cf.get_global_ranges(r, sv)
                        gr_ind = cf.reject_global_ranges(y_nonan_nofv_nE, global_min, global_max)
                        y_nonan_nofv_nE_nogr = y_nonan_nofv_nE[gr_ind]
                        x_nonan_nofv_nE_nogr = x_nonan_nofv_nE[gr_ind]

                        title = ' '.join((r, ms.split('-')[0]))

                        if len(y_nonan_nofv) > 0:
                            if m == 'common_stream_placeholder':
                                sname = '-'.join((r, sv))
                            else:
                                sname = '-'.join((r, m, sv))

                            # 1st group by year
                            ygroups, gy_data = gt.group_by_timerange(x_nonan_nofv_nE_nogr, y_nonan_nofv_nE_nogr, 'A')

                            tn = 1
                            for yr in range(len(ygroups)):  # 'yr' avoids shadowing the outer loop's 'n'
                                x_time = gy_data[yr+tn].dropna(axis=0)
                                y_data = gy_data[yr+(tn+1)].dropna(axis=0)
                                y_data = y_data.astype(float)
                                # 2nd group by month
                                mgroups, gm_data = gt.group_by_timerange(x_time.values, y_data.values, 'M')

                                x_year = x_time[0].year
                                print(x_year)
                                # create bins for the histogram: use the global ranges
                                # when available, otherwise fall back to the monthly
                                # statistics of the data itself
                                if global_min is not None and global_max is not None:
                                    bin_min, bin_max = global_min, global_max
                                else:
                                    bin_min = min(mgroups.describe()['DO']['min'])
                                    bin_max = max(mgroups.describe()['DO']['max'])
                                lower_bound = int(round(bin_min))
                                upper_bound = int(round(bin_max + (bin_max / 50)))
                                step_bound = int(round((bin_max - bin_min) / 10))

                                if step_bound == 0:
                                    step_bound += 1

                                if (upper_bound - lower_bound) == step_bound:
                                    lower_bound -= 1
                                    upper_bound += 1
                                if (upper_bound - lower_bound) < step_bound:
                                    step_bound = int(round(step_bound / 10))

                                bin_range = list(range(lower_bound, upper_bound, step_bound))
                                print(bin_range)

                                # create color palette (one color per month group)
                                colors = color_names[:len(mgroups)]
                                print('colors selected: ', len(colors))
                                print(colors)


                                fig0, ax0 = pyplot.subplots(nrows=2, ncols=1)

                                # subplot for the basic statistics table (the KDE
                                # plot below draws on ax0[0], so the table goes on
                                # the second axis)
                                ax0[1].axis('off')
                                ax0[1].axis('tight')

                                the_table = ax0[1].table(cellText=mgroups.describe().round(2).values,
                                                         rowLabels=mgroups.describe().index.month,
                                                         rowColours=colors,
                                                         colLabels=mgroups.describe().columns.levels[1], loc='center')
                                the_table.set_fontsize(5)

                                fig, ax = pyplot.subplots(nrows=12, ncols=1, sharey=True)

                                for kk in list(range(0, 12)):
                                    ax[kk].tick_params(axis='both', which='both', color='r', labelsize=7,
                                                       labelcolor='m', rotation=0, pad=0.1, length=1)
                                    month_name = calendar.month_abbr[kk + 1]
                                    ax[kk].set_ylabel(month_name, rotation=0, fontsize=8, color='b', labelpad=20)
                                    if kk == 0:
                                        ax[kk].set_title(str(x_year) + '\n ' + sv + " (" + sv_units + ")" +
                                                         ' Global Range: [' + str(int(global_min)) + ',' + str(int(global_max)) + ']' +
                                                         '\n End of deployments are marked with a vertical line \n ' +
                                                         'Plotted: Data, Mean and 3STD (Method: 1 day' +
                                                         ' rolling window calculations)',
                                                         fontsize=8)

                                    if kk < 11:
                                        ax[kk].tick_params(labelbottom=False)
                                    if kk == 11:
                                        ax[kk].set_xlabel('Days', rotation=0, fontsize=8, color='b')

                                tm = 1
                                for mt in range(len(mgroups)):
                                    x_time = gm_data[mt+tm].dropna(axis=0)
                                    y_data = gm_data[mt+(tm+1)].dropna(axis=0)

                                    if len(x_time) == 0:
                                        # ax[plt_index].tick_params(which='both', labelbottom=False, labelleft=False,
                                        #                    pad=0.1, length=1)
                                        continue

                                    x_month = x_time[0].month
                                    col_name = str(x_month)

                                    series_m = pd.DataFrame(columns=[col_name], index=x_time)
                                    series_m[col_name] = list(y_data[:])


                                    # serie_n.plot.hist(ax=ax0[0], bins=bin_range,
                                    #                   histtype='bar', color=colors[ny], stacked=True)
                                    series_m.plot.kde(ax=ax0[0], color=colors[mt])
                                    ax0[0].legend(fontsize=8, bbox_to_anchor=(0., 1.12, 1., .102), loc=3,
                                                  ncol=len(mgroups), mode="expand", borderaxespad=0.)

                                    # ax0[0].set_xticks(bin_range)
                                    ax0[0].set_xlabel('Observation Ranges' + ' (' + sv + ', ' + sv_units + ')', fontsize=8)
                                    ax0[0].set_ylabel('Density', fontsize=8)  # 'Number of Observations'
                                    ax0[0].set_title('Kernel Density Estimates', fontsize=8)
                                    ax0[0].tick_params(which='both', labelsize=7, pad=0.1, length=1, rotation=0)

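                                    # subplot row for this month (calendar months are 1-based)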
                                    plt_index = x_month - 1

                                    # Plot data
                                    series_m.plot(ax=ax[plt_index], linestyle='None', marker='.', markersize=1)
                                    ax[plt_index].legend().set_visible(False)

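                                    # 1-day (86400 s) rolling mean and standard deviation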
                                    ma = series_m.rolling('86400s').mean()
                                    mstd = series_m.rolling('86400s').std()

                                    ax[plt_index].plot(ma.index, ma[col_name].values, 'b')
                                    ax[plt_index].fill_between(mstd.index, ma[col_name].values-3*mstd[col_name].values,
                                                               ma[col_name].values+3*mstd[col_name].values,
                                                               color='b', alpha=0.2)

                                    # prepare the time axis parameters
                                    __, nod = monthrange(x_year, x_month)  # nod = number of days in the month
                                    datemin = datetime.date(x_year, x_month, 1)
                                    datemax = datetime.date(x_year, x_month, nod)
                                    ax[plt_index].set_xlim(datemin, datemax)
                                    xlocator = mdates.DayLocator()  # every day
                                    myFmt = mdates.DateFormatter('%d')
                                    ax[plt_index].xaxis.set_major_locator(xlocator)
                                    ax[plt_index].xaxis.set_major_formatter(myFmt)
                                    ax[plt_index].xaxis.set_minor_locator(pyplot.NullLocator())
                                    ax[plt_index].xaxis.set_minor_formatter(pyplot.NullFormatter())

                                    # data_min = min(ma.DO_n.dropna(axis=0) - 5 * mstd.DO_n.dropna(axis=0))
                                    # data_max = max(ma.DO_n.dropna(axis=0) + 5 * mstd.DO_n.dropna(axis=0))
                                    # ax[plt_index].set_ylim([data_min, data_max])

                                    ylocator = MaxNLocator(prune='both', nbins=3)
                                    ax[plt_index].yaxis.set_major_locator(ylocator)


                                    if x_month != 12:
                                        ax[plt_index].tick_params(which='both', labelbottom=False, pad=0.1, length=1)
                                        ax[plt_index].set_xlabel(' ')
                                    else:
                                        ax[plt_index].tick_params(which='both', color='r', labelsize=7, labelcolor='m',
                                                           pad=0.1, length=1, rotation=0)
                                        ax[plt_index].set_xlabel('Days', rotation=0, fontsize=8, color='b')

                                    dep = 1
                                    for etimes in end_times:
                                        ax[plt_index].axvline(x=etimes, color='b', linestyle='--', linewidth=.8)
                                        if ma[col_name].values.any():
                                            ax[plt_index].text(etimes, max(ma[col_name].dropna(axis=0)), 'End' + str(dep),
                                                        fontsize=6, style='italic',
                                                        bbox=dict(boxstyle='round',
                                                                  ec=(0., 0.5, 0.5),
                                                                  fc=(1., 1., 1.),
                                                                  ))
                                        else:
                                            ax[plt_index].text(etimes, min(series_m[col_name]), 'End' + str(dep),
                                                        fontsize=6, style='italic',
                                                        bbox=dict(boxstyle='round',
                                                                  ec=(0., 0.5, 0.5),
                                                                  fc=(1., 1., 1.),
                                                                  ))
                                        dep += 1
                                    tm += 1
                                tn += 1


                                # pyplot.show()
                                sfile = '_'.join((str(x_year), sname))
                                save_file = os.path.join(save_dir, sfile)
                                fig.savefig(str(save_file), dpi=150)

                                sfile = '_'.join(('Statistics', str(x_year), sname))
                                save_file = os.path.join(save_dir, sfile)
                                fig0.savefig(str(save_file), dpi=150)
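
# Minimal driver sketch (assumed; the original script's entry point is not shown
# here). The output directory and THREDDS catalog url below are placeholders.
if __name__ == '__main__':
    sDir = '/path/to/output'  # hypothetical output directory for figures
    url_list = ['https://opendap.oceanobservatories.org/thredds/catalog/...']  # hypothetical catalog urls
    main(sDir, url_list)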
Example n. 24
def main(sDir, url_list, start_time, end_time, preferred_only):
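    # Timeseries plots for PRESF instruments only: 1-D science variables get
    # full and outlier-removed timeseries plots; 2-D wave-burst variables are
    # plotted with plot_presf_2d before and after global-range screening.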
    rd_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        if rd not in rd_list and 'PRESF' in rd:
            rd_list.append(rd)

    for r in rd_list:
        print('\n{}'.format(r))
        datasets = []
        for u in url_list:
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join(
                (splitter[1], splitter[2], splitter[3], splitter[4]))
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])
                for ud in udatasets:  # filter out collocated data files
                    if 'PRESF' in ud.split('/')[-1]:
                        datasets.append(ud)
        fdatasets = []
        if preferred_only == 'yes':
            # get the preferred stream information
            ps_df, n_streams = cf.get_preferred_stream_info(r)
            for index, row in ps_df.iterrows():
                for ii in range(n_streams):
                    try:
                        rms = '-'.join((r, row[ii]))
                    except TypeError:
                        continue
                    for dd in datasets:
                        spl = dd.split('/')[-2].split('-')
                        catalog_rms = '-'.join(
                            (spl[1], spl[2], spl[3], spl[4], spl[5], spl[6]))
                        fdeploy = dd.split('/')[-1].split('_')[0]
                        if rms == catalog_rms and fdeploy == row['deployment']:
                            fdatasets.append(dd)
        else:
            fdatasets = datasets

        fdatasets = np.unique(fdatasets).tolist()
        for fd in fdatasets:
            ds = xr.open_dataset(fd, mask_and_scale=False)
            ds = ds.swap_dims({'obs': 'time'})

            if start_time is not None and end_time is not None:
                ds = ds.sel(time=slice(start_time, end_time))
                if len(ds['time'].values) == 0:
                    print(
                        'No data to plot for specified time range: ({} to {})'.
                        format(start_time, end_time))
                    continue

            fname, subsite, refdes, method, stream, deployment = cf.nc_attributes(
                fd)
            sci_vars = cf.return_science_vars(stream)
            print('\nPlotting {} {}'.format(r, deployment))
            array = subsite[0:2]
            filename = '_'.join(fname.split('_')[:-1])
            save_dir = os.path.join(sDir, array, subsite, refdes,
                                    'timeseries_plots', deployment)
            cf.create_dir(save_dir)

            tm = ds['time'].values
            t0 = pd.to_datetime(tm.min()).strftime('%Y-%m-%dT%H:%M:%S')
            t1 = pd.to_datetime(tm.max()).strftime('%Y-%m-%dT%H:%M:%S')
            title = ' '.join((deployment, refdes, method))

            for var in sci_vars:
                print(var)
                if var != 'id':
                    y = ds[var]
                    fv = y._FillValue
                    if len(y.dims) == 1:

                        # Check if the array is all NaNs
                        if sum(np.isnan(y.values)) == len(y.values):
                            print('Array of all NaNs - skipping plot.')

                        # Check if the array is all fill values
                        elif len(y[y != fv]) == 0:
                            print('Array of all fill values - skipping plot.')

                        else:
                            # reject fill values
                            ind = y.values != fv
                            t = tm[ind]
                            y = y[ind]

                            # Plot all data
                            fig, ax = pf.plot_timeseries(t,
                                                         y,
                                                         y.name,
                                                         stdev=None)
                            ax.set_title((title + '\n' + t0 + ' - ' + t1),
                                         fontsize=9)
                            sfile = '-'.join((filename, y.name, t0[:10]))
                            pf.save_fig(save_dir, sfile)

                            # Plot data with outliers removed
                            fig, ax = pf.plot_timeseries(t, y, y.name, stdev=5)
                            ax.set_title((title + '\n' + t0 + ' - ' + t1),
                                         fontsize=9)
                            sfile = '-'.join(
                                (filename, y.name, t0[:10])) + '_rmoutliers'
                            pf.save_fig(save_dir, sfile)
                    else:
                        v = y.values.T
                        n_nan = np.sum(np.isnan(v))

                        # convert fill values to nans
                        try:
                            v[v == fv] = np.nan
                        except ValueError:
                            v = v.astype(float)
                            v[v == fv] = np.nan
                        n_fv = np.sum(np.isnan(v)) - n_nan

                        # plot before global ranges are removed
                        fig, ax = pf.plot_presf_2d(tm, v, y.name, y.units)
                        ax.set_title((title + '\n' + t0 + ' - ' + t1),
                                     fontsize=9)
                        sfile = '-'.join((filename, var, t0[:10]))
                        pf.save_fig(save_dir, sfile)

                        # reject data outside of global ranges
                        [g_min, g_max] = cf.get_global_ranges(r, var)
                        if g_min is not None and g_max is not None:
                            v[v < g_min] = np.nan
                            v[v > g_max] = np.nan
                            n_grange = np.sum(np.isnan(v)) - n_fv - n_nan

                            if n_grange > 0:
                                # don't plot if the array is all nans
                                if np.all(np.isnan(v)):
                                    continue
                                else:
                                    # plot after global ranges are removed
                                    fig, ax = pf.plot_presf_2d(
                                        tm, v, y.name, y.units)
                                    title2 = 'removed: {} global ranges [{}, {}]'.format(
                                        n_grange, g_min, g_max)
                                    ax.set_title((title + '\n' + t0 + ' - ' +
                                                  t1 + '\n' + title2),
                                                 fontsize=9)
                                    sfile = '-'.join(
                                        (filename, var, t0[:10], 'rmgr'))
                                    pf.save_fig(save_dir, sfile)
Example n. 25
def main(sDir, url_list, start_time, end_time):
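    # Timeseries plots of the full record from the preferred stream of each
    # deployment, with special handling for 2-D SPKIR (plotted per wavelength)
    # and PRESF wave-burst data; deployment end times are marked with dashed lines.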
    rd_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        if rd not in rd_list:
            rd_list.append(rd)

    for r in rd_list:
        print('\n{}'.format(r))
        datasets = []
        for u in url_list:
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join((splitter[1], splitter[2], splitter[3], splitter[4]))
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])
                datasets.append(udatasets)
        datasets = list(itertools.chain(*datasets))
        fdatasets = []
        # get the preferred stream information
        ps_df, n_streams = cf.get_preferred_stream_info(r)
        for index, row in ps_df.iterrows():
            for ii in range(n_streams):
                try:
                    rms = '-'.join((r, row[ii]))
                except TypeError:
                    continue
                for dd in datasets:
                    spl = dd.split('/')[-2].split('-')
                    catalog_rms = '-'.join((spl[1], spl[2], spl[3], spl[4], spl[5], spl[6]))
                    fdeploy = dd.split('/')[-1].split('_')[0]
                    if rms == catalog_rms and fdeploy == row['deployment']:
                        fdatasets.append(dd)

        main_sensor = r.split('-')[-1]
        fdatasets_sel = cf.filter_collocated_instruments(main_sensor, fdatasets)

        # get science variable long names from the Data Review Database
        #stream_sci_vars = cd.sci_var_long_names(r)
        if 'SPKIR' in r or 'PRESF' in r:  # only get the main science variable for SPKIR and PRESF
            stream_vars = cd.sci_var_long_names(r)
        else:
            stream_vars = var_long_names(r)

        # check if the science variable long names are the same for each stream and initialize empty arrays
        sci_vars_dict = cd.sci_var_long_names_check(stream_vars)

        # get the preferred stream information
        ps_df, n_streams = cf.get_preferred_stream_info(r)

        # build dictionary of science data from the preferred dataset for each deployment
        print('\nAppending data from files')
        et = []
        sci_vars_dict, __, __ = cd.append_science_data(ps_df, n_streams, r, fdatasets_sel, sci_vars_dict, et, start_time, end_time)

        # get end times of deployments
        dr_data = cf.refdes_datareview_json(r)
        deployments = []
        dend_times = []
        for index, row in ps_df.iterrows():
            deploy = row['deployment']
            deploy_info = get_deployment_information(dr_data, int(deploy[-4:]))
            deployments.append(int(deploy[-4:]))
            dend_times.append(pd.to_datetime(deploy_info['stop_date']))

        subsite = r.split('-')[0]
        array = subsite[0:2]
        save_dir = os.path.join(sDir, array, subsite, r, 'timeseries_plots_preferred_all')
        cf.create_dir(save_dir)

        print('\nPlotting data')
        for m, n in sci_vars_dict.items():
            for sv, vinfo in n['vars'].items():
                print(sv)
                if 'SPKIR' in r:
                    fv_lst = np.unique(vinfo['fv']).tolist()
                    if len(fv_lst) == 1:
                        fill_value = fv_lst[0]
                    else:
                        print(fv_lst)
                        print('No unique fill value for {} - using the first'.format(sv))
                        fill_value = fv_lst[0]  # fall back so fill_value is always defined

                    sv_units = np.unique(vinfo['units']).tolist()

                    t = vinfo['t']
                    if len(t) > 1:
                        data = vinfo['values']
                        [dd_data, g_min, g_max] = index_dataset_2d(r, 'spkir_abj_cspp_downwelling_vector', data, fill_value)
                        t0 = pd.to_datetime(min(t)).strftime('%Y-%m-%dT%H:%M:%S')
                        t1 = pd.to_datetime(max(t)).strftime('%Y-%m-%dT%H:%M:%S')
                        deploy_final = vinfo['deployments']
                        deploy = list(np.unique(deploy_final))
                        deployments = [int(dd) for dd in deploy]

                        sname = '-'.join((r, sv))
                        fig, ax = pf.plot_spkir(t, dd_data, sv, sv_units[0])
                        ax.set_title((r + '\nDeployments: ' + str(sorted(deployments)) + '\n' + t0 + ' - ' + t1 + '\n'
                                      + 'removed global ranges +/- [{} - {}]'.format(g_min, g_max)), fontsize=8)
                        for etimes in dend_times:
                            ax.axvline(x=etimes, color='k', linestyle='--', linewidth=.6)
                        pf.save_fig(save_dir, sname)

                        # plot each wavelength
                        wavelengths = ['412nm', '443nm', '490nm', '510nm', '555nm', '620nm', '683nm']
                        for wvi in range(len(dd_data)):
                            fig, ax = pf.plot_spkir_wv(t, dd_data[wvi], sv, sv_units[0], wvi)
                            ax.set_title(
                                (r + '\nDeployments: ' + str(sorted(deployments)) + '\n' + t0 + ' - ' + t1 + '\n'
                                 + 'removed global ranges +/- [{} - {}]'.format(g_min, g_max)), fontsize=8)
                            for etimes in dend_times:
                                ax.axvline(x=etimes, color='k', linestyle='--', linewidth=.6)
                            snamewvi = '-'.join((sname, wavelengths[wvi]))
                            pf.save_fig(save_dir, snamewvi)

                elif 'presf_abc_wave_burst' in m:
                    fv_lst = np.unique(vinfo['fv']).tolist()
                    if len(fv_lst) == 1:
                        fill_value = fv_lst[0]
                    else:
                        print(fv_lst)
                        print('No unique fill value for {} - using the first'.format(sv))
                        fill_value = fv_lst[0]  # fall back so fill_value is always defined

                    sv_units = np.unique(vinfo['units']).tolist()

                    t = vinfo['t']
                    if len(t) > 1:
                        data = vinfo['values']
                        [dd_data, g_min, g_max] = index_dataset_2d(r, 'presf_wave_burst_pressure', data, fill_value)
                        t0 = pd.to_datetime(min(t)).strftime('%Y-%m-%dT%H:%M:%S')
                        t1 = pd.to_datetime(max(t)).strftime('%Y-%m-%dT%H:%M:%S')
                        deploy_final = vinfo['deployments']
                        deploy = list(np.unique(deploy_final))
                        deployments = [int(dd) for dd in deploy]

                        sname = '-'.join((r, sv))
                        fig, ax = pf.plot_presf_2d(t, dd_data, sv, sv_units[0])
                        ax.set_title((r + '\nDeployments: ' + str(sorted(deployments)) + '\n' + t0 + ' - ' + t1 + '\n'
                                      + 'removed global ranges +/- [{} - {}]'.format(g_min, g_max)), fontsize=8)
                        for etimes in dend_times:
                            ax.axvline(x=etimes, color='k', linestyle='--', linewidth=.6)
                        pf.save_fig(save_dir, sname)

                else:
                    if type(vinfo['values']) != dict:  # if the variable is not a 2D array
                        if 'Spectra' not in sv:
                            if len(vinfo['t']) < 1:
                                print('no variable data to plot')
                            else:
                                sv_units = vinfo['units'][0]
                                sv_name = vinfo['var_name']
                                t0 = pd.to_datetime(min(vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                                t1 = pd.to_datetime(max(vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                                x = vinfo['t']
                                y = vinfo['values']

                                # reject NaNs and values of 0.0
                                nan_ind = (~np.isnan(y)) & (y != 0.0)
                                x_nonan = x[nan_ind]
                                y_nonan = y[nan_ind]

                                # reject fill values
                                fv_ind = y_nonan != vinfo['fv'][0]
                                x_nonan_nofv = x_nonan[fv_ind]
                                y_nonan_nofv = y_nonan[fv_ind]

                                # reject extreme values
                                Ev_ind = cf.reject_extreme_values(y_nonan_nofv)
                                y_nonan_nofv_nE = y_nonan_nofv[Ev_ind]
                                x_nonan_nofv_nE = x_nonan_nofv[Ev_ind]

                                # reject values outside global ranges:
                                global_min, global_max = cf.get_global_ranges(r, sv_name)
                                if any(e is None for e in [global_min, global_max]):
                                    y_nonan_nofv_nE_nogr = y_nonan_nofv_nE
                                    x_nonan_nofv_nE_nogr = x_nonan_nofv_nE
                                else:
                                    gr_ind = cf.reject_global_ranges(y_nonan_nofv_nE, global_min, global_max)
                                    y_nonan_nofv_nE_nogr = y_nonan_nofv_nE[gr_ind]
                                    x_nonan_nofv_nE_nogr = x_nonan_nofv_nE[gr_ind]

                                if len(y_nonan_nofv) > 0:
                                    if m == 'common_stream_placeholder':
                                        sname = '-'.join((r, sv))
                                    else:
                                        sname = '-'.join((r, m, sv))

                                    plt_deploy = [int(x) for x in list(np.unique(vinfo['deployments']))]

                                    # plot hourly averages for cabled and FDCHP data
                                    if 'streamed' in sci_vars_dict[list(sci_vars_dict.keys())[0]]['ms'][0] or 'FDCHP' in r:
                                        sname = '-'.join((sname, 'hourlyavg'))
                                        df = pd.DataFrame({'dfx': x_nonan_nofv_nE_nogr, 'dfy': y_nonan_nofv_nE_nogr})
                                        dfr = df.resample('H', on='dfx').mean()

                                        # Plot all data
                                        fig, ax = pf.plot_timeseries_all(dfr.index, dfr['dfy'], sv, sv_units, stdev=None)
                                        ax.set_title((r + '\nDeployments: ' + str(plt_deploy) + '\n' + t0 + ' - ' + t1),
                                                     fontsize=8)

                                        # if plotting a specific time range, plot deployment lines only for those deployments
                                        if type(start_time) == dt.datetime:
                                            for e in list(np.unique(vinfo['deployments'])):
                                                etime = dend_times[int(e) - 1]
                                                ax.axvline(x=etime, color='b', linestyle='--', linewidth=.6)
                                        else:
                                            for etime in dend_times:
                                                ax.axvline(x=etime, color='b', linestyle='--', linewidth=.6)
                                        pf.save_fig(save_dir, sname)
                                    else:
                                        # Plot all data
                                        fig, ax = pf.plot_timeseries_all(x_nonan_nofv, y_nonan_nofv, sv, sv_units, stdev=None)
                                        ax.set_title((r + '\nDeployments: ' + str(plt_deploy) + '\n' + t0 + ' - ' + t1),
                                                     fontsize=8)

                                        # if plotting a specific time range, plot deployment lines only for those deployments
                                        if type(start_time) == dt.datetime:
                                            # for e in list(np.unique(vinfo['deployments'])):
                                            #     etime = dend_times[int(e) - 1]
                                            #     ax.axvline(x=etime, color='b', linestyle='--', linewidth=.6)
                                            etime = dend_times[int(list(np.unique(vinfo['deployments']))[0]) - 1]
                                            ax.axvline(x=etime, color='b', linestyle='--', linewidth=.6)
                                        else:
                                            for etime in dend_times:
                                                ax.axvline(x=etime, color='b', linestyle='--', linewidth=.6)
                                        # if not any(e is None for e in [global_min, global_max]):
                                        #     ax.axhline(y=global_min, color='r', linestyle='--', linewidth=.6)
                                        #     ax.axhline(y=global_max, color='r', linestyle='--', linewidth=.6)
                                        # else:
                                        #     maxpoint = x[np.argmax(y_nonan_nofv)], max(y_nonan_nofv)
                                        #     ax.annotate('No Global Ranges', size=8,
                                        #                 xy=maxpoint, xytext=(5, 5), textcoords='offset points')
                                        pf.save_fig(save_dir, sname)

                                        # Plot data with outliers removed
                                        fig, ax = pf.plot_timeseries_all(x_nonan_nofv_nE_nogr, y_nonan_nofv_nE_nogr, sv, sv_units,
                                                                         stdev=5)
                                        ax.set_title((r + '\nDeployments: ' + str(plt_deploy) + '\n' + t0 + ' - ' + t1),
                                                     fontsize=8)

                                        # if plotting a specific time range, plot deployment lines only for those deployments
                                        if type(start_time) == dt.datetime:
                                            # for e in list(np.unique(vinfo['deployments'])):
                                            #     etime = dend_times[int(e) - 1]
                                            #     ax.axvline(x=etime, color='b', linestyle='--', linewidth=.6)
                                            etime = dend_times[int(list(np.unique(vinfo['deployments']))[0]) - 1]
                                            ax.axvline(x=etime, color='b', linestyle='--', linewidth=.6)
                                        else:
                                            for etime in dend_times:
                                                ax.axvline(x=etime, color='b', linestyle='--', linewidth=.6)
                                        # if not any(e is None for e in [global_min, global_max]):
                                        #     ax.axhline(y=global_min, color='r', linestyle='--', linewidth=.6)
                                        #     ax.axhline(y=global_max, color='r', linestyle='--', linewidth=.6)
                                        # else:
                                        #     maxpoint = x[np.argmax(y_nonan_nofv_nE_nogr)], max(y_nonan_nofv_nE_nogr)
                                        #     ax.annotate('No Global Ranges', size=8,
                                        #                 xy=maxpoint, xytext=(5, 5), textcoords='offset points')

                                        sfile = '_'.join((sname, 'rmoutliers'))
                                        pf.save_fig(save_dir, sfile)
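
# Assumed invocation (not part of the captured source): pass dt.datetime bounds
# to restrict the plotted window, or None for both to plot the full record.
if __name__ == '__main__':
    start_time = dt.datetime(2016, 1, 1)    # hypothetical
    end_time = dt.datetime(2016, 12, 31)    # hypothetical
    main('/path/to/output', ['https://opendap.oceanobservatories.org/thredds/catalog/...'], start_time, end_time)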
Example n. 26
def main(sDir, url_list, preferred_only):
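    # Yearly timeseries plots: one stacked panel per year showing the data with a
    # 1-day rolling mean and a +/- 2 standard deviation envelope, plus a companion
    # figure with kernel density estimates and a per-year statistics table.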
    rd_list = []
    ms_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        ms = uu.split(rd + '-')[1].split('/')[0]
        if rd not in rd_list:
            rd_list.append(rd)
        if ms not in ms_list:
            ms_list.append(ms)

    for r in rd_list:
        print('\n{}'.format(r))
        subsite = r.split('-')[0]
        array = subsite[0:2]

        # filter datasets
        datasets = []
        for u in url_list:
            print(u)
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join(
                (splitter[1], splitter[2], splitter[3], splitter[4]))
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])
                datasets.append(udatasets)
        datasets = list(itertools.chain(*datasets))

        fdatasets = []
        if preferred_only == 'yes':
            # get the preferred stream information
            ps_df, n_streams = cf.get_preferred_stream_info(r)
            for index, row in ps_df.iterrows():
                for ii in range(n_streams):
                    try:
                        rms = '-'.join((r, row[ii]))
                    except TypeError:
                        continue
                    for dd in datasets:
                        spl = dd.split('/')[-2].split('-')
                        catalog_rms = '-'.join(
                            (spl[1], spl[2], spl[3], spl[4], spl[5], spl[6]))
                        fdeploy = dd.split('/')[-1].split('_')[0]
                        if rms == catalog_rms and fdeploy == row['deployment']:
                            fdatasets.append(dd)
        else:
            fdatasets = datasets

        main_sensor = r.split('-')[-1]
        fdatasets = cf.filter_collocated_instruments(main_sensor, fdatasets)

        # preferred stream info is needed below even when preferred_only != 'yes'
        ps_df, n_streams = cf.get_preferred_stream_info(r)

        # get end times of deployments
        dr_data = cf.refdes_datareview_json(r)
        deployments = []
        end_times = []
        for index, row in ps_df.iterrows():
            deploy = row['deployment']
            deploy_info = get_deployment_information(dr_data, int(deploy[-4:]))
            deployments.append(int(deploy[-4:]))
            end_times.append(pd.to_datetime(deploy_info['stop_date']))

        # # filter datasets
        # datasets = []
        # for u in url_list:
        #     print(u)
        #     splitter = u.split('/')[-2].split('-')
        #     rd_check = '-'.join((splitter[1], splitter[2], splitter[3], splitter[4]))
        #     if rd_check == r:
        #         udatasets = cf.get_nc_urls([u])
        #         datasets.append(udatasets)
        # datasets = list(itertools.chain(*datasets))
        # main_sensor = r.split('-')[-1]
        # fdatasets = cf.filter_collocated_instruments(main_sensor, datasets)
        # fdatasets = cf.filter_other_streams(r, ms_list, fdatasets)

        methodstream = []
        for f in fdatasets:
            methodstream.append('-'.join((f.split('/')[-2].split('-')[-2],
                                          f.split('/')[-2].split('-')[-1])))

        ms_dict = save_dir_path(ms_list)
        for ms in np.unique(methodstream):
            fdatasets_sel = [x for x in fdatasets if ms in x]
            check_ms = ms.split('-')[1]
            if 'recovered' in check_ms:
                check_ms = check_ms.split('_recovered')[0]

            if ms_dict['ms_count'][ms_dict['ms_unique'] == ms.split('-')
                                   [0]] == 1:
                save_dir = os.path.join(sDir, array, subsite, r,
                                        'timeseries_yearly_plot',
                                        ms.split('-')[0])
            else:
                save_dir = os.path.join(sDir, array, subsite, r,
                                        'timeseries_yearly_plot',
                                        ms.split('-')[0], check_ms)
            cf.create_dir(save_dir)

            stream_sci_vars_dict = dict()
            for x in dr_data['instrument']['data_streams']:
                dr_ms = '-'.join((x['method'], x['stream_name']))
                if ms == dr_ms:
                    stream_sci_vars_dict[dr_ms] = dict(vars=dict())
                    sci_vars = dict()
                    for y in x['stream']['parameters']:
                        if y['data_product_type'] == 'Science Data':
                            sci_vars.update(
                                {y['name']: dict(db_units=y['unit'])})
                    if len(sci_vars) > 0:
                        stream_sci_vars_dict[dr_ms]['vars'] = sci_vars

            sci_vars_dict = cd.initialize_empty_arrays(stream_sci_vars_dict,
                                                       ms)
            print('\nAppending data from files: {}'.format(ms))
            for fd in fdatasets_sel:
                ds = xr.open_dataset(fd, mask_and_scale=False)
                print(fd)
                for var in list(sci_vars_dict[ms]['vars'].keys()):
                    sh = sci_vars_dict[ms]['vars'][var]
                    try:
                        ds[var]  # raises KeyError if the variable is absent from this file
                        print(var)
                        deployment_num = np.unique(ds['deployment'].values)[0]
                        sh['deployments'] = np.append(sh['deployments'],
                                                      deployment_num)
                        if ds[var].units == sh['db_units']:
                            if ds[var]._FillValue not in sh['fv']:
                                sh['fv'].append(ds[var]._FillValue)
                            if ds[var].units not in sh['units']:
                                sh['units'].append(ds[var].units)
                            tD = ds['time'].values
                            varD = ds[var].values
                            sh['t'] = np.append(sh['t'], tD)
                            sh['values'] = np.append(sh['values'], varD)
                    except KeyError:
                        print('KeyError: ', var)

            print('\nPlotting data')
            for m, n in sci_vars_dict.items():
                for sv, vinfo in n['vars'].items():
                    print(sv)
                    if len(vinfo['t']) < 1:
                        print('no variable data to plot')
                    else:
                        sv_units = vinfo['units'][0]
                        deployments_num = vinfo['deployments']
                        fv = vinfo['fv'][0]
                        t0 = pd.to_datetime(min(
                            vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                        t1 = pd.to_datetime(max(
                            vinfo['t'])).strftime('%Y-%m-%dT%H:%M:%S')
                        x = vinfo['t']
                        y = vinfo['values']

                        # reject NaNs
                        nan_ind = ~np.isnan(y)
                        x_nonan = x[nan_ind]
                        y_nonan = y[nan_ind]

                        # reject fill values
                        fv_ind = y_nonan != vinfo['fv'][0]
                        x_nonan_nofv = x_nonan[fv_ind]
                        y_nonan_nofv = y_nonan[fv_ind]

                        # reject extreme values
                        Ev_ind = cf.reject_extreme_values(y_nonan_nofv)
                        y_nonan_nofv_nE = y_nonan_nofv[Ev_ind]
                        x_nonan_nofv_nE = x_nonan_nofv[Ev_ind]

                        # reject values outside global ranges:
                        global_min, global_max = cf.get_global_ranges(r, sv)
                        print('global ranges: ', global_min, global_max)
                        if global_min and global_max:
                            gr_ind = cf.reject_global_ranges(
                                y_nonan_nofv_nE, global_min, global_max)
                            y_nonan_nofv_nE_nogr = y_nonan_nofv_nE[gr_ind]
                            x_nonan_nofv_nE_nogr = x_nonan_nofv_nE[gr_ind]
                        else:
                            y_nonan_nofv_nE_nogr = y_nonan_nofv_nE
                            x_nonan_nofv_nE_nogr = x_nonan_nofv_nE

                        # check that data remain after QC before plotting
                        if len(y_nonan_nofv_nE_nogr) > 0:
                            if m == 'common_stream_placeholder':
                                sname = '-'.join((r, sv))
                            else:
                                sname = '-'.join((r, m, sv))

                            # group data by year
                            groups, g_data = gt.group_by_time_range(
                                x_nonan_nofv_nE_nogr, y_nonan_nofv_nE_nogr,
                                'A')

                            # create bins
                            # groups_min = min(groups.describe()['DO']['min'])
                            # lower_bound = int(round(groups_min))
                            # groups_max = max(groups.describe()['DO']['max'])
                            # if groups_max < 1:
                            #     upper_bound = 1
                            #     step_bound = 1
                            # else:
                            #     upper_bound = int(round(groups_max + (groups_max / 50)))
                            #     step_bound = int(round((groups_max - groups_min) / 10))
                            #
                            # if step_bound == 0:
                            #     step_bound += 1
                            #
                            # if (upper_bound - lower_bound) == step_bound:
                            #     lower_bound -= 1
                            #     upper_bound += 1
                            # if (upper_bound - lower_bound) < step_bound:
                            #     print('<')
                            #     step_bound = int(round(step_bound / 10))
                            # print(lower_bound, upper_bound, step_bound)
                            # bin_range = list(range(lower_bound, upper_bound, step_bound))
                            # print(bin_range)

                            # preparing color palette
                            colors = color_names[:len(groups)]

                            # colors = [color['color'] for color in
                            #           list(pyplot.rcParams['axes.prop_cycle'][:len(groups)])]

                            fig0, ax0 = pyplot.subplots(nrows=2, ncols=1)

                            # subplot for  histogram and basic statistics table
                            ax0[1].axis('off')
                            ax0[1].axis('tight')
                            the_table = ax0[1].table(
                                cellText=groups.describe().round(2).values,
                                rowLabels=groups.describe().index.year,
                                rowColours=colors,
                                colLabels=groups.describe().columns.levels[1],
                                loc='center')
                            the_table.set_fontsize(5)

                            # subplot for data
                            fig, ax = pyplot.subplots(nrows=len(groups),
                                                      ncols=1,
                                                      sharey=True)
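                            # pyplot.subplots returns a bare Axes (not an array)
                            # when nrows=1, so wrap it for uniform indexing below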
                            if len(groups) == 1:
                                ax = [ax]
                            t = 1
                            for ny in range(len(groups)):
                                # prepare data for plotting
                                y_data = g_data[ny + (t + 1)].dropna(axis=0)
                                x_time = g_data[ny + t].dropna(axis=0)
                                t += 1
                                if len(y_data) != 0 and len(x_time) != 0:
                                    n_year = x_time[0].year

                                    col_name = str(n_year)

                                    serie_n = pd.DataFrame(columns=[col_name],
                                                           index=x_time)
                                    serie_n[col_name] = list(y_data[:])

                                    # plot histogram
                                    # serie_n.plot.hist(ax=ax0[0], bins=bin_range,
                                    #                   histtype='bar', color=colors[ny], stacked=True)

                                    if len(serie_n) != 1:
                                        serie_n.plot.kde(ax=ax0[0],
                                                         color=colors[ny])
                                        ax0[0].legend(fontsize=8,
                                                      bbox_to_anchor=(0., 1.12,
                                                                      1.,
                                                                      .102),
                                                      loc=3,
                                                      ncol=len(groups),
                                                      mode="expand",
                                                      borderaxespad=0.)

                                        # ax0[0].set_xticks(bin_range)
                                        ax0[0].set_xlabel('Observation Ranges',
                                                          fontsize=8)
                                        ax0[0].set_ylabel(
                                            'Density', fontsize=8
                                        )  #'Number of Observations'
                                        ax0[0].set_title(
                                            ms.split('-')[0] + ' (' + sv +
                                            ', ' + sv_units + ')' +
                                            '  Kernel Density Estimates',
                                            fontsize=8)

                                        # plot data
                                        serie_n.plot(ax=ax[ny],
                                                     linestyle='None',
                                                     marker='.',
                                                     markersize=0.5,
                                                     color=colors[ny])
                                        ax[ny].legend().set_visible(False)

                                        # plot Mean and Standard deviation
                                        ma = serie_n.rolling('86400s').mean()
                                        mstd = serie_n.rolling('86400s').std()

                                        ax[ny].plot(ma.index,
                                                    ma[col_name].values,
                                                    'k',
                                                    linewidth=0.15)
                                        ax[ny].fill_between(
                                            mstd.index,
                                            ma[col_name].values -
                                            2 * mstd[col_name].values,
                                            ma[col_name].values +
                                            2 * mstd[col_name].values,
                                            color='b',
                                            alpha=0.2)

                                        # prepare the time axis parameters
                                        datemin = datetime.date(n_year, 1, 1)
                                        datemax = datetime.date(n_year, 12, 31)
                                        ax[ny].set_xlim(datemin, datemax)
                                        xlocator = mdates.MonthLocator(
                                        )  # every month
                                        myFmt = mdates.DateFormatter('%m')
                                        ax[ny].xaxis.set_minor_locator(
                                            xlocator)
                                        ax[ny].xaxis.set_major_formatter(myFmt)

                                        # prepare the time axis parameters
                                        # ax[ny].set_yticks(bin_range)
                                        ylocator = MaxNLocator(prune='both',
                                                               nbins=3)
                                        ax[ny].yaxis.set_major_locator(
                                            ylocator)

                                        # format figure
                                        ax[ny].tick_params(axis='both',
                                                           color='r',
                                                           labelsize=7,
                                                           labelcolor='m')

                                        if ny < len(groups) - 1:
                                            ax[ny].tick_params(
                                                which='both',
                                                pad=0.1,
                                                length=1,
                                                labelbottom=False)
                                            ax[ny].set_xlabel(' ')
                                        else:
                                            ax[ny].tick_params(which='both',
                                                               color='r',
                                                               labelsize=7,
                                                               labelcolor='m',
                                                               pad=0.1,
                                                               length=1,
                                                               rotation=0)
                                            ax[ny].set_xlabel('Months',
                                                              rotation=0,
                                                              fontsize=8,
                                                              color='b')

                                        ax[ny].set_ylabel(n_year,
                                                          rotation=0,
                                                          fontsize=8,
                                                          color='b',
                                                          labelpad=20)
                                        ax[ny].yaxis.set_label_position(
                                            "right")

                                        if ny == 0:
                                            if global_min and global_max:

                                                ax[ny].set_title(
                                                    sv + ' (' + sv_units +
                                                    ') -- Global Range: [' +
                                                    str(int(global_min)) +
                                                    ',' +
                                                    str(int(global_max)) +
                                                    '] \n'
                                                    'Plotted: Data, Mean and 2STD (Method: One day rolling window calculations) \n',
                                                    fontsize=8)
                                            else:
                                                ax[ny].set_title(
                                                    sv + ' (' + sv_units +
                                                    ') -- Global Range: [] \n'
                                                    'Plotted: Data, Mean and 2STD (Method: One day rolling window calculations) \n',
                                                    fontsize=8)

                                        # plot global ranges
                                        # ax[ny].axhline(y=global_min, color='r', linestyle='--', linewidth=.6)
                                        # ax[ny].axhline(y=global_max, color='r', linestyle='--', linewidth=.6)

                                        # mark deployment end times on figure
                                        ymin, ymax = ax[ny].get_ylim()
                                        for etimes in range(len(end_times)):
                                            if end_times[etimes].year == n_year:
                                                ax[ny].axvline(
                                                    x=end_times[etimes],
                                                    color='b',
                                                    linestyle='--',
                                                    linewidth=.6)
                                                ax[ny].text(
                                                    end_times[etimes], ymin,
                                                    'End' + str(deployments_num[etimes]),
                                                    fontsize=6, style='italic',
                                                    bbox=dict(boxstyle='round',
                                                              ec=(0., 0.5, 0.5),
                                                              fc=(1., 1., 1.)))

                                        # ax[ny].set_ylim(5, 12)

                                    # save figure to a file
                                    sfile = '_'.join(('all', sname))
                                    save_file = os.path.join(save_dir, sfile)
                                    fig.savefig(str(save_file), dpi=150)

                                    sfile = '_'.join(('Statistics', sname))
                                    save_file = os.path.join(save_dir, sfile)
                                    fig0.savefig(str(save_file), dpi=150)

                                    pyplot.close()
Example no. 27
0
def main(sDir, plotting_sDir, url_list, sd_calc):
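    """
    For each reference designator in url_list: gather the netCDF files for
    the preferred method/stream of each deployment, screen the science
    variables with index_dataset / index_dataset_2d (fill values and global
    ranges), compute statistics (optionally rejecting outliers beyond
    sd_calc standard deviations), plot the reviewed timeseries, and write a
    <refdes>_data_ranges.csv summary.
    """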
    dr = pd.read_csv('https://datareview.marine.rutgers.edu/notes/export')
    drn = dr.loc[dr.type == 'exclusion']
    rd_list = []
    for uu in url_list:
        elements = uu.split('/')[-2].split('-')
        rd = '-'.join((elements[1], elements[2], elements[3], elements[4]))
        if rd not in rd_list:
            rd_list.append(rd)

    for r in rd_list:
        print('\n{}'.format(r))
        datasets = []
        for u in url_list:
            splitter = u.split('/')[-2].split('-')
            rd_check = '-'.join(
                (splitter[1], splitter[2], splitter[3], splitter[4]))
            if rd_check == r:
                udatasets = cf.get_nc_urls([u])
                datasets.append(udatasets)
        datasets = list(itertools.chain(*datasets))
        fdatasets = []
        # get the preferred stream information
        ps_df, n_streams = cf.get_preferred_stream_info(r)
        pms = []
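        # match each dataset in the catalog to the preferred method/stream
        # and deployment for this reference designator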
        for index, row in ps_df.iterrows():
            for ii in range(n_streams):
                try:
                    rms = '-'.join((r, row[ii]))
                    pms.append(row[ii])
                except TypeError:
                    continue
                for dd in datasets:
                    spl = dd.split('/')[-2].split('-')
                    catalog_rms = '-'.join(
                        (spl[1], spl[2], spl[3], spl[4], spl[5], spl[6]))
                    fdeploy = dd.split('/')[-1].split('_')[0]
                    if rms == catalog_rms and fdeploy == row['deployment']:
                        fdatasets.append(dd)

        main_sensor = r.split('-')[-1]
        fdatasets_sel = cf.filter_collocated_instruments(
            main_sensor, fdatasets)

        # find time ranges to exclude from analysis for data review database
        subsite = r.split('-')[0]
        subsite_node = '-'.join((subsite, r.split('-')[1]))

        drne = drn.loc[drn.reference_designator.isin(
            [subsite, subsite_node, r])]
        et = []
        for i, row in drne.iterrows():
            sdate = cf.format_dates(row.start_date)
            edate = cf.format_dates(row.end_date)
            et.append([sdate, edate])

        # get science variable long names from the Data Review Database
        stream_sci_vars = cd.sci_var_long_names(r)

        # check if the science variable long names are the same for each stream
        sci_vars_dict = cd.sci_var_long_names_check(stream_sci_vars)

        # get the preferred stream information
        ps_df, n_streams = cf.get_preferred_stream_info(r)

        # build dictionary of science data from the preferred dataset for each deployment
        print('\nAppending data from files')
        sci_vars_dict, pressure_unit, pressure_name = cd.append_science_data(
            ps_df, n_streams, r, fdatasets_sel, sci_vars_dict, et)

        # analyze combined dataset
        print('\nAnalyzing combined dataset and writing summary file')

        array = subsite[0:2]  # the array code is the first two characters of the subsite
        save_dir = os.path.join(sDir, array, subsite)
        cf.create_dir(save_dir)

        rows = []
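        # FLM CTDMO summaries add pressure-filter columns (press_min_max,
        # n_excluded_forpress, note) to the standard header set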
        # calculate Flanking Mooring CTDMO stats based on pressure
        if ('FLM' in r) and ('CTDMO' in r):
            headers = [
                'common_stream_name', 'preferred_methods_streams',
                'deployments', 'long_name', 'units', 't0', 't1', 'fill_value',
                'global_ranges', 'n_all', 'press_min_max',
                'n_excluded_forpress', 'n_nans', 'n_fillvalues', 'n_grange',
                'define_stdev', 'n_outliers', 'n_stats', 'mean', 'min', 'max',
                'stdev', 'note'
            ]
        else:
            headers = [
                'common_stream_name', 'preferred_methods_streams',
                'deployments', 'long_name', 'units', 't0', 't1', 'fill_value',
                'global_ranges', 'n_all', 'n_nans', 'n_fillvalues', 'n_grange',
                'define_stdev', 'n_outliers', 'n_stats', 'mean', 'min', 'max',
                'stdev'
            ]

        for m, n in sci_vars_dict.items():
            print('\nSTREAM: ', m)
            if m == 'common_stream_placeholder':
                m = 'science_data_stream'
            if m == 'metbk_hourly':  # don't calculate ranges for metbk_hourly
                continue

            # calculate Flanking Mooring CTDMO stats based on pressure
            if ('FLM' in r) and ('CTDMO' in r):
                # index the pressure variable to filter and calculate stats on the rest of the variables
                sv_press = 'Seawater Pressure'
                vinfo_press = n['vars'][sv_press]

                # first, index where data are nans, fill values, and outside of global ranges
                fv_press = list(np.unique(vinfo_press['fv']))[0]
                pdata = vinfo_press['values']

                [pind, __, __, __, __, __] = index_dataset(
                    r, vinfo_press['var_name'], pdata, fv_press)

                pdata_filtered = pdata[pind]
                [__, pmean, __, __, psd, __] = cf.variable_statistics(
                    pdata_filtered, None)

                # index of pressure = average of all 'valid' pressure data +/- 1 SD
                ipress_min = pmean - psd
                ipress_max = pmean + psd
                ind_press = (pdata >= ipress_min) & (pdata <= ipress_max)

                # calculate stats for all variables
                print('\nPARAMETERS:')
                for sv, vinfo in n['vars'].items():
                    print(sv)
                    fv_lst = np.unique(vinfo['fv']).tolist()
                    if len(fv_lst) == 1:
                        fill_value = fv_lst[0]
                    else:
                        print('No unique fill value for {}'.format(sv))
                        # fall back to the first fill value so fill_value is
                        # always defined (assumption; original left it unset)
                        fill_value = fv_lst[0]

                    lunits = np.unique(vinfo['units']).tolist()
                    n_all = len(vinfo['t'])

                    # filter data based on pressure index
                    t_filtered = vinfo['t'][ind_press]
                    data_filtered = vinfo['values'][ind_press]
                    deploy_filtered = vinfo['deployments'][ind_press]

                    n_excluded = n_all - len(t_filtered)  # points dropped by the pressure filter

                    [dataind, g_min, g_max, n_nan, n_fv,
                     n_grange] = index_dataset(r, vinfo['var_name'],
                                               data_filtered, fill_value)

                    t_final = t_filtered[dataind]
                    data_final = data_filtered[dataind]
                    deploy_final = deploy_filtered[dataind]

                    t0 = pd.to_datetime(
                        min(t_final)).strftime('%Y-%m-%dT%H:%M:%S')
                    t1 = pd.to_datetime(
                        max(t_final)).strftime('%Y-%m-%dT%H:%M:%S')
                    deploy = list(np.unique(deploy_final))
                    deployments = [int(dd) for dd in deploy]

                    if len(data_final) > 1:
                        [num_outliers, mean, vmin, vmax, sd, n_stats
                         ] = cf.variable_statistics(data_final, sd_calc)
                    else:
                        num_outliers = None
                        mean = None
                        vmin = None
                        vmax = None
                        sd = None
                        n_stats = None

                    note = 'restricted stats calculation to data points where pressure is within defined ranges' \
                           ' (average of all pressure data +/- 1 SD)'
                    rows.append([
                        m, list(np.unique(pms)), deployments, sv, lunits,
                        t0, t1, fv_lst, [g_min, g_max], n_all,
                        [round(ipress_min, 2), round(ipress_max, 2)],
                        n_excluded, n_nan, n_fv, n_grange, sd_calc,
                        num_outliers, n_stats, mean, vmin, vmax, sd, note
                    ])

                    # plot CTDMO data used for stats
                    psave_dir = os.path.join(plotting_sDir, array, subsite, r,
                                             'timeseries_plots_stats')
                    cf.create_dir(psave_dir)

                    # look up deployment end dates to mark on the plots
                    dr_data = cf.refdes_datareview_json(r)
                    deployments = []
                    end_times = []
                    for index, row in ps_df.iterrows():
                        deploy = row['deployment']
                        deploy_info = cf.get_deployment_information(
                            dr_data, int(deploy[-4:]))
                        deployments.append(int(deploy[-4:]))
                        end_times.append(
                            pd.to_datetime(deploy_info['stop_date']))

                    sname = '-'.join((r, sv))
                    fig, ax = pf.plot_timeseries_all(t_final,
                                                     data_final,
                                                     sv,
                                                     lunits[0],
                                                     stdev=None)
                    ax.set_title(
                        (r + '\nDeployments: ' + str(sorted(deployments)) +
                         '\n' + t0 + ' - ' + t1),
                        fontsize=8)
                    for etimes in end_times:
                        ax.axvline(x=etimes,
                                   color='k',
                                   linestyle='--',
                                   linewidth=.6)
                    pf.save_fig(psave_dir, sname)

                    if sd_calc:
                        sname = '-'.join((r, sv, 'rmoutliers'))
                        fig, ax = pf.plot_timeseries_all(t_final,
                                                         data_final,
                                                         sv,
                                                         lunits[0],
                                                         stdev=sd_calc)
                        ax.set_title(
                            (r + '\nDeployments: ' + str(sorted(deployments)) +
                             '\n' + t0 + ' - ' + t1),
                            fontsize=8)
                        for etimes in end_times:
                            ax.axvline(x=etimes,
                                       color='k',
                                       linestyle='--',
                                       linewidth=.6)
                        pf.save_fig(psave_dir, sname)

            else:
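                # standard path (no pressure-based filtering); sdcalc is the
                # 'define_stdev' value reported when sd_calc is not supplied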
                if not sd_calc:
                    sdcalc = None

                print('\nPARAMETERS: ')
                for sv, vinfo in n['vars'].items():
                    print(sv)

                    fv_lst = np.unique(vinfo['fv']).tolist()
                    if len(fv_lst) == 1:
                        fill_value = fv_lst[0]
                    else:
                        print(fv_lst)
                        print('No unique fill value for {}'.format(sv))
                        # fall back to the first fill value so fill_value is
                        # always defined (assumption; original left it unset)
                        fill_value = fv_lst[0]

                    lunits = np.unique(vinfo['units']).tolist()

                    t = vinfo['t']
                    [g_min, g_max] = cf.get_global_ranges(r, vinfo['var_name'])
                    if len(t) > 1:
                        data = vinfo['values']
                        n_all = len(t)

                        # SPKIR and presf wave-burst variables are 2D; screen
                        # and summarize them row by row
                        if 'SPKIR' in r or 'presf_abc_wave_burst' in m:
                            if 'SPKIR' in r:
                                [dd_data, g_min, g_max, n_nan, n_fv,
                                 n_grange] = index_dataset_2d(
                                     r, 'spkir_abj_cspp_downwelling_vector',
                                     data, fill_value)
                            else:
                                [dd_data, g_min, g_max, n_nan, n_fv,
                                 n_grange] = index_dataset_2d(
                                     r, 'presf_wave_burst_pressure', data,
                                     fill_value)
                            t_final = t
                            t0 = pd.to_datetime(
                                min(t_final)).strftime('%Y-%m-%dT%H:%M:%S')
                            t1 = pd.to_datetime(
                                max(t_final)).strftime('%Y-%m-%dT%H:%M:%S')
                            deploy_final = vinfo['deployments']
                            deploy = list(np.unique(deploy_final))
                            deployments = [int(dd) for dd in deploy]

                            num_outliers = []
                            mean = []
                            vmin = []
                            vmax = []
                            sd = []
                            n_stats = []
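                            # per-row statistics (one entry per wavelength or burst)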
                            for i in range(len(dd_data)):
                                dd = dd_data[i]  # screened values (fill values and out-of-range points set to nan)
                                # drop nans before calculating stats
                                dd = dd[~np.isnan(dd)]
                                [
                                    num_outliersi, meani, vmini, vmaxi, sdi,
                                    n_statsi
                                ] = cf.variable_statistics(dd, sd_calc)
                                num_outliers.append(num_outliersi)
                                mean.append(meani)
                                vmin.append(vmini)
                                vmax.append(vmaxi)
                                sd.append(sdi)
                                n_stats.append(n_statsi)

                        else:
                            # if the variable is a 2D array (stored as a dict)
                            if isinstance(vinfo['values'], dict):
                                [dd_data, g_min, g_max, n_nan, n_fv, n_grange
                                 ] = index_dataset_2d(r, vinfo['var_name'],
                                                      data, fill_value)
                                t_final = t
                                t0 = pd.to_datetime(
                                    min(t_final)).strftime('%Y-%m-%dT%H:%M:%S')
                                t1 = pd.to_datetime(
                                    max(t_final)).strftime('%Y-%m-%dT%H:%M:%S')
                                deploy_final = vinfo['deployments']
                                deploy = list(np.unique(deploy_final))
                                deployments = [int(dd) for dd in deploy]

                                num_outliers = []
                                mean = []
                                vmin = []
                                vmax = []
                                sd = []
                                n_stats = []
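                                    # per-row statistics for the 2D variable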
                                for i in range(len(dd_data)):
                                    dd = dd_data[i]  # screened values (fill values and out-of-range points set to nan)
                                    # drop nans before calculating stats
                                    dd = dd[~np.isnan(dd)]
                                    [
                                        num_outliersi, meani, vmini, vmaxi,
                                        sdi, n_statsi
                                    ] = cf.variable_statistics(dd, sd_calc)
                                    num_outliers.append(num_outliersi)
                                    mean.append(meani)
                                    vmin.append(vmini)
                                    vmax.append(vmaxi)
                                    sd.append(sdi)
                                    n_stats.append(n_statsi)
                            else:
                                [dataind, g_min, g_max, n_nan, n_fv, n_grange
                                 ] = index_dataset(r, vinfo['var_name'], data,
                                                   fill_value)
                                t_final = t[dataind]
                                if len(t_final) > 0:
                                    t0 = pd.to_datetime(min(t_final)).strftime(
                                        '%Y-%m-%dT%H:%M:%S')
                                    t1 = pd.to_datetime(max(t_final)).strftime(
                                        '%Y-%m-%dT%H:%M:%S')
                                    data_final = data[dataind]
                                    # if sv == 'Dissolved Oxygen Concentration':
                                    #     xx = (data_final > 0) & (data_final < 400)
                                    #     data_final = data_final[xx]
                                    #     t_final = t_final[xx]
                                    # if sv == 'Seawater Conductivity':
                                    #     xx = (data_final > 1) & (data_final < 400)
                                    #     data_final = data_final[xx]
                                    #     t_final = t_final[xx]
                                    deploy_final = vinfo['deployments'][dataind]
                                    deploy = list(np.unique(deploy_final))
                                    deployments = [int(dd) for dd in deploy]

                                    if len(data_final) > 1:
                                        [
                                            num_outliers, mean, vmin, vmax, sd,
                                            n_stats
                                        ] = cf.variable_statistics(
                                            data_final, sd_calc)
                                    else:
                                        sdcalc = None
                                        num_outliers = None
                                        mean = None
                                        vmin = None
                                        vmax = None
                                        sd = None
                                        n_stats = None
                                else:
                                    sdcalc = None
                                    num_outliers = None
                                    mean = None
                                    vmin = None
                                    vmax = None
                                    sd = None
                                    n_stats = None
                                    deployments = None
                                    t0 = None
                                    t1 = None
                    else:
                        sdcalc = None
                        num_outliers = None
                        mean = None
                        vmin = None
                        vmax = None
                        sd = None
                        n_stats = None
                        deployments = None
                        t0 = None
                        t1 = None
                        t_final = []
                        n_all = None
                        # screening counts are undefined with too few data;
                        # set them so the summary row below never uses stale values
                        n_nan = None
                        n_fv = None
                        n_grange = None

                    if sd_calc:
                        print_sd = sd_calc
                    else:
                        print_sd = sdcalc

                    rows.append([
                        m,
                        list(np.unique(pms)), deployments, sv, lunits, t0, t1,
                        fv_lst, [g_min, g_max], n_all, n_nan, n_fv, n_grange,
                        print_sd, num_outliers, n_stats, mean, vmin, vmax, sd
                    ])

                    if len(t_final) > 0:
                        # plot data used for stats
                        psave_dir = os.path.join(
                            plotting_sDir, array, subsite, r,
                            'timeseries_reviewed_datarange')
                        cf.create_dir(psave_dir)

                        # look up deployment end dates to mark on the plots
                        dr_data = cf.refdes_datareview_json(r)
                        deployments = []
                        end_times = []
                        for index, row in ps_df.iterrows():
                            deploy = row['deployment']
                            deploy_info = cf.get_deployment_information(
                                dr_data, int(deploy[-4:]))
                            deployments.append(int(deploy[-4:]))
                            end_times.append(
                                pd.to_datetime(deploy_info['stop_date']))

                        sname = '-'.join((r, sv))

                        # plot hourly averages for streaming data
                        first_stream = list(sci_vars_dict.keys())[0]
                        if 'streamed' in sci_vars_dict[first_stream]['ms'][0]:
                            sname = '-'.join((sname, 'hourlyavg'))
                            df = pd.DataFrame({
                                'dfx': t_final,
                                'dfy': data_final
                            })
                            dfr = df.resample('H', on='dfx').mean()  # hourly means; 'dfx' must be datetime-like

                            # Plot all data
                            fig, ax = pf.plot_timeseries_all(dfr.index,
                                                             dfr['dfy'],
                                                             sv,
                                                             lunits[0],
                                                             stdev=None)
                            ax.set_title((r + '\nDeployments: ' +
                                          str(sorted(deployments)) + '\n' +
                                          t0 + ' - ' + t1),
                                         fontsize=8)
                            for etimes in end_times:
                                ax.axvline(x=etimes,
                                           color='k',
                                           linestyle='--',
                                           linewidth=.6)
                            pf.save_fig(psave_dir, sname)

                            if sd_calc:
                                sname = '-'.join(
                                    (sname, 'hourlyavg_rmoutliers'))
                                fig, ax = pf.plot_timeseries_all(dfr.index,
                                                                 dfr['dfy'],
                                                                 sv,
                                                                 lunits[0],
                                                                 stdev=sd_calc)
                                ax.set_title((r + '\nDeployments: ' +
                                              str(sorted(deployments)) + '\n' +
                                              t0 + ' - ' + t1),
                                             fontsize=8)
                                for etimes in end_times:
                                    ax.axvline(x=etimes,
                                               color='k',
                                               linestyle='--',
                                               linewidth=.6)
                                pf.save_fig(psave_dir, sname)

                        elif 'SPKIR' in r:
                            fig, ax = pf.plot_spkir(t_final, dd_data, sv,
                                                    lunits[0])
                            ax.set_title((r + '\nDeployments: ' +
                                          str(sorted(deployments)) + '\n' +
                                          t0 + ' - ' + t1),
                                         fontsize=8)
                            for etimes in end_times:
                                ax.axvline(x=etimes,
                                           color='k',
                                           linestyle='--',
                                           linewidth=.6)
                            pf.save_fig(psave_dir, sname)

                            # plot each wavelength
                            wavelengths = [
                                '412nm', '443nm', '490nm', '510nm', '555nm',
                                '620nm', '683nm'
                            ]
                            for wvi in range(len(dd_data)):
                                fig, ax = pf.plot_spkir_wv(
                                    t_final, dd_data[wvi], sv, lunits[0], wvi)
                                ax.set_title((r + '\nDeployments: ' +
                                              str(sorted(deployments)) + '\n' +
                                              t0 + ' - ' + t1),
                                             fontsize=8)
                                for etimes in end_times:
                                    ax.axvline(x=etimes,
                                               color='k',
                                               linestyle='--',
                                               linewidth=.6)
                                snamewvi = '-'.join((sname, wavelengths[wvi]))
                                pf.save_fig(psave_dir, snamewvi)
                        elif 'presf_abc_wave_burst' in m:
                            fig, ax = pf.plot_presf_2d(t_final, dd_data, sv,
                                                       lunits[0])
                            ax.set_title((r + '\nDeployments: ' +
                                          str(sorted(deployments)) + '\n' +
                                          t0 + ' - ' + t1),
                                         fontsize=8)
                            for etimes in end_times:
                                ax.axvline(x=etimes,
                                           color='k',
                                           linestyle='--',
                                           linewidth=.6)
                            snamewave = '-'.join((sname, m))
                            pf.save_fig(psave_dir, snamewave)

                        else:  # plot all data if not streamed or 2D
                            # if the variable is not a 2D array
                            if not isinstance(vinfo['values'], dict):
                                fig, ax = pf.plot_timeseries_all(t_final,
                                                                 data_final,
                                                                 sv,
                                                                 lunits[0],
                                                                 stdev=None)
                                ax.set_title((r + '\nDeployments: ' +
                                              str(sorted(deployments)) + '\n' +
                                              t0 + ' - ' + t1),
                                             fontsize=8)
                                for etimes in end_times:
                                    ax.axvline(x=etimes,
                                               color='k',
                                               linestyle='--',
                                               linewidth=.6)
                                pf.save_fig(psave_dir, sname)

                                if sd_calc:
                                    sname = '-'.join((r, sv, 'rmoutliers'))
                                    fig, ax = pf.plot_timeseries_all(
                                        t_final,
                                        data_final,
                                        sv,
                                        lunits[0],
                                        stdev=sd_calc)
                                    ax.set_title((r + '\nDeployments: ' +
                                                  str(sorted(deployments)) +
                                                  '\n' + t0 + ' - ' + t1),
                                                 fontsize=8)
                                    for etimes in end_times:
                                        ax.axvline(x=etimes,
                                                   color='k',
                                                   linestyle='--',
                                                   linewidth=.6)
                                    pf.save_fig(psave_dir, sname)

        fsum = pd.DataFrame(rows, columns=headers)
        fsum.to_csv('{}/{}_data_ranges.csv'.format(save_dir, r), index=False)
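
A minimal invocation sketch (an illustration added here, not part of the original example). The directories and the THREDDS catalog URL are hypothetical placeholders; the URL's second-to-last path segment must carry the timestamp, reference designator fields, delivery method, and stream name that main() parses, and the module's helper imports (cf, cd, pf, pandas, numpy, itertools, os) are assumed to be in scope.

if __name__ == '__main__':
    # Hypothetical inputs -- replace with real output directories and catalog urls
    sDir = '/path/to/data_review/stats'
    plotting_sDir = '/path/to/data_review/plots'
    url_list = [
        'https://opendap.oceanobservatories.org/thredds/catalog/ooi/username/'
        '20190101T000000-GI03FLMA-RIM01-02-CTDMOG040-recovered_inst-'
        'ctdmo_ghqr_instrument_recovered/catalog.html'
    ]
    sd_calc = 5  # assumed: number of standard deviations for outlier rejection
    main(sDir, plotting_sDir, url_list, sd_calc)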