Example #1
# compute the standard error of the mean
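# (standard error of the mean = sqrt(regional variance) / sqrt(n_eff), where
#  n_eff = nobs / dcor is the effective number of independent observations)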
swh_reg_stdm = []
for ireg in range(5):
    swh_reg_stdm.append(
        np.sqrt(swh_reg_var_m_n[ireg]) / np.sqrt(n_eff_reg[ireg]))

# Perform a weighted least squares annual cycle fit to the mean regional data
hfit_basin_swh, x_data_basin, x_data_sigma_basin = [], [], []
for ireg in range(5):

    # Perform weighted least squares fit
    hfit, x_data, x_data_sigma = weighted_least_square_fit(
        data=np.ma.copy(swh_reg_m_n[ireg]),
        sigma=np.ma.copy(swh_reg_stdm[ireg]),
        trend="sinusoidal",
        parameters=3,
        period=12,
    )

    # save data
    hfit_basin_swh.append(hfit)
    x_data_basin.append(x_data)
    x_data_sigma_basin.append(x_data_sigma)
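
# Optional sketch (not in the original script): converting the annual-cycle fit
# coefficients to an amplitude and phase. This assumes the 3-parameter sinusoidal
# model is x[0] + x[1]*cos(2*pi*t/12) + x[2]*sin(2*pi*t/12); check the
# weighted_least_square_fit documentation before relying on this ordering.
annual_amp_swh, annual_phase_swh = [], []
for ireg in range(5):
    a_cos, b_sin = x_data_basin[ireg][1], x_data_basin[ireg][2]
    annual_amp_swh.append(np.sqrt(a_cos**2 + b_sin**2))
    annual_phase_swh.append(np.arctan2(b_sin, a_cos))  # phase in radians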

# Compute regional climatologies:

####### Western Australia #######
# initialize variables
ngrid = 4
lon_grid = 108
def regional_clima(
    data_mean,
    data_var,
    data_n,
    lon,
    lat,
    lon_grid,
    lat_grid,
    ngrid,
    dcor,
    loc,
    lsf,
    parameters,
):
    """
    regional_clima(data_mean, data_var, data_n, lon, lat, lon_grid, lat_grid, ngrid, dcor, loc, lsf, parameters)

        Function to compute the regional climatology from monthly climatological data with specified grid and location
        in the ocean.

        Parameters
        ----------
        data_mean : Monthly Climatology data with the following dimensions in 2D masked geospatial arrays:
                    (ntime, nlat, nlon) = (12, 133, 360)
        data_var : Monthly Climatology data's variance with the following dimensions in 2D masked
                   geospatial arrays: (ntime, nlat, nlon) = (12, 133, 360)
        data_n : Number of observations used in computing Monthly Climatology mean and variance with the
                 following dimensions: (ntime, nlat, nlon) = (12, 133, 360). Used to compute the standard
                 error of the mean for error bars.
        lon : Longitude vector
            ex: lon = np.arange(0, 360, 1)
        lat : Latitude vector
            ex: lat = np.arange(-66, 66, 1)
        lon_grid : Initial longitude grid point to compute the regional climatology
            ex: lon_grid = 230
        lat_grid : Initial latitude grid point to compute the regional climatology
            ex: lat_grid = 230
        ngrid : Specifies the size of the n by n grid box that the regional climatology will be computed in.
            ex: ngrid = 2
        dcor : Decorrelation scale (climatological resolution) used for computing degrees of freedom or dof
               (used to compute standard error of the mean) where dof is defined as:
               dof = n_eff = nobs/dcor where nobs = number of observations in time series. dcor must be an array.
        loc : Specifies if the regional climatology is in the northern or southern hemisphere or if it is east or
              west of the prime meridian.
            ex: loc = [loc_lat, loc_lon] loc[0] = 'NH' or loc[0] = 'SH' and loc[1] = 'west' or loc[1] = 'east'
        lsf : Specifies whether the least squares fit model is weighted or unweighted. Options
              include: lsf = 'weighted' or 'unweighted'
        parameters : Specifies the number of parameters for the model. See the
                     weighted_least_square_fit documentation for details.

        Returns
        -------
        data_reg_mean : Regional climatology mean
            ex: data_reg_mean.shape = (1,12)
        data_reg_stdm : Regional climatology standard error of the mean
            ex: data_reg_stdm.shape = (1,12)
        hfit : Least square fit model.
        x_data : Least squares fit model coefficients.
        residual : Difference between the model and data.
        grid_coordinates : Indices from longitude and latitude which the climatology is computed for.

        Libraries necessary to run function
        -----------------------------------
        import numpy as np
        from lsf import least_square_fit, weighted_least_square_fit
    """

    # Set path to my functions:
    import sys

    sys.path.append("../tools/")

    # import libraries
    import numpy as np

    # import my functions
    from lsf import least_square_fit, weighted_least_square_fit

    # case 1: west of prime meridian
    if loc[1] == "west":
        loc_lon = 360
    # case 2: east of prime meridian
    elif loc[1] == "east":
        loc_lon = 0

    # latitude and longitude grid points that will be averaged over:
    lat_grid_i = lat[lat_grid]
    lat_grid_f = lat[lat_grid + ngrid - 1]
    lon_grid_i = lon[lon_grid] - loc_lon
    lon_grid_f = lon[lon_grid + ngrid - 1] - loc_lon
    grid_cor = [lat_grid_i, lat_grid_f, lon_grid_i, lon_grid_f]

    # call mean, variance, number of observations, and decorrelation scale from grid box indices:
    data_grid_mean = data_mean[:, lat_grid:(lat_grid + ngrid),
                               lon_grid:(lon_grid + ngrid)]
    data_grid_var = data_var[:, lat_grid:(lat_grid + ngrid),
                             lon_grid:(lon_grid + ngrid)]
    data_grid_n = data_n[:, lat_grid:(lat_grid + ngrid),
                         lon_grid:(lon_grid + ngrid)]
    data_grid_dcor = dcor[:, lat_grid:(lat_grid + ngrid),
                          lon_grid:(lon_grid + ngrid)]

    # Compute the regional mean and the spatially averaged variance, decorrelation scale,
    # and number of observations for the region
    data_reg_mean = np.ma.mean(np.ma.mean(data_grid_mean,
                                          axis=1,
                                          dtype=np.float64),
                               axis=1)
    data_reg_var = np.ma.mean(np.ma.mean(data_grid_var,
                                         axis=1,
                                         dtype=np.float64),
                              axis=1)
    dcor_reg = np.ma.mean(np.ma.mean(data_grid_dcor, axis=1, dtype=np.float64),
                          axis=1)
    data_reg_n_mean = np.ma.mean(np.ma.mean(data_grid_n,
                                            axis=1,
                                            dtype=np.float64),
                                 axis=1)

    # Compute N_eff:
    n_eff = data_grid_n / data_grid_dcor
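    # (n_eff is the effective number of independent observations per grid cell:
    #  the raw observation count reduced by the decorrelation scale)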

    # Compute the average number of degrees of freedom (n_eff):
    n_eff_mean = np.ma.mean(np.ma.mean(n_eff, axis=1, dtype=np.float64),
                            axis=1)

    # compute the standard error of the mean
    data_reg_stdm = np.sqrt(data_reg_var) / np.sqrt(n_eff_mean)

    # compute the least square fit:
    if lsf == "weighted":
        hfit, x_data, x_data_sigma = weighted_least_square_fit(
            data=np.ma.copy(data_reg_mean),
            sigma=np.ma.copy(data_reg_stdm),
            trend="sinusoidal",
            parameters=parameters,
            period=12,
        )
    elif lsf == "unweighted":
        hfit, x_data = least_square_fit(
            data=np.ma.copy(data_reg_mean),
            trend="sinusoidal",
            parameters=parameters,
            period=12,
        )

    # compute the residual between the model and the regional climatology:
    residual = hfit - data_reg_mean

    # For SH regional climatologies, shift the time series so that austral summer months are centered in the figure
    if loc[0] == "SH":

        # Shift so that July-December precede January-June
        data_reg_mean = np.ma.concatenate([data_reg_mean[6:], data_reg_mean[:6]])
        data_reg_stdm = np.ma.concatenate([data_reg_stdm[6:], data_reg_stdm[:6]])
        hfit = np.ma.concatenate([hfit[6:], hfit[:6]])
        residual = np.ma.concatenate([residual[6:], residual[:6]])

    return data_reg_mean, data_reg_stdm, hfit, x_data, residual, grid_cor
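
# Usage sketch (illustrative only, not from the original script): calling
# regional_clima with the Western Australia grid settings above (ngrid = 4,
# lon_grid = 108). The synthetic input arrays and the lat_grid index are
# hypothetical placeholders, and the fit itself still requires the author's
# lsf module on ../tools/.
import numpy as np

demo_shape = (12, 133, 360)                       # (ntime, nlat, nlon)
rng = np.random.default_rng(0)
demo_mean = np.ma.masked_invalid(rng.normal(2.5, 0.5, demo_shape))
demo_var = np.ma.masked_invalid(rng.uniform(0.1, 0.5, demo_shape))
demo_n = np.ma.masked_invalid(rng.uniform(50, 200, demo_shape))
demo_dcor = np.ma.ones(demo_shape) * 2.0          # hypothetical 2-month decorrelation scale

wa_mean, wa_stdm, wa_hfit, wa_x, wa_res, wa_grid = regional_clima(
    data_mean=demo_mean,
    data_var=demo_var,
    data_n=demo_n,
    lon=np.arange(0, 360, 1),
    lat=np.arange(-66, 67, 1),
    lon_grid=108,          # Western Australia longitude index (from the setup above)
    lat_grid=33,           # hypothetical latitude index (about 33 deg S here)
    ngrid=4,
    dcor=demo_dcor,
    loc=["SH", "east"],    # southern hemisphere, east of the prime meridian
    lsf="weighted",
    parameters=3,
)
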
        ival_wsp_spr = np.count_nonzero(~np.ma.getmask(wsp_mean_ts[2:5]))
        ival_wsp_sum = np.count_nonzero(~np.ma.getmask(wsp_mean_ts[5:8]))
        ival_wsp_f = np.count_nonzero(~np.ma.getmask(wsp_mean_ts[8:11]))

        # Count the number of valid (non-zero, unmasked) values in the mean wsp time series:
        wsp_count = len(wsp_mean_ts[np.ma.nonzero(wsp_mean_ts)])

        # only fit if every season has at least one observation and at least 5 monthly values exist:
        if (ival_wsp_w > 0 and ival_wsp_spr > 0 and ival_wsp_sum > 0
                and ival_wsp_f > 0 and wsp_count >= 5):

            # compute least square fit:
            wsp_hfit_w, x_wsp_w, x_wsp_sigma = weighted_least_square_fit(
                data=np.ma.copy(wsp_mean_ts),
                sigma=np.ma.copy(wsp_stdm_ts),
                trend="sinusoidal",
                parameters=parameters,
                period=12,
            )

            # compute parameters of least square fit
            (
                wsp_res,
                wsp_rms,
                wsp_amp1,
                wsp_phase1,
                wsp_amp2,
                wsp_phase2,
                wsp_fve,
            ) = LSF_parameters(
                data=np.ma.copy(wsp_mean_ts),
Example #4
# compute the effective degrees of freedom (n_eff) for each region
n_eff_reg = []
for ireg in range(5):
    n_eff_reg.append(wsp_reg_n_m_n[ireg] / wsp_reg_dcor_m_n[0])

# compute the standard error of the mean
wsp_reg_stdm = []
for ireg in range(5):
    wsp_reg_stdm.append(np.sqrt(wsp_reg_var_m_n[ireg]) / np.sqrt(n_eff_reg[ireg]))

# Perform a weighted least squares annual cycle fit to the mean regional data
hfit_reg_wsp = []
for ireg in range(5):

    # Perform weighted least squares fit
    hfit, x_data, x_data_sigma = weighted_least_square_fit(
        data=np.ma.copy(wsp_reg_m_n[ireg]),
        sigma=np.ma.copy(wsp_reg_stdm[ireg]),
        trend="sinusoidal",
        parameters=3,
        period=12,
    )

    # save data
    hfit_reg_wsp.append(hfit)

# Change the order of the climatology variables and fits for SH partitions:
####### Climatology #######
wsp_reg_m_n[2] = np.ma.concatenate([wsp_reg_m_n[2][6:], wsp_reg_m_n[2][:6]])
wsp_reg_m_n[3] = np.ma.concatenate([wsp_reg_m_n[3][6:], wsp_reg_m_n[3][:6]])