Example #1
def get_sonde_profile(sonde_1, sonde_2, smooth):
    '''
    Read ozonesonde profiles

    Return: two profile DataFrames
    '''

    # --------------- read data --------------- #
    profile_1, _ = read_profile(sonde_1, smooth=smooth)
    profile_2, _ = read_profile(sonde_2, smooth=smooth)
    profile_1 = profile_1.reset_index(drop=True)
    profile_2 = profile_2.reset_index(drop=True)
    # --------------- correct data --------------- #
    # Because something is wrong with the ozonesonde data,
    # we need to correct it ourselves.
    profile_1['T'] -= 9

    # set surface pressure (hPa)
    profile_1['PR'][0] = 1001.60
    profile_2['PR'][0] = 998.4

    # correct pressures based on GPS and radiosonde
    corrected_profile_1 = correct_p(profile_1.copy())
    corrected_profile_2 = correct_p(profile_2.copy())

    # reassign and calculate water mixing ratio again ...
    profile_1 = corrected_profile_1
    profile_2 = corrected_profile_2

    profile_1['QV'] = mpcalc.mixing_ratio_from_relative_humidity(
                            pressure=profile_1['PR'].values * units.hPa,
                            temperature=profile_1['T'].values * units.degC,
                            relative_humidity=profile_1['rh'].values * units.percent)
    profile_2['QV'] = mpcalc.mixing_ratio_from_relative_humidity(
                            pressure=profile_2['PR'].values * units.hPa,
                            temperature=profile_2['T'].values * units.degC,
                            relative_humidity=profile_2['rh'].values * units.percent)

    # ---------  convert units ---------- #
    profile_1['QV'] *= 1e6  # ppm
    profile_2['QV'] *= 1e6  # ppm
    profile_1['h'] /= 1e3  # km
    profile_2['h'] /= 1e3  # km

    # ---------  calculate tropopause ---------- #
    # itrop_1, trop_1 = trop_wmo(np.flipud(profile_1['PR']), np.flipud(profile_1['T'] + 273.15))
    # itrop_2, trop_2 = trop_wmo(np.flipud(profile_2['PR']), np.flipud(profile_2['T'] + 273.15))
    # trop = [itrop_1, itrop_2]
    # print (trop_1, trop_2)

    return profile_1, profile_2
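
A note on the surface-pressure assignments above: profile_1['PR'][0] = ... is pandas chained assignment, which can raise SettingWithCopyWarning and silently fail to write through on a copy. A minimal sketch of the single-step .loc form, using a hypothetical profile:

import pandas as pd

profile = pd.DataFrame({'PR': [1000.2, 950.1, 900.5]})  # hypothetical pressures (hPa)
profile.loc[0, 'PR'] = 1001.60  # set surface pressure in one indexing operation
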
Example #2
def test_mixing_ratio_from_relative_humidity():
    """Test relative humidity from mixing ratio."""
    p = 1013.25 * units.mbar
    temperature = 20. * units.degC
    rh = 81.7219 * units.percent
    w = mixing_ratio_from_relative_humidity(rh, temperature, p)
    assert_almost_equal(w, 0.012 * units.dimensionless, 3)
Example #3
def test_mixing_ratio_from_rh_dimensions():
    """Verify mixing ratio from RH returns a dimensionless number."""
    p = 1000. * units.mbar
    temperature = 0. * units.degC
    rh = 100. * units.percent
    assert (str(mixing_ratio_from_relative_humidity(rh, temperature, p).units) ==
            'dimensionless')
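
The snippets on this page mix two calling conventions for mixing_ratio_from_relative_humidity: Example #1 passes pressure, temperature, and relative_humidity by keyword, while the tests above pass (rh, temperature, pressure) positionally, which is the pre-1.0 MetPy order. MetPy 1.0 reordered the positional arguments to (pressure, temperature, relative_humidity), so a keyword call, as in this minimal sketch, is the safest across versions:

import metpy.calc as mpcalc
from metpy.units import units

w = mpcalc.mixing_ratio_from_relative_humidity(
    pressure=1013.25 * units.hPa,
    temperature=20. * units.degC,
    relative_humidity=81.7219 * units.percent)
print(w)  # ~0.012 (dimensionless, i.e. kg/kg)
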
Example #5
def run_calcs(df, ctx):
    """Do our maths."""
    # Convert sea level pressure to station pressure
    df['pressure'] = mcalc.add_height_to_pressure(
        df['slp'].values * units('millibars'),
        ctx['nt'].sts[ctx['station']]['elevation'] * units('m')).to(
            units('millibar'))
    # Compute the mixing ratio
    df['mixingratio'] = mcalc.mixing_ratio_from_relative_humidity(
        df['relh'].values * units('percent'),
        df['tmpf'].values * units('degF'),
        df['pressure'].values * units('millibars'))
    # Compute the saturation mixing ratio
    df['saturation_mixingratio'] = mcalc.saturation_mixing_ratio(
        df['pressure'].values * units('millibars'),
        df['tmpf'].values * units('degF'))
    df['vapor_pressure'] = mcalc.vapor_pressure(
        df['pressure'].values * units('millibars'),
        df['mixingratio'].values * units('kg/kg')).to(units('kPa'))
    df['saturation_vapor_pressure'] = mcalc.vapor_pressure(
        df['pressure'].values * units('millibars'),
        df['saturation_mixingratio'].values * units('kg/kg')).to(units('kPa'))
    df['vpd'] = df['saturation_vapor_pressure'] - df['vapor_pressure']
    group = df.groupby('year')
    df = group.aggregate(np.average)

    df['dwpf'] = mcalc.dewpoint(df['vapor_pressure'].values * units('kPa')).to(
        units('degF')).m
    return df
Example #6
def run_calcs(df, ctx):
    """Do our maths."""
    # Convert sea level pressure to station pressure
    df["pressure"] = mcalc.add_height_to_pressure(
        df["slp"].values * units("millibars"),
        ctx["_nt"].sts[ctx["station"]]["elevation"] * units("m"),
    ).to(units("millibar"))
    # Compute the mixing ratio
    df["mixingratio"] = mcalc.mixing_ratio_from_relative_humidity(
        df["relh"].values * units("percent"),
        df["tmpf"].values * units("degF"),
        df["pressure"].values * units("millibars"),
    )
    # Compute the saturation mixing ratio
    df["saturation_mixingratio"] = mcalc.saturation_mixing_ratio(
        df["pressure"].values * units("millibars"),
        df["tmpf"].values * units("degF"),
    )
    df["vapor_pressure"] = mcalc.vapor_pressure(
        df["pressure"].values * units("millibars"),
        df["mixingratio"].values * units("kg/kg"),
    ).to(units("kPa"))
    df["saturation_vapor_pressure"] = mcalc.vapor_pressure(
        df["pressure"].values * units("millibars"),
        df["saturation_mixingratio"].values * units("kg/kg"),
    ).to(units("kPa"))
    df["vpd"] = df["saturation_vapor_pressure"] - df["vapor_pressure"]
    # remove any NaN rows
    df = df.dropna()
    group = df.groupby("year")
    df = group.aggregate(np.average)

    df["dwpf"] = (mcalc.dewpoint(df["vapor_pressure"].values *
                                 units("kPa")).to(units("degF")).m)
    return df
Example #7
def test_mixing_ratio_from_rh_dimensions():
    """Verify mixing ratio from RH returns a dimensionless number."""
    p = 1000. * units.mbar
    temperature = 0. * units.degC
    rh = 100. * units.percent
    assert (str(mixing_ratio_from_relative_humidity(
        rh, temperature, p).units) == 'dimensionless')
Example #8
def calc_thta_vir(united_data):
    """
    returns virtual potential temperature (K)
    and equvalent potential temperaure (K)
    """

    pres = united_data['PRES']
    temp = united_data['TEMP']
    rh = united_data['HUM']
    mixing = mpcalc.mixing_ratio_from_relative_humidity(rh, temp, pres)
    theta_vir = mpcalc.virtual_potential_temperature(pres, temp, mixing)

    td = mpcalc.dewpoint_rh(temp, rh)
    theta_e = mpcalc.equivalent_potential_temperature(pres, temp, td)

    return theta_vir, theta_e
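
Note that mpcalc.dewpoint_rh used above was deprecated and removed in MetPy 1.0 in favour of dewpoint_from_relative_humidity. A minimal sketch of the equivalent calculation on current MetPy, with hypothetical values:

import metpy.calc as mpcalc
from metpy.units import units

pres = 1000. * units.hPa
temp = 25. * units.degC
rh = 60. * units.percent
td = mpcalc.dewpoint_from_relative_humidity(temp, rh)  # replaces dewpoint_rh(temp, rh)
theta_e = mpcalc.equivalent_potential_temperature(pres, temp, td)
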
Example #9
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn("asos")
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx["zstation"]
    month = ctx["month"]

    if month == "all":
        months = range(1, 13)
    elif month == "fall":
        months = [9, 10, 11]
    elif month == "winter":
        months = [12, 1, 2]
    elif month == "spring":
        months = [3, 4, 5]
    elif month == "summer":
        months = [6, 7, 8]
    else:
        ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
        # make sure it is length two for the trick below in SQL
        months = [ts.month, 999]

    df = read_sql(
        """
        SELECT tmpf::int as tmpf, dwpf, relh,
        coalesce(mslp, alti * 33.8639, 1013.25) as slp
        from alldata where station = %s
        and drct is not null and dwpf is not null and dwpf <= tmpf
        and sknt > 3 and drct::int %% 10 = 0
        and extract(month from valid) in %s
        and report_type = 2
    """,
        pgconn,
        params=(station, tuple(months)),
    )
    if df.empty:
        raise NoDataFound("No Data Found.")
    # Convert sea level pressure to station pressure
    df["pressure"] = mcalc.add_height_to_pressure(
        df["slp"].values * units("millibars"),
        ctx["_nt"].sts[station]["elevation"] * units("m"),
    ).to(units("millibar"))
    # compute mixing ratio
    df["mixingratio"] = mcalc.mixing_ratio_from_relative_humidity(
        df["relh"].values * units("percent"),
        df["tmpf"].values * units("degF"),
        df["pressure"].values * units("millibars"),
    )
    # compute pressure
    df["vapor_pressure"] = mcalc.vapor_pressure(
        df["pressure"].values * units("millibars"),
        df["mixingratio"].values * units("kg/kg"),
    ).to(units("kPa"))

    means = df.groupby("tmpf").mean().copy()
    # compute dewpoint now
    means["dwpf"] = (
        mcalc.dewpoint(means["vapor_pressure"].values * units("kPa"))
        .to(units("degF"))
        .m
    )
    means.reset_index(inplace=True)
    # compute RH again
    means["relh"] = (
        mcalc.relative_humidity_from_dewpoint(
            means["tmpf"].values * units("degF"),
            means["dwpf"].values * units("degF"),
        )
        * 100.0
    )

    (fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
    ax.bar(
        means["tmpf"].values - 0.5,
        means["dwpf"].values - 0.5,
        ec="green",
        fc="green",
        width=1,
    )
    ax.grid(True, zorder=11)
    ab = ctx["_nt"].sts[station]["archive_begin"]
    if ab is None:
        raise NoDataFound("Unknown station metadata.")
    ax.set_title(
        (
            "%s [%s]\nAverage Dew Point by Air Temperature (month=%s) "
            "(%s-%s)\n"
            "(must have 3+ hourly observations at the given temperature)"
        )
        % (
            ctx["_nt"].sts[station]["name"],
            station,
            month.upper(),
            ab.year,
            datetime.datetime.now().year,
        ),
        size=10,
    )

    ax.plot([0, 140], [0, 140], color="b")
    ax.set_ylabel("Dew Point [F]")
    y2 = ax.twinx()
    y2.plot(means["tmpf"].values, means["relh"].values, color="k")
    y2.set_ylabel("Relative Humidity [%] (black line)")
    y2.set_yticks([0, 5, 10, 25, 50, 75, 90, 95, 100])
    y2.set_ylim(0, 100)
    ax.set_ylim(0, means["tmpf"].max() + 2)
    ax.set_xlim(0, means["tmpf"].max() + 2)
    ax.set_xlabel(r"Air Temperature $^\circ$F")

    return fig, means[["tmpf", "dwpf", "relh"]]
Example #10
def calc_param_wrf_par(it):

    #This function has been copied here from calc_param_wrf_par so that it can use global arrays

    t, param = it
    wg = False

    p_3d = np.moveaxis(np.tile(p, [ta.shape[0], ta.shape[2], ta.shape[3], 1]),
                       [0, 1, 2, 3], [0, 2, 3, 1])
    param = np.array(param)
    param_out = [0] * (len(param))
    for i in np.arange(0, len(param)):
        param_out[i] = np.empty((len(lat), len(lon)))
    if len(param) != len(np.unique(param)):
        raise ValueError("Each parameter can only appear once in parameter list")
    print(date_list[t])

    start = dt.datetime.now()
    hur_unit = units.percent * hur[t, :, :, :]
    ta_unit = units.degC * ta[t, :, :, :]
    dp_unit = units.degC * dp[t, :, :, :]
    p_unit = units.hectopascals * p_3d[t, :, :, :]
    q_unit = mpcalc.mixing_ratio_from_relative_humidity(hur_unit,\
     ta_unit,p_unit)
    theta_unit = mpcalc.potential_temperature(p_unit, ta_unit)
    q = np.array(q_unit)

    ml_inds = ((p_3d[t] <= ps[t]) & (p_3d[t] >= (ps[t] - 100)))
    ml_ta_avg = np.ma.masked_where(~ml_inds, ta[t]).mean(axis=0).data
    ml_q_avg = np.ma.masked_where(~ml_inds, q).mean(axis=0).data
    ml_hgt_avg = np.ma.masked_where(~ml_inds, hgt[t]).mean(axis=0).data
    ml_p3d_avg = np.ma.masked_where(~ml_inds, p_3d[t]).mean(axis=0).data
    ml_ta_arr = np.insert(ta[t], 0, ml_ta_avg, axis=0)
    ml_q_arr = np.insert(q, 0, ml_q_avg, axis=0)
    ml_hgt_arr = np.insert(hgt[t], 0, ml_hgt_avg, axis=0)
    ml_p3d_arr = np.insert(p_3d[t], 0, ml_p3d_avg, axis=0)
    a,temp1,temp2 = np.meshgrid(np.arange(ml_p3d_arr.shape[0]) ,\
      np.arange(ml_p3d_arr.shape[1]), np.arange(ml_p3d_arr.shape[2]))
    sort_inds = np.flipud(
        np.lexsort([np.swapaxes(a, 1, 0), ml_p3d_arr], axis=0))
    ml_ta_arr = np.take_along_axis(ml_ta_arr, sort_inds, axis=0)
    ml_p3d_arr = np.take_along_axis(ml_p3d_arr, sort_inds, axis=0)
    ml_hgt_arr = np.take_along_axis(ml_hgt_arr, sort_inds, axis=0)
    ml_q_arr = np.take_along_axis(ml_q_arr, sort_inds, axis=0)
    cape3d_mlavg = wrf.cape_3d(ml_p3d_arr,ml_ta_arr + 273.15,\
     ml_q_arr,ml_hgt_arr,terrain,ps[t,:,:],False,meta=False,missing=0)
    ml_cape = np.ma.masked_where(~((ml_ta_arr==ml_ta_avg) & (ml_p3d_arr==ml_p3d_avg)),\
     cape3d_mlavg.data[0]).max(axis=0).filled(0)
    ml_cin = np.ma.masked_where(~((ml_ta_arr==ml_ta_avg) & (ml_p3d_arr==ml_p3d_avg)),\
     cape3d_mlavg.data[1]).max(axis=0).filled(0)

    cape3d = wrf.cape_3d(p_3d[t,:,:,:],ta[t,:,:,:]+273.15,q,hgt[t,:,:,:],terrain,ps[t,:,:],\
     True,meta=False,missing=0)
    cape = cape3d.data[0]
    cin = cape3d.data[1]
    cape[p_3d[t] > ps[t] - 25] = np.nan
    cin[p_3d[t] > ps[t] - 25] = np.nan
    mu_cape_inds = np.nanargmax(cape, axis=0)
    mu_cape = mu_cape_inds.choose(cape)
    mu_cin = mu_cape_inds.choose(cin)
    cape_2d = wrf.cape_2d(p_3d[t,:,:,:],ta[t,:,:,:]+273.15,q\
     ,hgt[t,:,:,:],terrain,ps[t,:,:],True,meta=False,missing=0)
    lcl = cape_2d[2].data
    lfc = cape_2d[3].data

    del hur_unit, dp_unit, theta_unit, ml_inds, ml_ta_avg, ml_q_avg, \
     ml_hgt_avg, ml_p3d_avg, ml_ta_arr, ml_q_arr, ml_hgt_arr, ml_p3d_arr, a, temp1, temp2,\
     sort_inds, cape3d_mlavg, cape3d, cape, cin, cape_2d

    if "relhum850-500" in param:
        param_ind = np.where(param == "relhum850-500")[0][0]
        param_out[param_ind] = get_mean_var_p(hur[t], p, 850, 500)
    if "relhum1000-700" in param:
        param_ind = np.where(param == "relhum1000-700")[0][0]
        param_out[param_ind] = get_mean_var_p(hur[t], p, 1000, 700)
    if "mu_cape" in param:
        param_ind = np.where(param == "mu_cape")[0][0]
        param_out[param_ind] = mu_cape
    if "ml_cape" in param:
        param_ind = np.where(param == "ml_cape")[0][0]
        param_out[param_ind] = ml_cape
    if "s06" in param:
        param_ind = np.where(param == "s06")[0][0]
        s06 = get_shear_hgt(ua[t],va[t],hgt[t],0,6000,\
         uas[t],vas[t])
        param_out[param_ind] = s06
    if "s03" in param:
        param_ind = np.where(param == "s03")[0][0]
        s03 = get_shear_hgt(ua[t],va[t],hgt[t],0,3000,\
         uas[t],vas[t])
        param_out[param_ind] = s03
    if "s01" in param:
        param_ind = np.where(param == "s01")[0][0]
        s01 = get_shear_hgt(ua[t],va[t],hgt[t],0,1000,\
         uas[t],vas[t])
        param_out[param_ind] = s01
    if "s0500" in param:
        param_ind = np.where(param == "s0500")[0][0]
        param_out[param_ind] = get_shear_hgt(ua[t],va[t],hgt[t],0,500,\
         uas[t],vas[t])
    if "lr1000" in param:
        param_ind = np.where(param == "lr1000")[0][0]
        lr1000 = get_lr_hgt(ta[t], hgt[t], 0, 1000)
        param_out[param_ind] = lr1000
    if "mu_cin" in param:
        param_ind = np.where(param == "mu_cin")[0][0]
        param_out[param_ind] = mu_cin
    if "lcl" in param:
        param_ind = np.where(param == "lcl")[0][0]
        temp_lcl = np.copy(lcl)
        temp_lcl[temp_lcl <= 0] = np.nan
        param_out[param_ind] = temp_lcl
    if "ml_cin" in param:
        param_ind = np.where(param == "ml_cin")[0][0]
        param_out[param_ind] = ml_cin
    if "srh01" in param:
        param_ind = np.where(param == "srh01")[0][0]
        srh01 = get_srh(ua[t], va[t], hgt[t], 1000, True, 850, 700, p)
        param_out[param_ind] = srh01
    if "srh03" in param:
        srh03 = get_srh(ua[t], va[t], hgt[t], 3000, True, 850, 700, p)
        param_ind = np.where(param == "srh03")[0][0]
        param_out[param_ind] = srh03
    if "srh06" in param:
        param_ind = np.where(param == "srh06")[0][0]
        srh06 = get_srh(ua[t], va[t], hgt[t], 6000, True, 850, 700, p)
        param_out[param_ind] = srh06
    if "ship" in param:
        if "s06" not in param:
            raise NameError("To calculate ship, s06 must be included")
        param_ind = np.where(param == "ship")[0][0]
        muq = mu_cape_inds.choose(q)
        ship = get_ship(mu_cape, np.copy(muq), ta[t], ua[t], va[t], hgt[t], p,
                        np.copy(s06))
        param_out[param_ind] = ship
    if "mmp" in param:
        param_ind = np.where(param == "mmp")[0][0]
        param_out[param_ind] = get_mmp(ua[t],va[t],uas[t],vas[t],\
         mu_cape,ta[t],hgt[t])
    if "scp" in param:
        if "srh03" not in param:
            raise NameError("To calculate ship, srh03 must be included")
        param_ind = np.where(param == "scp")[0][0]
        scell_pot = get_supercell_pot(mu_cape,ua[t],va[t],hgt[t],ta_unit,p_unit,\
          q_unit,srh03)
        param_out[param_ind] = scell_pot
    if "stp" in param:
        if "srh01" not in param:
            raise NameError("To calculate stp, srh01 must be included")
        param_ind = np.where(param == "stp")[0][0]
        stp = get_tornado_pot(ml_cape,np.copy(lcl),np.copy(ml_cin),ua[t],va[t],p_3d[t],hgt[t],p,\
         np.copy(srh01))
        param_out[param_ind] = stp
    if "vo10" in param:
        param_ind = np.where(param == "vo10")[0][0]
        x, y = np.meshgrid(lon, lat)
        dx, dy = mpcalc.lat_lon_grid_deltas(x, y)
        vo10 = get_vo(uas[t], vas[t], dx, dy)
        param_out[param_ind] = vo10
    if "conv10" in param:
        param_ind = np.where(param == "conv10")[0][0]
        x, y = np.meshgrid(lon, lat)
        dx, dy = mpcalc.lat_lon_grid_deltas(x, y)
        param_out[param_ind] = get_conv(uas[t], vas[t], dx, dy)
    if "conv1000-850" in param:
        levs = np.where((p <= 1001) & (p >= 849))[0]
        param_ind = np.where(param == "conv1000-850")[0][0]
        x, y = np.meshgrid(lon, lat)
        dx, dy = mpcalc.lat_lon_grid_deltas(x, y)
        param_out[param_ind] = \
         np.mean(np.stack([get_conv(ua[t,i],va[t,i],dx,dy) for i in levs]),axis=0)
    if "conv800-600" in param:
        levs = np.where((p <= 801) & (p >= 599))[0]
        param_ind = np.where(param == "conv800-600")[0][0]
        x, y = np.meshgrid(lon, lat)
        dx, dy = mpcalc.lat_lon_grid_deltas(x, y)
        param_out[param_ind] = \
         np.mean(np.stack([get_conv(ua[t,i],va[t,i],dx,dy) for i in levs]),axis=0)
    if "non_sc_stp" in param:
        if "vo10" not in param:
            raise NameError("To calculate non_sc_stp, vo must be included")
        if "lr1000" not in param:
            raise NameError("To calculate non_sc_stp, lr1000 must be included")
        param_ind = np.where(param == "non_sc_stp")[0][0]
        non_sc_stp = get_non_sc_tornado_pot(ml_cape,ml_cin,np.copy(lcl),ua[t],va[t],\
         uas[t],vas[t],p_3d[t],ta[t],hgt[t],p,vo10,lr1000)
        param_out[param_ind] = non_sc_stp
    if "cape*s06" in param:
        param_ind = np.where(param == "cape*s06")[0][0]
        cs6 = ml_cape * np.power(s06, 1.67)
        param_out[param_ind] = cs6
    if "td850" in param:
        param_ind = np.where(param == "td850")[0][0]
        td850 = get_td_diff(ta[t], dp[t], p_3d[t], 850)
        param_out[param_ind] = td850
    if "td800" in param:
        param_ind = np.where(param == "td800")[0][0]
        param_out[param_ind] = get_td_diff(ta[t], dp[t], p_3d[t], 800)
    if "td950" in param:
        param_ind = np.where(param == "td950")[0][0]
        param_out[param_ind] = get_td_diff(ta[t], dp[t], p_3d[t], 950)
    if "wg" in param:
        try:
            param_ind = np.where(param == "wg")[0][0]
            param_out[param_ind] = wg[t]
        except ValueError:
            print("wg field expected, but not parsed")
    if "dcape" in param:
        param_ind = np.where(param == "dcape")[0][0]
        dcape = np.nanmax(get_dcape(p_3d[t], ta[t], hgt[t], p, ps[t]), axis=0)
        param_out[param_ind] = dcape
    if "mlm" in param:
        param_ind = np.where(param == "mlm")[0][0]
        mlm_u, mlm_v = get_mean_wind(ua[t], va[t], hgt[t], 800, 600, False,
                                     None, "plevels", p)
        mlm = np.sqrt(np.square(mlm_u) + np.square(mlm_v))
        param_out[param_ind] = mlm
    if "dlm" in param:
        param_ind = np.where(param == "dlm")[0][0]
        dlm_u, dlm_v = get_mean_wind(ua[t], va[t], hgt[t], 1000, 500, False,
                                     None, "plevels", p)
        dlm = np.sqrt(np.square(dlm_u) + np.square(dlm_v))
        param_out[param_ind] = dlm
    if "dlm+dcape" in param:
        param_ind = np.where(param == "dlm+dcape")[0][0]
        dlm_dcape = dlm + np.sqrt(2 * dcape)
        param_out[param_ind] = dlm_dcape
    if "mlm+dcape" in param:
        param_ind = np.where(param == "mlm+dcape")[0][0]
        mlm_dcape = mlm + np.sqrt(2 * dcape)
        param_out[param_ind] = mlm_dcape
    if "dcape*cs6" in param:
        param_ind = np.where(param == "dcape*cs6")[0][0]
        param_out[param_ind] = (dcape / 980.) * (cs6 / 20000)
    if "dlm*dcape*cs6" in param:
        param_ind = np.where(param == "dlm*dcape*cs6")[0][0]
        param_out[param_ind] = (dlm_dcape / 30.) * (cs6 / 20000)
    if "mlm*dcape*cs6" in param:
        param_ind = np.where(param == "mlm*dcape*cs6")[0][0]
        param_out[param_ind] = (mlm_dcape / 30.) * (cs6 / 20000)
    if "dcp" in param:
        param_ind = np.where(param == "dcp")[0][0]
        param_out[param_ind] = (dcape / 980) * (mu_cape /
                                                2000) * (s06 / 10) * (dlm / 8)
    if "mf" in param:
        param_ind = np.where(param == "mf")[0][0]
        mf = ((ml_cape > 120) & (dcape > 350) & (mlm < 26))
        mf = mf * 1.0
        param_out[param_ind] = mf
    if "sf" in param:
        param_ind = np.where(param == "sf")[0][0]
        sf = ((s06 >= 30) & (dcape < 500) & (mlm >= 26))
        sf = sf * 1.0
        param_out[param_ind] = sf
    if "cond" in param:
        param_ind = np.where(param == "cond")[0][0]
        cond = (sf == 1.0) | (mf == 1.0)
        cond = cond * 1.0
        param_out[param_ind] = cond

    return param_out
def main():
	load_start = dt.datetime.now()
	#Try parsing arguments using argparse
	parser = argparse.ArgumentParser(description='wrf non-parallel convective diagnostics processor')
	parser.add_argument("-m",help="Model name",required=True)
	parser.add_argument("-r",help="Region name (default is aus)",default="aus")
	parser.add_argument("-t1",help="Time start YYYYMMDDHH",required=True)
	parser.add_argument("-t2",help="Time end YYYYMMDDHH",required=True)
	parser.add_argument("-e", help="CMIP5 experiment name (not required if using era5, erai or barra)", default="")
	parser.add_argument("--ens", help="CMIP5 ensemble name (not required if using era5, erai or barra)", default="r1i1p1")
	parser.add_argument("--group", help="CMIP6 modelling group name", default="")
	parser.add_argument("--project", help="CMIP6 modelling intercomparison project", default="CMIP")
	parser.add_argument("--ver6hr", help="Version on al33 for 6hr data", default="")
	parser.add_argument("--ver3hr", help="Version on al33 for 3hr data", default="")
	parser.add_argument("--issave",help="Save output (True or False, default is False)", default="False")
	parser.add_argument("--outname",help="Name of saved output. In the form *outname*_*t1*_*t2*.nc. Default behaviour is the model name",default=None)
	parser.add_argument("--al33",help="Should data be gathered from al33? Default is False, and data is gathered from r87. If True, then group is required",default="False")
	args = parser.parse_args()

	#Parse arguments from cmd line and set up inputs (date region model)
	model = args.m
	region = args.r
	t1 = args.t1
	t2 = args.t2
	issave = args.issave
	al33 = args.al33
	if args.outname is None:
		out_name = model
	else:
		out_name = args.outname
	experiment = args.e
	ensemble = args.ens
	group = args.group
	project = args.project
	ver6hr = args.ver6hr
	ver3hr = args.ver3hr
	if region == "sa_small":
		start_lat = -38; end_lat = -26; start_lon = 132; end_lon = 142
	elif region == "aus":
		start_lat = -44.525; end_lat = -9.975; start_lon = 111.975; end_lon = 156.275
	elif region == "global":
		start_lat = -70; end_lat = 70; start_lon = -180; end_lon = 179.75
	else:
		raise ValueError("INVALID REGION\n")
	domain = [start_lat,end_lat,start_lon,end_lon]
	try:
		time = [dt.datetime.strptime(t1,"%Y%m%d%H"),dt.datetime.strptime(t2,"%Y%m%d%H")]
	except ValueError:
		raise ValueError("INVALID START OR END TIME. SHOULD BE YYYYMMDDHH\n")
	if issave=="True":
		issave = True
	elif issave=="False":
		issave = False
	else:
		raise ValueError("\n INVALID ISSAVE...SHOULD BE True OR False")
	if al33=="True":
		al33 = True
	elif al33=="False":
		al33 = False
	else:
		raise ValueError("\n INVALID al33...SHOULD BE True OR False")

	#Load data
	print("LOADING DATA...")
	if model in ["ACCESS1-0","ACCESS1-3","GFDL-CM3","GFDL-ESM2M","CNRM-CM5","MIROC5",\
		    "MRI-CGCM3","IPSL-CM5A-LR","IPSL-CM5A-MR","GFDL-ESM2G","bcc-csm1-1","MIROC-ESM",\
		    "BNU-ESM"]:
		#Check that t1 and t2 are in the same year
		year = np.arange(int(t1[0:4]), int(t2[0:4])+1)
		ta, hur, hgt, terrain, p_3d, ps, ua, va, uas, vas, tas, ta2d, tp, lon, lat, \
		    date_list = read_cmip(model, experiment, \
		    ensemble, year, domain, cmip_ver=5, al33=al33, group=group, ver6hr=ver6hr, ver3hr=ver3hr)
		p = np.zeros(p_3d[0,:,0,0].shape)
		tp = tp.astype("float32", order="C")
	elif model in ["ACCESS-ESM1-5", "ACCESS-CM2"]:
		year = np.arange(int(t1[0:4]), int(t2[0:4])+1)
		ta, hur, hgt, terrain, p_3d, ps, ua, va, uas, vas, tas, ta2d, lon, lat, \
		    date_list = read_cmip(model, experiment,\
		    ensemble, year, domain, cmip_ver=6, group=group, project=project)
		p = np.zeros(p_3d[0,:,0,0].shape)
	else:
		raise ValueError("Model not recognised")
	ta = ta.astype("float32", order="C")
	hur = hur.astype("float32", order="C")
	hgt = hgt.astype("float32", order="C")
	terrain = terrain.astype("float32", order="C")
	p = p.astype("float32", order="C")
	ps = ps.astype("float32", order="C")
	ua = ua.astype("float32", order="C")
	va = va.astype("float32", order="C")
	uas = uas.astype("float32", order="C")
	vas = vas.astype("float32", order="C")
	tas= tas.astype("float32", order="C")
	ta2d = ta2d.astype("float32", order="C")
	lon = lon.astype("float32", order="C")
	lat = lat.astype("float32", order="C")

	gc.collect()

	#This param list was originally given to AD for ERA5 global lightning report
	#param = np.array(["mu_cape", "eff_cape","ncape","mu_cin", "muq", "s06", "s0500", "lr700_500", "mhgt", "ta500","tp","cp","laplacian","t_totals"])
	#This param list is intended for application to GCMs based on AD ERA5 lightning report
	param = np.array(["mu_cape","s06","laplacian","t_totals","ta850","ta500","dp850","tp",\
		"muq","lr700_500","mhgt","z500"])

	#Set output array
	output_data = np.zeros((ps.shape[0], ps.shape[1], ps.shape[2], len(param)))


	#Assign p levels to a 3d array, with same dimensions as input variables (ta, hgt, etc.)
	#If the 3d p-lvl array already exists, then declare the variable "mdl_lvl" as true. 
	try:
		p_3d;
		mdl_lvl = True
		full_p3d = p_3d
	except:
		mdl_lvl = False
		p_3d = np.moveaxis(np.tile(p,[ta.shape[2],ta.shape[3],1]),[0,1,2],[1,2,0]).\
			astype(np.float32)

	print("LOAD TIME..."+str(dt.datetime.now()-load_start))
	tot_start = dt.datetime.now()

	for t in np.arange(0,ta.shape[0]):
		output = np.zeros((1, ps.shape[1], ps.shape[2], len(param)))
		cape_start = dt.datetime.now()

		print(date_list[t])

		if mdl_lvl:
			p_3d = full_p3d[t]

		dp = get_dp(hur=hur[t], ta=ta[t], dp_mask = False)

		#Insert surface arrays, creating new arrays with "sfc" prefix
		sfc_ta = np.insert(ta[t], 0, tas[t], axis=0) 
		sfc_hgt = np.insert(hgt[t], 0, terrain, axis=0) 
		sfc_dp = np.insert(dp, 0, ta2d[t], axis=0) 
		sfc_p_3d = np.insert(p_3d, 0, ps[t], axis=0) 
		sfc_ua = np.insert(ua[t], 0, uas[t], axis=0) 
		sfc_va = np.insert(va[t], 0, vas[t], axis=0) 

		#Sort by ascending p
		a,temp1,temp2 = np.meshgrid(np.arange(sfc_p_3d.shape[0]) , np.arange(sfc_p_3d.shape[1]),\
			 np.arange(sfc_p_3d.shape[2]))
		sort_inds = np.flip(np.lexsort([np.swapaxes(a,1,0),sfc_p_3d],axis=0), axis=0)
		sfc_hgt = np.take_along_axis(sfc_hgt, sort_inds, axis=0)
		sfc_dp = np.take_along_axis(sfc_dp, sort_inds, axis=0)
		sfc_p_3d = np.take_along_axis(sfc_p_3d, sort_inds, axis=0)
		sfc_ua = np.take_along_axis(sfc_ua, sort_inds, axis=0)
		sfc_va = np.take_along_axis(sfc_va, sort_inds, axis=0)
		sfc_ta = np.take_along_axis(sfc_ta, sort_inds, axis=0)

		#Calculate q and wet bulb for pressure level arrays with surface values
		sfc_ta_unit = units.units.degC*sfc_ta
		sfc_dp_unit = units.units.degC*sfc_dp
		sfc_p_unit = units.units.hectopascals*sfc_p_3d
		sfc_hur_unit = mpcalc.relative_humidity_from_dewpoint(sfc_ta_unit, sfc_dp_unit)*\
			100*units.units.percent
		sfc_q_unit = mpcalc.mixing_ratio_from_relative_humidity(sfc_hur_unit,\
			sfc_ta_unit,sfc_p_unit)
		sfc_q = np.array(sfc_q_unit)

		#Now get most-unstable CAPE (max CAPE in vertical, ensuring parcels used are AGL)
		cape3d = wrf.cape_3d(sfc_p_3d,sfc_ta+273.15,\
				sfc_q,sfc_hgt,\
				terrain,ps[t],\
				True,meta=False, missing=0)
		cape = cape3d.data[0]
		cin = cape3d.data[1]
		lfc = cape3d.data[2]
		lcl = cape3d.data[3]
		el = cape3d.data[4]
		#Mask values which are below the surface and above 350 hPa AGL
		cape[(sfc_p_3d > ps[t]) | (sfc_p_3d<(ps[t]-350))] = np.nan
		cin[(sfc_p_3d > ps[t]) | (sfc_p_3d<(ps[t]-350))] = np.nan
		lfc[(sfc_p_3d > ps[t]) | (sfc_p_3d<(ps[t]-350))] = np.nan
		lcl[(sfc_p_3d > ps[t]) | (sfc_p_3d<(ps[t]-350))] = np.nan
		el[(sfc_p_3d > ps[t]) | (sfc_p_3d<(ps[t]-350))] = np.nan
		#Get maximum (in the vertical), and get cin, lfc, lcl for the same parcel
		mu_cape_inds = np.tile(np.nanargmax(cape,axis=0), (cape.shape[0],1,1))
		mu_cape = np.take_along_axis(cape, mu_cape_inds, 0)[0]
		muq = np.take_along_axis(sfc_q, mu_cape_inds, 0)[0] * 1000

		#Calculate other parameters
		#Thermo
		thermo_start = dt.datetime.now()
		lr700_500 = get_lr_p(ta[t], p_3d, hgt[t], 700, 500)
		melting_hgt = get_t_hgt(sfc_ta,np.copy(sfc_hgt),0,terrain)
		melting_hgt = np.where((melting_hgt < 0) | (np.isnan(melting_hgt)), 0, melting_hgt)
		ta500 = get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 500)
		ta850 = get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 850)
		dp850 = get_var_p_lvl(np.copy(sfc_dp), sfc_p_3d, 850)
		v_totals = ta850 - ta500
		c_totals = dp850 - ta500
		t_totals = v_totals + c_totals
		#Winds
		winds_start = dt.datetime.now()
		s06 = get_shear_hgt(sfc_ua, sfc_va, np.copy(sfc_hgt), 0, 6000, terrain)

		#Laplacian
		x, y = np.meshgrid(lon,lat)
		dx, dy = mpcalc.lat_lon_grid_deltas(x,y)
		if mdl_lvl:
			z500 = get_var_p_lvl(hgt[t], p_3d, 500)
			laplacian = np.array(mpcalc.laplacian(z500,deltas=[dy,dx])*1e9)
		else:
			z500 = np.squeeze(hgt[t,p==500])
			laplacian = np.array(mpcalc.laplacian(uniform_filter(z500, 4),deltas=[dy,dx])*1e9)
        

		#Fill output
		output = fill_output(output, t, param, ps, "mu_cape", mu_cape)
		output = fill_output(output, t, param, ps, "muq", muq)
		output = fill_output(output, t, param, ps, "s06", s06)
		output = fill_output(output, t, param, ps, "lr700_500", lr700_500)
		output = fill_output(output, t, param, ps, "ta500", ta500)
		output = fill_output(output, t, param, ps, "ta850", ta850)
		output = fill_output(output, t, param, ps, "dp850", dp850)
		output = fill_output(output, t, param, ps, "mhgt", melting_hgt)
		output = fill_output(output, t, param, ps, "tp", tp[t])
		output = fill_output(output, t, param, ps, "laplacian", laplacian)
		output = fill_output(output, t, param, ps, "t_totals", t_totals)
		output = fill_output(output, t, param, ps, "z500", z500)

		output_data[t] = output

	print("SAVING DATA...")
	param_out = []
	for param_name in param:
		temp_data = output_data[:,:,:,np.where(param==param_name)[0][0]]
		param_out.append(temp_data)

	#If the mhgt variable is zero everywhere, then it is likely that data has not been read.
	#In this case, treat all values as missing and set them to NaN.
	for t in np.arange(param_out[0].shape[0]):
		if param_out[np.where(param=="mhgt")[0][0]][t].max() == 0:
			for i in np.arange(len(param_out)):
				param_out[i][t] = np.nan

	if issave:
		save_netcdf(region, model, out_name, date_list, lat, lon, param, param_out, \
			out_dtype = "f4", compress=True)

	print(dt.datetime.now() - tot_start)
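
Two ways of extracting the most-unstable parcel appear in this example: calc_param_wrf_par uses mu_cape_inds.choose(cape), while main() uses np.take_along_axis. ndarray.choose is limited to 32 choice arrays, so it breaks once a profile has more than 32 vertical levels; a minimal self-contained sketch of the take_along_axis pattern (shapes are hypothetical):

import numpy as np

cape = np.random.rand(40, 3, 4)        # (levels, lat, lon); 40 levels would break choose
inds = np.nanargmax(cape, axis=0)      # level index of maximum CAPE in each column
mu_cape = np.take_along_axis(cape, inds[None, :, :], axis=0)[0]
assert np.allclose(mu_cape, cape.max(axis=0))
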
Example #12
def calculate_stability_indicies(
    ds,
    temp_name='temperature',
    td_name='dewpoint_temperature',
    p_name='pressure',
    rh_name='relative_humidity',
    moving_ave_window=0,
):
    """
    Function for calculating stability indices from sounding data.

    Parameters
    ----------
    ds : ACT dataset
        The dataset to compute the stability indicies of. Must have
        temperature, dewpoint, and pressure in vertical coordinates.
    temp_name : str
        The name of the temperature field.
    td_name : str
        The name of the dewpoint field.
    p_name : str
        The name of the pressure field.
    rh_name : str
        The name of the relative humidity field.
    moving_ave_window : int
        Number of points to do a moving average on sounding data to reduce
        noise. This is useful if noise in the sounding is preventing parcel
        ascent.

    Returns
    -------
    ds : ACT dataset
        An ACT dataset with additional stability indicies added.

    """
    if not METPY_AVAILABLE:
        raise ImportError(
            'MetPy needs to be installed on your system to calculate stability indices'
        )

    t = ds[temp_name]
    td = ds[td_name]
    p = ds[p_name]
    rh = ds[rh_name]

    if not hasattr(t, 'units'):
        raise AttributeError('Temperature field must have units for ACT to discern!')

    if not hasattr(td, 'units'):
        raise AttributeError('Dewpoint field must have units for ACT to discern!')

    if not hasattr(p, 'units'):
        raise AttributeError('Pressure field must have units for ACT to discern!')
    if t.units == 'C':
        t_units = units.degC
    else:
        t_units = getattr(units, t.units)

    if td.units == 'C':
        td_units = units.degC
    else:
        td_units = getattr(units, td.units)

    p_units = getattr(units, p.units)
    rh_units = getattr(units, rh.units)

    # Sort all values by decreasing pressure
    t_sorted = np.array(t.values)
    td_sorted = np.array(td.values)
    p_sorted = np.array(p.values)
    rh_sorted = np.array(rh.values)
    ind_sort = np.argsort(p_sorted)
    t_sorted = t_sorted[ind_sort[-1:0:-1]]
    td_sorted = td_sorted[ind_sort[-1:0:-1]]
    p_sorted = p_sorted[ind_sort[-1:0:-1]]
    rh_sorted = rh_sorted[ind_sort[-1:0:-1]]

    if moving_ave_window > 0:
        t_sorted = np.convolve(t_sorted, np.ones((moving_ave_window,)) / moving_ave_window)
        td_sorted = np.convolve(td_sorted, np.ones((moving_ave_window,)) / moving_ave_window)
        p_sorted = np.convolve(p_sorted, np.ones((moving_ave_window,)) / moving_ave_window)
        rh_sorted = np.convolve(rh_sorted, np.ones((moving_ave_window,)) / moving_ave_window)

    t_sorted = t_sorted * t_units
    td_sorted = td_sorted * td_units
    p_sorted = p_sorted * p_units
    rh_sorted = rh_sorted * rh_units

    # Calculate mixing ratio
    mr = mpcalc.mixing_ratio_from_relative_humidity(p_sorted, t_sorted, rh_sorted)

    # Per the discussion in issue #361, use virtual temperature.
    vt = mpcalc.virtual_temperature(t_sorted, mr)

    t_profile = mpcalc.parcel_profile(p_sorted, t_sorted[0], td_sorted[0])

    # Calculate parcel trajectory
    ds['parcel_temperature'] = t_profile.magnitude
    ds['parcel_temperature'].attrs['units'] = t_profile.units

    # Calculate CAPE, CIN, LCL
    sbcape, sbcin = mpcalc.surface_based_cape_cin(p_sorted, vt, td_sorted)

    lcl = mpcalc.lcl(p_sorted[0], t_sorted[0], td_sorted[0])
    try:
        lfc = mpcalc.lfc(p_sorted[0], t_sorted[0], td_sorted[0])
    except IndexError:
        lfc = np.nan * p_sorted.units

    mucape, mucin = mpcalc.most_unstable_cape_cin(p_sorted, vt, td_sorted)

    where_500 = np.argmin(np.abs(p_sorted - 500 * units.hPa))
    li = t_sorted[where_500] - t_profile[where_500]

    ds['surface_based_cape'] = sbcape.magnitude
    ds['surface_based_cape'].attrs['units'] = 'J/kg'
    ds['surface_based_cape'].attrs['long_name'] = 'Surface-based CAPE'
    ds['surface_based_cin'] = sbcin.magnitude
    ds['surface_based_cin'].attrs['units'] = 'J/kg'
    ds['surface_based_cin'].attrs['long_name'] = 'Surface-based CIN'
    ds['most_unstable_cape'] = mucape.magnitude
    ds['most_unstable_cape'].attrs['units'] = 'J/kg'
    ds['most_unstable_cape'].attrs['long_name'] = 'Most unstable CAPE'
    ds['most_unstable_cin'] = mucin.magnitude
    ds['most_unstable_cin'].attrs['units'] = 'J/kg'
    ds['most_unstable_cin'].attrs['long_name'] = 'Most unstable CIN'
    ds['lifted_index'] = li.magnitude
    ds['lifted_index'].attrs['units'] = t_profile.units
    ds['lifted_index'].attrs['long_name'] = 'Lifted index'
    ds['level_of_free_convection'] = lfc.magnitude
    ds['level_of_free_convection'].attrs['units'] = lfc.units
    ds['level_of_free_convection'].attrs['long_name'] = 'Level of free convection'
    ds['lifted_condensation_level_temperature'] = lcl[1].magnitude
    ds['lifted_condensation_level_temperature'].attrs['units'] = lcl[1].units
    ds['lifted_condensation_level_temperature'].attrs[
        'long_name'
    ] = 'Lifted condensation level temperature'
    ds['lifted_condensation_level_pressure'] = lcl[0].magnitude
    ds['lifted_condensation_level_pressure'].attrs['units'] = lcl[0].units
    ds['lifted_condensation_level_pressure'].attrs[
        'long_name'
    ] = 'Lifted condensation level pressure'
    return ds
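
A hypothetical usage sketch for the function above; the file name is an assumption, and the sounding variables must carry the default names and units attributes the function checks for:

import xarray as xr

ds = xr.open_dataset('sounding.nc')  # hypothetical radiosonde file
ds = calculate_stability_indicies(ds, moving_ave_window=5)
print(ds['surface_based_cape'], ds['lifted_index'])
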
Example #13
def main():
	load_start = dt.datetime.now()
	#Try parsing arguments using argparse
	parser = argparse.ArgumentParser(description='wrf non-parallel convective diagnostics processor')
	parser.add_argument("-m",help="Model name",required=True)
	parser.add_argument("-r",help="Region name (default is aus)",default="aus")
	parser.add_argument("-t1",help="Time start YYYYMMDDHH",required=True)
	parser.add_argument("-t2",help="Time end YYYYMMDDHH",required=True)
	parser.add_argument("-e", help="CMIP5 experiment name (not required if using era5, erai or barra)", default="")
	parser.add_argument("--barpa_forcing_mdl", help="BARPA forcing model (erai or ACCESS1-0). Default erai.", default="erai")
	parser.add_argument("--ens", help="CMIP5 ensemble name (not required if using era5, erai or barra)", default="r1i1p1")
	parser.add_argument("--group", help="CMIP6 modelling group name", default="")
	parser.add_argument("--project", help="CMIP6 modelling intercomparison project", default="CMIP")
	parser.add_argument("--ver6hr", help="Version on al33 for 6hr data", default="")
	parser.add_argument("--ver3hr", help="Version on al33 for 3hr data", default="")
	parser.add_argument("--issave",help="Save output (True or False, default is False)", default="False")
	parser.add_argument("--outname",help="Name of saved output. In the form *outname*_*t1*_*t2*.nc. Default behaviour is the model name",default=None)
	parser.add_argument("--is_dcape",help="Should DCAPE be calculated? (1 or 0. Default is 1)",default=1)
	parser.add_argument("--al33",help="Should data be gathered from al33? Default is False, and data is gathered from r87. If True, then group is required",default="False")
	parser.add_argument("--params",help="Should the full set of convective parameters be calculated (full) or just a reduced set (reduced)",default="full")
	args = parser.parse_args()

	#Parse arguments from cmd line and set up inputs (date region model)
	if args.params == "full":
		full_params = True
	else:
		full_params = False
	model = args.m
	region = args.r
	t1 = args.t1
	t2 = args.t2
	issave = args.issave
	al33 = args.al33
	if args.outname is None:
		out_name = model
	else:
		out_name = args.outname
	is_dcape = args.is_dcape
	barpa_forcing_mdl = args.barpa_forcing_mdl
	experiment = args.e
	ensemble = args.ens
	group = args.group
	project = args.project
	ver6hr = args.ver6hr
	ver3hr = args.ver3hr
	if region == "sa_small":
		start_lat = -38; end_lat = -26; start_lon = 132; end_lon = 142
	elif region == "aus":
		start_lat = -44.525; end_lat = -9.975; start_lon = 111.975; end_lon = 156.275
	else:
		raise ValueError("INVALID REGION\n")
	domain = [start_lat,end_lat,start_lon,end_lon]
	try:
		time = [dt.datetime.strptime(t1,"%Y%m%d%H"),dt.datetime.strptime(t2,"%Y%m%d%H")]
	except ValueError:
		raise ValueError("INVALID START OR END TIME. SHOULD BE YYYYMMDDHH\n")
	if issave=="True":
		issave = True
	elif issave=="False":
		issave = False
	else:
		raise ValueError("\n INVALID ISSAVE...SHOULD BE True OR False")
	if al33=="True":
		al33 = True
	elif al33=="False":
		al33 = False
	else:
		raise ValueError("\n INVALID al33...SHOULD BE True OR False")

	#Load data
	print("LOADING DATA...")
	if model == "erai":
		ta,temp1,hur,hgt,terrain,p,ps,wap,ua,va,uas,vas,tas,ta2d,\
			cp,wg10,mod_cape,lon,lat,date_list = \
			read_erai(domain,time)
		cp = cp.astype("float32", order="C")
		mod_cape = mod_cape.astype("float32", order="C")
	elif model == "era5":
		ta,temp1,hur,hgt,terrain,p,ps,ua,va,uas,vas,tas,ta2d,\
			cp,wg10,mod_cape,lon,lat,date_list = \
			read_era5(domain,time)
		cp = cp.astype("float32", order="C")
		mod_cape = mod_cape.astype("float32", order="C")
		wap = np.zeros(hgt.shape)
	elif model == "barra":
		ta,temp1,hur,hgt,terrain,p,ps,wap,ua,va,uas,vas,tas,ta2d,wg10,lon,lat,date_list = \
			read_barra(domain,time)
	elif model == "barra_fc":
		ta,temp1,hur,hgt,terrain,p,ps,wap,ua,va,uas,vas,tas,ta2d,wg10,lon,lat,date_list = \
			read_barra_fc(domain,time)
	elif model == "barpa":
		ta,hur,hgt,terrain,p,ps,ua,va,uas,vas,tas,ta2d,wg10,lon,lat,date_list = \
			read_barpa(domain, time, experiment, barpa_forcing_mdl, ensemble)
		wap = np.zeros(hgt.shape)
		temp1 = None
	elif model == "barra_ad":
		wg10,temp2,ta,temp1,hur,hgt,terrain,p,ps,wap,ua,va,uas,vas,tas,ta2d,lon,lat,date_list = \
			read_barra_ad(domain, time, False)
	elif model in ["ACCESS1-0","ACCESS1-3","GFDL-CM3","GFDL-ESM2M","CNRM-CM5","MIROC5",\
		    "MRI-CGCM3","IPSL-CM5A-LR","IPSL-CM5A-MR","GFDL-ESM2G","bcc-csm1-1","MIROC-ESM",\
		    "BNU-ESM"]:
		#Check that t1 and t2 are in the same year
		year = np.arange(int(t1[0:4]), int(t2[0:4])+1)
		ta, hur, hgt, terrain, p_3d, ps, ua, va, uas, vas, tas, ta2d, lon, lat, \
		    date_list = read_cmip(model, experiment, \
		    ensemble, year, domain, cmip_ver=5, al33=al33, group=group, ver6hr=ver6hr, ver3hr=ver3hr)
		wap = np.zeros(hgt.shape)
		wg10 = np.zeros(ps.shape)
		p = np.zeros(p_3d[0,:,0,0].shape)
		#date_list = pd.to_datetime(date_list).to_pydatetime()
		temp1 = None
	elif model in ["ACCESS-ESM1-5", "ACCESS-CM2"]:
		year = np.arange(int(t1[0:4]), int(t2[0:4])+1)
		ta, hur, hgt, terrain, p_3d, ps, ua, va, uas, vas, tas, ta2d, lon, lat, \
		    date_list = read_cmip(model, experiment,\
		    ensemble, year, domain, cmip_ver=6, group=group, project=project)
		wap = np.zeros(hgt.shape)
		wg10 = np.zeros(ps.shape)
		p = np.zeros(p_3d[0,:,0,0].shape)
		#date_list = pd.to_datetime(date_list).to_pydatetime()
		temp1 = None
	else:
		raise ValueError("Model not recognised")
	del temp1
	ta = ta.astype("float32", order="C")
	hur = hur.astype("float32", order="C")
	hgt = hgt.astype("float32", order="C")
	terrain = terrain.astype("float32", order="C")
	p = p.astype("float32", order="C")
	ps = ps.astype("float32", order="C")
	wap = wap.astype("float32", order="C")
	ua = ua.astype("float32", order="C")
	va = va.astype("float32", order="C")
	uas = uas.astype("float32", order="C")
	vas = vas.astype("float32", order="C")
	tas= tas.astype("float32", order="C")
	ta2d = ta2d.astype("float32", order="C")
	wg10 = wg10.astype("float32", order="C")
	lon = lon.astype("float32", order="C")
	lat = lat.astype("float32", order="C")

	gc.collect()

	if full_params:
		param = np.array(["ml_cape", "mu_cape", "sb_cape", "ml_cin", "sb_cin", "mu_cin",\
			"ml_lcl", "mu_lcl", "sb_lcl", "eff_cape", "eff_cin", "eff_lcl",\
			"lr01", "lr03", "lr13", "lr36", "lr24", "lr_freezing","lr_subcloud",\
			"qmean01", "qmean03", "qmean06", \
			"qmeansubcloud", "q_melting", "q1", "q3", "q6",\
			"rhmin01", "rhmin03", "rhmin13", \
			"rhminsubcloud", "tei", "wbz", \
			"mhgt", "mu_el", "ml_el", "sb_el", "eff_el", \
			"pwat", "v_totals", "c_totals", "t_totals", \
			"te_diff", "dpd850", "dpd700", "dcape", "ddraft_temp", "sfc_thetae", \
			\
			"srhe_left", "srh01_left", "srh03_left", "srh06_left", \
			"ebwd", "s010", "s06", "s03", "s01", "s13", "s36", "scld", \
			"U500", "U10", "U1", "U3", "U6", \
			"Ust_left", "Usr01_left",\
			"Usr03_left", "Usr06_left", \
			"Uwindinf", "Umeanwindinf", "Umean800_600", "Umean06", \
			"Umean01", "Umean03", "wg10",\
			\
			"dcp", "stp_cin_left", "stp_fixed_left",\
			"scp", "scp_fixed", "ship",\
			"mlcape*s06", "mucape*s06", "sbcape*s06", "effcape*s06", \
			"dmgwind", "dmgwind_fixed", "hmi", "wmsi_ml",\
			"dmi", "mwpi_ml", "convgust_wet", "convgust_dry", "windex",\
			"gustex", "eff_sherb", "sherb", "mmp", \
			"wndg","mburst","sweat","k_index","wmpi",\
			\
			"F10", "Fn10", "Fs10", "icon10", "vgt10", "conv10", "vo10",\
				])
	else:
		param = np.array(["ml_cape", "mu_cape", "sb_cape", "ml_cin", "sb_cin", "mu_cin",\
			"ml_lcl", "mu_lcl", "sb_lcl", "eff_cape", "eff_cin", "eff_lcl",\
			"lr36", "lr_freezing","lr_subcloud",\
			"qmean01",  \
			"qmeansubcloud",\
			"mhgt", "mu_el", "ml_el", "sb_el", "eff_el", \
			"pwat", "t_totals", \
			"dcape", \
			\
			"srhe_left", "srh03_left", \
			"ebwd", "s06", "s03", \
			"U10", \
			"Umean800_600", "Umean06", \
			"wg10",\
			\
			"dcp", "stp_cin_left", "stp_fixed_left",\
			"scp", "scp_fixed", "ship",\
			"mlcape*s06", "mucape*s06", "sbcape*s06", "effcape*s06", \
			"dmgwind", "dmgwind_fixed", \
			"convgust_wet", "convgust_dry", "windex",\
			"gustex", "mmp", \
			"wndg","sweat","k_index"\
				])


	if model != "era5":
		param = np.concatenate([param, ["omega01", "omega03", "omega06", \
			"maxtevv", "mosh", "moshe"]])
	else:
		param = np.concatenate([param, ["cp"]])
	if model == "erai":
		param = np.concatenate([param, ["cape","cp","cape*s06"]])

	#Set output array
	output_data = np.zeros((ps.shape[0], ps.shape[1], ps.shape[2], len(param)))


	#Assign p levels to a 3d array, with same dimensions as input variables (ta, hgt, etc.)
	#If the 3d p-lvl array already exists, then declare the variable "mdl_lvl" as true. 
	try:
		p_3d;
		mdl_lvl = True
		full_p3d = p_3d
	except:
		mdl_lvl = False
		p_3d = np.moveaxis(np.tile(p,[ta.shape[2],ta.shape[3],1]),[0,1,2],[1,2,0]).\
			astype(np.float32)

	print("LOAD TIME..."+str(dt.datetime.now()-load_start))
	tot_start = dt.datetime.now()
	for t in np.arange(0,ta.shape[0]):
		output = np.zeros((1, ps.shape[1], ps.shape[2], len(param)))
		cape_start = dt.datetime.now()
	
		print(date_list[t])

		if mdl_lvl:
			p_3d = full_p3d[t]

		dp = get_dp(hur=hur[t], ta=ta[t], dp_mask = False)

		#Insert surface arrays, creating new arrays with "sfc" prefix
		sfc_ta = np.insert(ta[t], 0, tas[t], axis=0) 
		sfc_hgt = np.insert(hgt[t], 0, terrain, axis=0) 
		sfc_dp = np.insert(dp, 0, ta2d[t], axis=0) 
		sfc_p_3d = np.insert(p_3d, 0, ps[t], axis=0) 
		sfc_ua = np.insert(ua[t], 0, uas[t], axis=0) 
		sfc_va = np.insert(va[t], 0, vas[t], axis=0) 
		sfc_wap = np.insert(wap[t], 0, np.zeros(vas[t].shape), axis=0) 

		#Sort by ascending p
		a,temp1,temp2 = np.meshgrid(np.arange(sfc_p_3d.shape[0]) , np.arange(sfc_p_3d.shape[1]),\
			 np.arange(sfc_p_3d.shape[2]))
		sort_inds = np.flip(np.lexsort([np.swapaxes(a,1,0),sfc_p_3d],axis=0), axis=0)
		sfc_hgt = np.take_along_axis(sfc_hgt, sort_inds, axis=0)
		sfc_dp = np.take_along_axis(sfc_dp, sort_inds, axis=0)
		sfc_p_3d = np.take_along_axis(sfc_p_3d, sort_inds, axis=0)
		sfc_ua = np.take_along_axis(sfc_ua, sort_inds, axis=0)
		sfc_va = np.take_along_axis(sfc_va, sort_inds, axis=0)
		sfc_ta = np.take_along_axis(sfc_ta, sort_inds, axis=0)

		#Calculate q and wet bulb for pressure level arrays with surface values
		sfc_ta_unit = units.units.degC*sfc_ta
		sfc_dp_unit = units.units.degC*sfc_dp
		sfc_p_unit = units.units.hectopascals*sfc_p_3d
		sfc_hur_unit = mpcalc.relative_humidity_from_dewpoint(sfc_ta_unit, sfc_dp_unit)*\
			100*units.units.percent
		sfc_q_unit = mpcalc.mixing_ratio_from_relative_humidity(sfc_hur_unit,\
			sfc_ta_unit,sfc_p_unit)
		sfc_theta_unit = mpcalc.potential_temperature(sfc_p_unit,sfc_ta_unit)
		sfc_thetae_unit = mpcalc.equivalent_potential_temperature(sfc_p_unit,sfc_ta_unit,sfc_dp_unit)
		sfc_q = np.array(sfc_q_unit)
		sfc_hur = np.array(sfc_hur_unit)
		sfc_wb = np.array(wrf.wetbulb( sfc_p_3d*100, sfc_ta+273.15, sfc_q, units="degC"))

		#Calculate mixed-layer parcel indices, based on avg sfc-100 hPa AGL layer parcel.
		#First, find avg values for ta, p, hgt and q for ML (between the surface
		# and 100 hPa AGL)
		ml_inds = ((sfc_p_3d <= ps[t]) & (sfc_p_3d >= (ps[t] - 100)))
		ml_p3d_avg = ( np.ma.masked_where(~ml_inds, sfc_p_3d).min(axis=0) + np.ma.masked_where(~ml_inds, sfc_p_3d).max(axis=0) ) / 2.
		ml_hgt_avg = ( np.ma.masked_where(~ml_inds, sfc_hgt).min(axis=0) + np.ma.masked_where(~ml_inds, sfc_hgt).max(axis=0) ) / 2.
		ml_ta_avg = trapz_int3d(sfc_ta, sfc_p_3d, ml_inds ).astype(np.float32)
		ml_q_avg = trapz_int3d(sfc_q, sfc_p_3d, ml_inds ).astype(np.float32)

		#Insert the mean values into the bottom of the 3d arrays pressure-level arrays
		ml_ta_arr = np.insert(sfc_ta,0,ml_ta_avg,axis=0)
		ml_q_arr = np.insert(sfc_q,0,ml_q_avg,axis=0)
		ml_hgt_arr = np.insert(sfc_hgt,0,ml_hgt_avg,axis=0)
		ml_p3d_arr = np.insert(sfc_p_3d,0,ml_p3d_avg,axis=0)
		#Sort by ascending p
		a,temp1,temp2 = np.meshgrid(np.arange(ml_p3d_arr.shape[0]) ,\
			 np.arange(ml_p3d_arr.shape[1]), np.arange(ml_p3d_arr.shape[2]))
		sort_inds = np.flipud(np.lexsort([np.swapaxes(a,1,0),ml_p3d_arr],axis=0))
		ml_ta_arr = np.take_along_axis(ml_ta_arr, sort_inds, axis=0)
		ml_p3d_arr = np.take_along_axis(ml_p3d_arr, sort_inds, axis=0)
		ml_hgt_arr = np.take_along_axis(ml_hgt_arr, sort_inds, axis=0)
		ml_q_arr = np.take_along_axis(ml_q_arr, sort_inds, axis=0)
		#Calculate CAPE using wrf-python. 
		cape3d_mlavg = wrf.cape_3d(ml_p3d_arr.astype(np.float64),\
			(ml_ta_arr + 273.15).astype(np.float64),\
			ml_q_arr.astype(np.float64),\
			ml_hgt_arr.astype(np.float64),terrain.astype(np.float64),\
			ps[t].astype(np.float64),False,meta=False, missing=0)
		ml_cape = np.ma.masked_where(~((ml_ta_arr==ml_ta_avg) & (ml_p3d_arr==ml_p3d_avg)),\
			cape3d_mlavg.data[0]).max(axis=0).filled(0)
		ml_cin = np.ma.masked_where(~((ml_ta_arr==ml_ta_avg) & (ml_p3d_arr==ml_p3d_avg)),\
			cape3d_mlavg.data[1]).max(axis=0).filled(0)
		ml_lfc = np.ma.masked_where(~((ml_ta_arr==ml_ta_avg) & (ml_p3d_arr==ml_p3d_avg)),\
			cape3d_mlavg.data[2]).max(axis=0).filled(0)
		ml_lcl = np.ma.masked_where(~((ml_ta_arr==ml_ta_avg) & (ml_p3d_arr==ml_p3d_avg)),\
			cape3d_mlavg.data[3]).max(axis=0).filled(0)
		ml_el = np.ma.masked_where(~((ml_ta_arr==ml_ta_avg) & (ml_p3d_arr==ml_p3d_avg)),\
			cape3d_mlavg.data[4]).max(axis=0).filled(0)

		#Now get most-unstable CAPE (max CAPE in vertical, ensuring parcels used are AGL)
		cape3d = wrf.cape_3d(sfc_p_3d,sfc_ta+273.15,\
				sfc_q,sfc_hgt,\
				terrain,ps[t],\
				True,meta=False, missing=0)
		cape = cape3d.data[0]
		cin = cape3d.data[1]
		lfc = cape3d.data[2]
		lcl = cape3d.data[3]
		el = cape3d.data[4]
		#Mask values which are below the surface and above 500 hPa AGL
		cape[(sfc_p_3d > ps[t]) | (sfc_p_3d<(ps[t]-500))] = np.nan
		cin[(sfc_p_3d > ps[t]) | (sfc_p_3d<(ps[t]-500))] = np.nan
		lfc[(sfc_p_3d > ps[t]) | (sfc_p_3d<(ps[t]-500))] = np.nan
		lcl[(sfc_p_3d > ps[t]) | (sfc_p_3d<(ps[t]-500))] = np.nan
		el[(sfc_p_3d > ps[t]) | (sfc_p_3d<(ps[t]-500))] = np.nan
		#Get maximum (in the vertical), and get cin, lfc, lcl for the same parcel
		mu_cape_inds = np.tile(np.nanargmax(cape,axis=0), (cape.shape[0],1,1))
		mu_cape = np.take_along_axis(cape, mu_cape_inds, 0)[0]
		mu_cin = np.take_along_axis(cin, mu_cape_inds, 0)[0]
		mu_lfc = np.take_along_axis(lfc, mu_cape_inds, 0)[0]
		mu_lcl = np.take_along_axis(lcl, mu_cape_inds, 0)[0]
		mu_el = np.take_along_axis(el, mu_cape_inds, 0)[0]
		muq = np.take_along_axis(sfc_q, mu_cape_inds, 0)[0]

		#Now get surface based CAPE. Simply the CAPE defined by parcel 
		#with surface properties
		sb_cape = np.ma.masked_where(~((sfc_p_3d==ps[t])),\
			cape).max(axis=0).filled(0)
		sb_cin = np.ma.masked_where(~((sfc_p_3d==ps[t])),\
			cin).max(axis=0).filled(0)
		sb_lfc = np.ma.masked_where(~((sfc_p_3d==ps[t])),\
			lfc).max(axis=0).filled(0)
		sb_lcl = np.ma.masked_where(~((sfc_p_3d==ps[t])),\
			lcl).max(axis=0).filled(0)
		sb_el = np.ma.masked_where(~((sfc_p_3d==ps[t])),\
			el).max(axis=0).filled(0)

		#Now get the effective-inflow layer parcel CAPE. The layer parcel is defined by
		# the mass-weighted average conditions of the inflow layer; the layer
		# between where the profile has CAPE > 100 and CIN < 250.
		#If no effective layer, effective layer CAPE is zero.
		#Only levels below 500 hPa AGL are considered

		#EDITS (23/01/2020)
		#Do not get surface-based values when eff_cape is not defined. Just leave as zero.
		#If an effective layer is only one level, the parcel is defined with quantities at
		# that level. Previously, quantities were defined as zero, because of the averaging
		# routine (i.e. because the pressure difference between the top of the effective layer and the
		# bottom is zero). I assume this would result in zero CAPE (given q would be zero)
		eff_cape, eff_cin, eff_lfc, eff_lcl, eff_el, eff_hgt, eff_avg_hgt = get_eff_cape(\
			cape, cin, sfc_p_3d, sfc_ta, sfc_hgt, sfc_q, ps[t], terrain)
		eff_cape = np.where(np.isnan(eff_cape), 0, eff_cape)
		eff_cin = np.where(np.isnan(eff_cin), 0, eff_cin)
		eff_lfc = np.where(np.isnan(eff_lfc), 0, eff_lfc)
		eff_lcl = np.where(np.isnan(eff_lcl), 0, eff_lcl)
		eff_el = np.where(np.isnan(eff_el), 0, eff_el)

		#Calculate other parameters
		#Thermo
		thermo_start = dt.datetime.now()
		lr01 = get_lr_hgt(sfc_ta,np.copy(sfc_hgt),0,1000,terrain)
		lr03 = get_lr_hgt(sfc_ta,np.copy(sfc_hgt),0,3000,terrain)
		lr13 = get_lr_hgt(sfc_ta,np.copy(sfc_hgt),1000,3000,terrain)
		lr24 = get_lr_hgt(sfc_ta,np.copy(sfc_hgt),2000,4000,terrain)
		lr36 = get_lr_hgt(sfc_ta,np.copy(sfc_hgt),3000,6000,terrain)
		lr_freezing = get_lr_hgt(sfc_ta,np.copy(sfc_hgt),0,"freezing",terrain)
		lr_subcloud = get_lr_hgt(sfc_ta,np.copy(sfc_hgt),0,ml_lcl,terrain)
		lr850_670 = get_lr_p(ta[t], p_3d, hgt[t], 850, 670)
		lr750_500 = get_lr_p(ta[t], p_3d, hgt[t], 750, 500)
		lr700_500 = get_lr_p(ta[t], p_3d, hgt[t], 700, 500)
		melting_hgt = get_t_hgt(sfc_ta,np.copy(sfc_hgt),0,terrain)
		hwb0 = get_var_hgt(np.flipud(sfc_wb),np.flipud(np.copy(sfc_hgt)),0,terrain)
		rhmean01 = get_mean_var_hgt(np.copy(sfc_hur),np.copy(sfc_hgt),0,1000,terrain,True,np.copy(sfc_p_3d))
		rhmean03 = get_mean_var_hgt(np.copy(sfc_hur),np.copy(sfc_hgt),0,3000,terrain,True,np.copy(sfc_p_3d))
		rhmean06 = get_mean_var_hgt(np.copy(sfc_hur),np.copy(sfc_hgt),0,6000,terrain,True,np.copy(sfc_p_3d))
		rhmean13 = get_mean_var_hgt(np.copy(sfc_hur),np.copy(sfc_hgt),1000,3000,terrain,True,np.copy(sfc_p_3d))
		rhmean36 = get_mean_var_hgt(np.copy(sfc_hur),np.copy(sfc_hgt),3000,6000,terrain,True,np.copy(sfc_p_3d))
		rhmeansubcloud = get_mean_var_hgt(np.copy(sfc_hur),np.copy(sfc_hgt),0,ml_lcl,terrain,True,np.copy(sfc_p_3d))
		qmean01 = get_mean_var_hgt(np.copy(sfc_q),np.copy(sfc_hgt),0,1000,terrain,True,np.copy(sfc_p_3d)) * 1000
		qmean03 = get_mean_var_hgt(np.copy(sfc_q),np.copy(sfc_hgt),0,3000,terrain,True,np.copy(sfc_p_3d)) * 1000
		qmean06 = get_mean_var_hgt(np.copy(sfc_q),np.copy(sfc_hgt),0,6000,terrain,True,np.copy(sfc_p_3d)) * 1000
		qmean13 = get_mean_var_hgt(np.copy(sfc_q),np.copy(sfc_hgt),1000,3000,terrain,True,np.copy(sfc_p_3d)) * 1000
		qmean36 = get_mean_var_hgt(np.copy(sfc_q),np.copy(sfc_hgt),3000,6000,terrain,True,np.copy(sfc_p_3d)) * 1000
		qmeansubcloud = get_mean_var_hgt(np.copy(sfc_q),np.copy(sfc_hgt),0,ml_lcl,terrain,True,np.copy(sfc_p_3d)) * 1000
		q_melting = get_var_hgt_lvl(np.copy(sfc_q), np.copy(sfc_hgt), melting_hgt, terrain) * 1000
		q1 = get_var_hgt_lvl(np.copy(sfc_q), np.copy(sfc_hgt), 1000, terrain) * 1000
		q3 = get_var_hgt_lvl(np.copy(sfc_q), np.copy(sfc_hgt), 3000, terrain) * 1000
		q6 = get_var_hgt_lvl(np.copy(sfc_q), np.copy(sfc_hgt), 6000, terrain) * 1000
		sfc_thetae = get_var_hgt_lvl(np.array(sfc_thetae_unit), np.copy(sfc_hgt), 0, terrain)
		rhmin01 = get_min_var_hgt(np.copy(sfc_hur), np.copy(sfc_hgt), 0, 1000, terrain)
		rhmin03 = get_min_var_hgt(np.copy(sfc_hur), np.copy(sfc_hgt), 0, 3000, terrain)
		rhmin06 = get_min_var_hgt(np.copy(sfc_hur), np.copy(sfc_hgt), 0, 6000, terrain)
		rhmin13 = get_min_var_hgt(np.copy(sfc_hur), np.copy(sfc_hgt), 1000, 3000, terrain)
		rhmin36 = get_min_var_hgt(np.copy(sfc_hur), np.copy(sfc_hgt), 3000, 6000, terrain)
		rhminsubcloud = get_min_var_hgt(np.copy(sfc_hur), np.copy(sfc_hgt), 0, ml_lcl, terrain)
		v_totals = get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 850) - \
				get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 500)
		c_totals = get_var_p_lvl(np.copy(sfc_dp), sfc_p_3d, 850) - \
				get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 500)
		t_totals = v_totals + c_totals
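		# Totals indices above follow the standard Miller (1972) definitions:
		#   vertical totals  VT = T850 - T500
		#   cross totals     CT = Td850 - T500
		#   total totals     TT = VT + CT = T850 + Td850 - 2*T500
		# e.g. T850=12C, Td850=8C, T500=-14C gives VT=26, CT=22, TT=48, a value
		# commonly associated with thunderstorm potential.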
		pwat = get_pwat(sfc_q, np.copy(sfc_p_3d))
		if model != "era5":
			maxtevv = maxtevv_fn(np.array(sfc_thetae_unit), np.copy(sfc_wap), np.copy(sfc_hgt), terrain)
		te_diff = thetae_diff(np.array(sfc_thetae_unit), np.copy(sfc_hgt), terrain)
		tei = tei_fn(np.array(sfc_thetae_unit), sfc_p_3d, ps[t], np.copy(sfc_hgt), terrain)
		dpd850 = get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 850) - \
				get_var_p_lvl(np.copy(sfc_dp), sfc_p_3d, 850)
		dpd700 = get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 700) - \
				get_var_p_lvl(np.copy(sfc_dp), sfc_p_3d, 700)
		dpd670 = get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 670) - \
				get_var_p_lvl(np.copy(sfc_dp), sfc_p_3d, 670)
		dpd500 = get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 500) - \
				get_var_p_lvl(np.copy(sfc_dp), sfc_p_3d, 500)
		if (int(is_dcape) == 1) & (ps[t].max() > 0):
			#Define DCAPE as the area between the moist adiabat of a descending parcel 
			# and the environmental temperature (w/o virtual temperature correction). 
			#Starting parcel chosen by the pressure level with minimum thetae below 
			# 400 hPa AGL

			if mdl_lvl:
				sfc_thetae300 = np.copy(sfc_thetae_unit)
				sfc_thetae300[(ps[t] - sfc_p_3d) > 400] = np.nan 
				sfc_thetae300[(sfc_p_3d > ps[t])] = np.nan 
				dcape, ddraft_temp = get_dcape( sfc_p_3d, sfc_ta, sfc_q, sfc_hgt,\
					ps[t], p_lvl=False, \
					minthetae_inds=np.argmin(sfc_thetae300, axis=0))

			else:
				#Get 3d DCAPE for every point below 300 hPa, and then mask points above 400 hPa AGL
				#For each lat/lon point, calculate the minimum thetae, and use
				# DCAPE for that point
				dcape, ddraft_temp = get_dcape(\
							np.array(sfc_p_3d[np.concatenate([[1100], \
								p]) >= 300]), \
							sfc_ta[np.concatenate([[1100], p]) >= 300], \
							sfc_q[np.concatenate([[1100], p]) >= 300], \
							sfc_hgt[np.concatenate([[1100], p]) >= 300], \
							ps[t], p=np.array(p[p>=300]))
				sfc_thetae300 = np.array(sfc_thetae_unit[np.concatenate([[1100], \
					p]) >= 300])
				sfc_p300 = sfc_p_3d[np.concatenate([[1100], p]) >= 300]
				sfc_thetae300[(ps[t] - sfc_p300) > 400] = np.nan 
				sfc_thetae300[(sfc_p300 > ps[t])] = np.nan 
				dcape_inds = np.tile(np.nanargmin(sfc_thetae300, axis=0), \
					    (sfc_thetae300.shape[0],1,1) )
				dcape = np.take_along_axis(dcape, dcape_inds, 0)[0]
				ddraft_temp = tas[t] - \
					np.take_along_axis(ddraft_temp, dcape_inds, 0)[0]

				ddraft_temp[(ddraft_temp<0) | (np.isnan(ddraft_temp))] = 0
		else:
			ddraft_temp = np.zeros(dpd500.shape)
			dcape = np.zeros(dpd500.shape)
		#Winds
		winds_start = dt.datetime.now()
		umeanwindinf = get_mean_var_hgt(sfc_ua, np.copy(sfc_hgt), np.nanmin(eff_hgt,axis=0), \
					np.nanmax(eff_hgt,axis=0),0,False,sfc_p_3d)
		vmeanwindinf = get_mean_var_hgt(sfc_va, np.copy(sfc_hgt), np.nanmin(eff_hgt,axis=0),\
					np.nanmax(eff_hgt,axis=0),0,False,sfc_p_3d)
		umean01 = get_mean_var_hgt(sfc_ua, np.copy(sfc_hgt), 0, 1000, terrain, mass_weighted=True, p3d=np.copy(sfc_p_3d))
		vmean01 = get_mean_var_hgt(sfc_va, np.copy(sfc_hgt), 0, 1000, terrain, mass_weighted=True, p3d=np.copy(sfc_p_3d))
		umean03 = get_mean_var_hgt(sfc_ua, np.copy(sfc_hgt), 0, 3000, terrain, mass_weighted=True, p3d=np.copy(sfc_p_3d))
		vmean03 = get_mean_var_hgt(sfc_va, np.copy(sfc_hgt), 0, 3000, terrain, mass_weighted=True, p3d=np.copy(sfc_p_3d))
		umean06 = get_mean_var_hgt(sfc_ua, np.copy(sfc_hgt), 0, 6000, terrain, mass_weighted=True, p3d=np.copy(sfc_p_3d))
		vmean06 = get_mean_var_hgt(sfc_va, np.copy(sfc_hgt), 0, 6000, terrain, mass_weighted=True, p3d=np.copy(sfc_p_3d))
		umean800_600 = get_mean_var_p(ua[t], p_3d, 800, 600, ps[t], mass_weighted=True)
		vmean800_600 = get_mean_var_p(va[t], p_3d, 800, 600, ps[t], mass_weighted=True)
		Umeanwindinf = np.sqrt( (umeanwindinf**2) + (vmeanwindinf**2) )
		Umean01 = np.sqrt( (umean01**2) + (vmean01**2) )
		Umean03 = np.sqrt( (umean03**2) + (vmean03**2) )
		Umean06 = np.sqrt( (umean06**2) + (vmean06**2) )
		Umean800_600 = np.sqrt( (umean800_600**2) + (vmean800_600**2) )
		uwindinf = get_var_hgt_lvl(sfc_ua, np.copy(sfc_hgt), eff_avg_hgt, terrain)
		vwindinf = get_var_hgt_lvl(sfc_va, np.copy(sfc_hgt), eff_avg_hgt, terrain)
		u10 = get_var_hgt_lvl(sfc_ua, np.copy(sfc_hgt), 10, terrain)
		v10 = get_var_hgt_lvl(sfc_va, np.copy(sfc_hgt), 10, terrain)
		u500 = get_var_p_lvl(np.copy(sfc_ua), sfc_p_3d, 500)
		v500 = get_var_p_lvl(np.copy(sfc_va), sfc_p_3d, 500)
		u1 = get_var_hgt_lvl(sfc_ua, np.copy(sfc_hgt), 1000, terrain) 
		v1 = get_var_hgt_lvl(sfc_va, np.copy(sfc_hgt), 1000, terrain) 
		u3 = get_var_hgt_lvl(sfc_ua, np.copy(sfc_hgt), 3000, terrain) 
		v3 = get_var_hgt_lvl(sfc_va, np.copy(sfc_hgt), 3000, terrain) 
		u6 = get_var_hgt_lvl(sfc_ua, np.copy(sfc_hgt), 6000, terrain) 
		v6 = get_var_hgt_lvl(sfc_va, np.copy(sfc_hgt), 6000, terrain) 
		Uwindinf = np.sqrt( (uwindinf**2) + (vwindinf**2) )
		U500 = np.sqrt( (u500**2) + (v500**2) )
		U10 = np.sqrt( (u10**2) + (v10**2) )
		U1 = np.sqrt( (u1**2) + (v1**2) )
		U3 = np.sqrt( (u3**2) + (v3**2) )
		U6 = np.sqrt( (u6**2) + (v6**2) )
		scld = get_shear_hgt(sfc_ua, sfc_va, np.copy(sfc_hgt), ml_lcl, 0.5*mu_el, terrain)
		s01 = get_shear_hgt(sfc_ua, sfc_va, np.copy(sfc_hgt), 0, 1000, terrain)
		s03 = get_shear_hgt(sfc_ua, sfc_va, np.copy(sfc_hgt), 0, 3000, terrain)
		s06 = get_shear_hgt(sfc_ua, sfc_va, np.copy(sfc_hgt), 0, 6000, terrain)
		s010 = get_shear_hgt(sfc_ua, sfc_va, np.copy(sfc_hgt), 0, 10000, terrain)
		s13 = get_shear_hgt(sfc_ua, sfc_va, np.copy(sfc_hgt), 1000, 3000, terrain)
		s36 = get_shear_hgt(sfc_ua, sfc_va, np.copy(sfc_hgt), 3000, 6000, terrain)
		ebwd = get_shear_hgt(sfc_ua, sfc_va, np.copy(sfc_hgt), np.nanmin(eff_hgt,axis=0),\
					(mu_el * 0.5), terrain)
		srh01_left, srh01_right = get_srh(sfc_ua, sfc_va, np.copy(sfc_hgt), 0, 1000, terrain)
		srh03_left, srh03_right = get_srh(sfc_ua, sfc_va, np.copy(sfc_hgt), 0, 3000, terrain)
		srh06_left, srh06_right = get_srh(sfc_ua, sfc_va, np.copy(sfc_hgt), 0, 6000, terrain)
		srhe_left, srhe_right = get_srh(sfc_ua, sfc_va, np.copy(sfc_hgt), \
						np.nanmin(eff_hgt,axis=0), np.nanmax(eff_hgt,axis=0), terrain)
		ust_right, vst_right, ust_left, vst_left = \
			get_storm_motion(sfc_ua, sfc_va, np.copy(sfc_hgt), terrain)
		sru01_right = umean01 - ust_right
		srv01_right = vmean01 - vst_right
		sru03_right = umean03 - ust_right
		srv03_right = vmean03 - vst_right
		sru06_right = umean06 - ust_right
		srv06_right = vmean06 - vst_right
		sru01_left = umean01 - ust_left
		srv01_left = vmean01 - vst_left
		sru03_left = umean03 - ust_left
		srv03_left = vmean03 - vst_left
		sru06_left = umean06 - ust_left
		srv06_left = vmean06 - vst_left
		Ust_right = np.sqrt( ust_right**2 + vst_right**2)
		Ust_left = np.sqrt( ust_left**2 + vst_left**2)
		Usr01_right = np.sqrt( sru01_right**2 + srv01_right**2)
		Usr03_right = np.sqrt( sru03_right**2 + srv03_right**2)
		Usr06_right = np.sqrt( sru06_right**2 + srv06_right**2)
		Usr01_left = np.sqrt( sru01_left**2 + srv01_left**2)
		Usr03_left = np.sqrt( sru03_left**2 + srv03_left**2)
		Usr06_left = np.sqrt( sru06_left**2 + srv06_left**2)
		if model != "era5":
			omega01 = get_mean_var_hgt(wap[t], hgt[t], 0, 1000, terrain, True, np.copy(p_3d))
			omega03 = get_mean_var_hgt(wap[t], hgt[t], 0, 3000, terrain, True, np.copy(p_3d))
			omega06 = get_mean_var_hgt(wap[t], hgt[t], 0, 6000, terrain, True, np.copy(p_3d))
		#Kinematic
		kinematic_start = dt.datetime.now()
		x, y = np.meshgrid(lon,lat)
		dx, dy = mpcalc.lat_lon_grid_deltas(x,y)
		thetae10 = get_var_hgt_lvl(np.array(sfc_thetae_unit), np.copy(sfc_hgt), 10, terrain)
		thetae01 = get_mean_var_hgt(np.array(sfc_thetae_unit), np.copy(sfc_hgt), 0, 1000, terrain, True, np.copy(sfc_p_3d))
		thetae03 = get_mean_var_hgt(np.array(sfc_thetae_unit), np.copy(sfc_hgt), 0, 3000, terrain, True, np.copy(sfc_p_3d))
		F10, Fn10, Fs10, icon10, vgt10, conv10, vo10 = \
				kinematics(u10, v10, thetae10, dx, dy, y)
		F01, Fn01, Fs01, icon01, vgt01, conv01, vo01 = \
				kinematics(umean01, vmean01, thetae01, dx, dy, y)
		F03, Fn03, Fs03, icon03, vgt03, conv03, vo03 = \
				kinematics(umean03, vmean03, thetae03, dx, dy, y)
		#Composites
		Rq = qmean01 / 12.
		windex = 5. * np.power( (melting_hgt/1000.) * Rq * (np.power( lr_freezing,2) - 30. + \
				qmean01 - 2. * q_melting), 0.5)
		windex[np.isnan(windex)] = 0
		gustex = (0.5 * windex) + (0.5 * Umean06)
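		# WINDEX (McCann 1994): 5 * sqrt( Hm * Rq * (lr^2 - 30 + Ql - 2*Qm) ), with
		#   Hm = melting_hgt/1000 (melting level, km), lr = lr_freezing (C/km),
		#   Ql = qmean01 and Qm = q_melting (g/kg), and Rq = Ql/12 (McCann caps Rq
		#   at 1.0, which is not applied above).
		# GUSTEX blends WINDEX with a deep-layer mean wind; the 0.5/0.5 weighting of
		#   windex and Umean06 used here is one empirical choice of the coefficients.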
		hmi = lr850_670 + dpd850 - dpd670
		wmsi_ml = (ml_cape * te_diff) / 1000
		dmi = lr750_500 + dpd700 - dpd500
		mwpi_ml = (ml_cape / 100.) + (lr850_670 + dpd850 - dpd670)
		wmpi = np.sqrt( np.power(melting_hgt,2) * (lr_freezing / 1000. - 5.5e-3) + \
				melting_hgt * (q1 - 1.5*q_melting) / 3.) /5.
		dmi[dmi<0] = 0
		hmi[hmi<0] = 0
		wmsi_ml[wmsi_ml<0] = 0
		mwpi_ml[mwpi_ml<0] = 0
		stp_fixed_left, stp_cin_left = get_tornado_pot( np.copy(ml_cin), np.copy(ml_lcl)\
					, np.copy(sb_lcl), np.copy(s06), np.copy(ebwd), \
					np.copy(sb_cape), np.copy(ml_cape), np.copy(srh01_left), \
					np.copy(srhe_left))		
		if model != "era5":
			moshe = ((lr03 - 4.)/4.) * ((s01 - 8)/10.) * \
				((ebwd - 8)/10.) * ((maxtevv + 10.)/9.)
			moshe[moshe<0] = 0
			mosh = ((lr03 - 4.)/4.) * ((s01 - 8)/10.) * ((maxtevv + 10.)/9.)
			mosh[mosh<0] = 0
		ship = get_ship(np.copy(mu_cape), np.copy(muq), np.copy(s06), np.copy(lr700_500), \
				get_var_p_lvl(sfc_ta, sfc_p_3d, 500), np.copy(melting_hgt) )
		scp, scp_fixed = get_supercell_pot(mu_cape, np.copy(srhe_left), np.copy(srh01_left), np.copy(ebwd),\
					np.copy(s06) )
		sherb, eff_sherb = get_sherb(np.copy(s03), np.copy(ebwd), np.copy(lr03), np.copy(lr700_500))
		k_index = get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 850) \
			- get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 500) \
			+ get_var_p_lvl(np.copy(sfc_dp), sfc_p_3d, 850) - (dpd700)
		k_index[k_index<0] = 0
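		# K index (George 1960): K = (T850 - T500) + Td850 - (T700 - Td700), i.e. the
		# 850-500 hPa lapse plus 850-hPa moisture minus the 700-hPa dewpoint depression
		# (dpd700 above); values above roughly 30 C are commonly taken to favour deep
		# convection. Clipping negatives to zero is a choice of this routine, not part
		# of the definition.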
		mlcs6 = ml_cape * np.power(s06, 1.67)
		mucs6 = mu_cape * np.power(s06, 1.67)
		sbcs6 = sb_cape * np.power(s06, 1.67)
		effcs6 = eff_cape * np.power(s06, 1.67)
		if model == "erai":
			cs6 = mod_cape[t] * np.power(s06, 1.67)
		wndg = get_wndg(np.copy(ml_cape), np.copy(ml_cin), np.copy(lr03), sfc_ua, sfc_va, np.copy(sfc_hgt), terrain,\
			np.copy(sfc_p_3d))
		sweat = get_sweat(np.copy(sfc_p_3d), np.copy(sfc_dp), np.copy(t_totals), sfc_ua, sfc_va)
		mmp = get_mmp(sfc_ua, sfc_va, np.copy(mu_cape), sfc_ta, np.copy(sfc_hgt), terrain, np.copy(sfc_p_3d))
		dmgwind = (dcape/800.) * (Uwindinf / 8.)
		dmgwind_fixed = (dcape/800.) * (Umean800_600 / 8.)
		mburst = get_mburst(np.copy(sb_cape), np.copy(lr03), np.copy(v_totals), \
				np.copy(dcape), np.copy(pwat), np.copy(tei), \
				np.array(sfc_thetae_unit), \
				np.copy(sfc_hgt), terrain)
		mburst[mburst<0] = 0
		convgust_wet = np.sqrt( (Umean800_600**2) + (np.sqrt(2*dcape))**2 )
		convgust_dry = np.sqrt( (Umean800_600**2) + (np.sqrt(dcape))**2 )
		dcp = (dcape / 980.) * (mu_cape / 2000.) * (s06 / 20.) * (Umean06 / 16.)
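		# convgust_* follow the idea that a downdraft converting all of its DCAPE to
		# kinetic energy reaches w = sqrt(2*DCAPE); the "wet" estimate adds that full
		# speed in quadrature with the 800-600 hPa mean wind, while the "dry" estimate
		# uses a reduced sqrt(DCAPE). dcp matches the usual derecho composite parameter
		# normalisations (DCAPE/980, MUCAPE/2000, 0-6 km shear/20, 0-6 km mean wind/16).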
	
		#Fill output
		output = fill_output(output, t, param, ps, "ml_cape", ml_cape)
		output = fill_output(output, t, param, ps, "mu_cape", mu_cape)
		output = fill_output(output, t, param, ps, "eff_cape", eff_cape)
		output = fill_output(output, t, param, ps, "sb_cape", sb_cape)
		output = fill_output(output, t, param, ps, "ml_cin", ml_cin)
		output = fill_output(output, t, param, ps, "mu_cin", mu_cin)
		output = fill_output(output, t, param, ps, "eff_cin", eff_cin)
		output = fill_output(output, t, param, ps, "sb_cin", sb_cin)
		output = fill_output(output, t, param, ps, "ml_lcl", ml_lcl)
		output = fill_output(output, t, param, ps, "mu_lcl", mu_lcl)
		output = fill_output(output, t, param, ps, "eff_lcl", eff_lcl)
		output = fill_output(output, t, param, ps, "sb_lcl", sb_lcl)
		output = fill_output(output, t, param, ps, "ml_el", ml_el)
		output = fill_output(output, t, param, ps, "mu_el", mu_el)
		output = fill_output(output, t, param, ps, "eff_el", eff_el)
		output = fill_output(output, t, param, ps, "sb_el", sb_el)
		output = fill_output(output, t, param, ps, "lr36", lr36)
		output = fill_output(output, t, param, ps, "lr_freezing", lr_freezing)
		output = fill_output(output, t, param, ps, "lr_subcloud", lr_subcloud)
		output = fill_output(output, t, param, ps, "qmean01", qmean01)
		output = fill_output(output, t, param, ps, "qmeansubcloud", qmeansubcloud)
		output = fill_output(output, t, param, ps, "mhgt", melting_hgt)
		output = fill_output(output, t, param, ps, "pwat", pwat)
		output = fill_output(output, t, param, ps, "dcape", dcape)
		output = fill_output(output, t, param, ps, "srh03_left", srh03_left)
		output = fill_output(output, t, param, ps, "srhe_left", srhe_left)
		output = fill_output(output, t, param, ps, "s03", s03)
		output = fill_output(output, t, param, ps, "s06", s06)
		output = fill_output(output, t, param, ps, "ebwd", ebwd)
		output = fill_output(output, t, param, ps, "Umean06", Umean06)
		output = fill_output(output, t, param, ps, "Umean800_600", Umean800_600)
		output = fill_output(output, t, param, ps, "wg10", wg10[t])
		output = fill_output(output, t, param, ps, "U10", U10)
		output = fill_output(output, t, param, ps, "stp_cin_left", stp_cin_left)
		output = fill_output(output, t, param, ps, "stp_fixed_left", stp_fixed_left)
		output = fill_output(output, t, param, ps, "windex", windex)
		output = fill_output(output, t, param, ps, "gustex", gustex)
		output = fill_output(output, t, param, ps, "ship", ship)
		output = fill_output(output, t, param, ps, "scp", scp)
		output = fill_output(output, t, param, ps, "scp_fixed", scp_fixed)
		output = fill_output(output, t, param, ps, "k_index", k_index)
		output = fill_output(output, t, param, ps, "mlcape*s06", mlcs6)
		output = fill_output(output, t, param, ps, "mucape*s06", mucs6)
		output = fill_output(output, t, param, ps, "sbcape*s06", sbcs6)
		output = fill_output(output, t, param, ps, "effcape*s06", effcs6)
		output = fill_output(output, t, param, ps, "wndg", wndg)
		output = fill_output(output, t, param, ps, "sweat", sweat)
		output = fill_output(output, t, param, ps, "mmp", mmp)
		output = fill_output(output, t, param, ps, "convgust_wet", convgust_wet)
		output = fill_output(output, t, param, ps, "convgust_dry", convgust_dry)
		output = fill_output(output, t, param, ps, "dcp", dcp)
		output = fill_output(output, t, param, ps, "dmgwind", dmgwind)
		output = fill_output(output, t, param, ps, "dmgwind_fixed", dmgwind_fixed)
		output = fill_output(output, t, param, ps, "t_totals", t_totals)
	    
		if full_params:
			if model == "erai":
				output = fill_output(output, t, param, ps, "cape*s06", cs6)
			if (model == "erai") | (model == "era5"):
				output = fill_output(output, t, param, ps, "cp", cp[t])
			if model == "erai":
				output = fill_output(output, t, param, ps, "cape", mod_cape[t])

			output = fill_output(output, t, param, ps, "lr01", lr01)
			output = fill_output(output, t, param, ps, "lr03", lr03)
			output = fill_output(output, t, param, ps, "lr13", lr13)
			output = fill_output(output, t, param, ps, "lr24", lr24)
			output = fill_output(output, t, param, ps, "wbz", hwb0)
			output = fill_output(output, t, param, ps, "qmean03", qmean03)
			output = fill_output(output, t, param, ps, "qmean06", qmean06)
			output = fill_output(output, t, param, ps, "q_melting", q_melting)
			output = fill_output(output, t, param, ps, "q1", q1)
			output = fill_output(output, t, param, ps, "q3", q3)
			output = fill_output(output, t, param, ps, "q6", q6)
			output = fill_output(output, t, param, ps, "sfc_thetae", sfc_thetae)
			output = fill_output(output, t, param, ps, "rhmin01", rhmin01)
			output = fill_output(output, t, param, ps, "rhmin03", rhmin03)
			output = fill_output(output, t, param, ps, "rhmin13", rhmin13)
			output = fill_output(output, t, param, ps, "rhminsubcloud", rhminsubcloud)
			output = fill_output(output, t, param, ps, "v_totals", v_totals)
			output = fill_output(output, t, param, ps, "c_totals", c_totals)
			output = fill_output(output, t, param, ps, "te_diff", te_diff)
			output = fill_output(output, t, param, ps, "tei", tei)
			output = fill_output(output, t, param, ps, "dpd700", dpd700)
			output = fill_output(output, t, param, ps, "dpd850", dpd850)
			output = fill_output(output, t, param, ps, "ddraft_temp", ddraft_temp)
			output = fill_output(output, t, param, ps, "Umeanwindinf", Umeanwindinf)
			output = fill_output(output, t, param, ps, "Umean01", Umean01)
			output = fill_output(output, t, param, ps, "Umean03", Umean03)
			output = fill_output(output, t, param, ps, "Uwindinf", Uwindinf)
			output = fill_output(output, t, param, ps, "U500", U500)
			output = fill_output(output, t, param, ps, "U1", U1)
			output = fill_output(output, t, param, ps, "U3", U3)
			output = fill_output(output, t, param, ps, "U6", U6)
			output = fill_output(output, t, param, ps, "Ust_left", Ust_left)
			output = fill_output(output, t, param, ps, "Usr01_left", Usr01_left)
			output = fill_output(output, t, param, ps, "Usr03_left", Usr03_left)
			output = fill_output(output, t, param, ps, "Usr06_left", Usr06_left)
			output = fill_output(output, t, param, ps, "scld", scld)
			output = fill_output(output, t, param, ps, "s01", s01)
			output = fill_output(output, t, param, ps, "s010", s010)
			output = fill_output(output, t, param, ps, "s13", s13)
			output = fill_output(output, t, param, ps, "s36", s36)
			output = fill_output(output, t, param, ps, "srh01_left", srh01_left)
			output = fill_output(output, t, param, ps, "srh06_left", srh06_left)

			output = fill_output(output, t, param, ps, "F10", F10)
			output = fill_output(output, t, param, ps, "Fn10", Fn10)
			output = fill_output(output, t, param, ps, "Fs10", Fs10)
			output = fill_output(output, t, param, ps, "icon10", icon10)
			output = fill_output(output, t, param, ps, "vgt10", vgt10)
			output = fill_output(output, t, param, ps, "conv10", conv10)
			output = fill_output(output, t, param, ps, "vo10", vo10)

			output = fill_output(output, t, param, ps, "hmi", hmi)
			output = fill_output(output, t, param, ps, "wmsi_ml", wmsi_ml)
			output = fill_output(output, t, param, ps, "dmi", dmi)
			output = fill_output(output, t, param, ps, "mwpi_ml", mwpi_ml)
			output = fill_output(output, t, param, ps, "wmpi", wmpi)
			output = fill_output(output, t, param, ps, "eff_sherb", eff_sherb)
			output = fill_output(output, t, param, ps, "sherb", sherb)
			if model == "erai":
				output = fill_output(output, t, param, ps, "cape*s06", cs6)
			output = fill_output(output, t, param, ps, "mburst", mburst)

			if model != "era5":
				output = fill_output(output, t, param, ps, "mosh", mosh)
				output = fill_output(output, t, param, ps, "moshe", moshe)
				output = fill_output(output, t, param, ps, "maxtevv", maxtevv)
				output = fill_output(output, t, param, ps, "omega01", omega01)
				output = fill_output(output, t, param, ps, "omega03", omega03)
				output = fill_output(output, t, param, ps, "omega06", omega06)

		output_data[t] = output



	print("SAVING DATA...")
	param_out = []
	for param_name in param:
		temp_data = output_data[:,:,:,np.where(param==param_name)[0][0]]
		param_out.append(temp_data)

	#If the mhgt variable is zero everywhere, then it is likely that data has not been read.
	#In that case, flag the whole time step as missing by setting every parameter to NaN.
	for t in np.arange(param_out[0].shape[0]):
		if param_out[np.where(param=="mhgt")[0][0]][t].max() == 0:
			for p in np.arange(len(param_out)):
				param_out[p][t] = np.nan

	if issave:
		save_netcdf(region, model, out_name, date_list, lat, lon, param, param_out, \
			out_dtype = "f4", compress=True)

	print(dt.datetime.now() - tot_start)
Beispiel #14
0
def main():
    ### START OF USER SETTINGS BLOCK ###
    # FILE/DATA SETTINGS
    # file path to input
    datafile = '/home/jgodwin/python/sfc_observations/surface_observations.txt'
    timefile = '/home/jgodwin/python/sfc_observations/validtime.txt'

    # MAP SETTINGS
    # map names (for tracking purposes)
    maps = ['CONUS','Texas','Floater 1']
    restart_domain = [True,False,False]  # one flag per map: re-run the interpolation only when True
    # map boundaries
    west = [-120,-108,-108]
    east = [-70,-93,-85]
    south = [20,25,37]
    north = [50,38,52]

    # OUTPUT SETTINGS
    # save directory for output
    savedir = '/var/www/html/images/'
    # filenames ("_[variable].png" will be appended, so only a descriptor like "conus" is needed)
    savenames = ['conus','texas','floater1']

    # TEST MODE SETTINGS
    test = False
    testnum = 3

    ### END OF USER SETTINGS BLOCK ###

    for i in range(len(maps)):
        if test and i != testnum:
            continue
        print(maps[i])
        # create the map projection
        cenlon = (west[i] + east[i]) / 2.0
        cenlat = (south[i] + north[i]) / 2.0
        sparallel = cenlat
        if cenlat > 0:
            cutoff = -30
            flip = False
        else:  # cenlat <= 0
            cutoff = 30
            flip = True
        if restart_domain[i]:
            to_proj = ccrs.LambertConformal(central_longitude=cenlon,central_latitude=cenlat,standard_parallels=[sparallel],cutoff=cutoff)
        # open the data
        vt = open(timefile).read()
        with open(datafile) as f:
            data = pd.read_csv(f,header=0,names=['siteID','lat','lon','elev','slp','temp','sky','dpt','wx','wdr',\
                'wsp'],na_values=-99999)

        # filter data by lat/lon
        data = data[(data['lat'] >= south[i]-2.0) & (data['lat'] <= north[i]+2.0) & (data['lon'] >= west[i]-2.0)\
            & (data['lon'] <= east[i]+2.0)]
        # remove questionable data
        data = data[(cToF(data['temp']) <= 120) & (cToF(data['dpt']) <= 80)]

        # project lat/lon onto final projection
        print("Creating map projection.")
        lon = data['lon'].values
        lat = data['lat'].values
        xp, yp, _ = to_proj.transform_points(ccrs.Geodetic(), lon, lat).T

        # remove missing data from pressure and interpolate
        # we'll give this a try and see if it can help with my CPU credit problem
        if restart_domain[i]:
            print("Performing Cressman interpolation.")
            x_masked, y_masked, pres = remove_nan_observations(xp, yp, data['slp'].values)
            slpgridx, slpgridy, slp = interpolate_to_grid(x_masked, y_masked, pres, interp_type='cressman',
                                                          minimum_neighbors=1, search_radius=400000,
                                                          hres=100000)

            # get wind information and remove missing data
            wind_speed = (data['wsp'].values * units('knots'))
            wind_dir = data['wdr'].values * units.degree
            good_indices = np.where((~np.isnan(wind_dir)) & (~np.isnan(wind_speed)))
            x_masked = xp[good_indices]
            y_masked = yp[good_indices]
            wind_speed = wind_speed[good_indices]
            wind_dir = wind_dir[good_indices]
            u, v = wind_components(wind_speed, wind_dir)
            windgridx, windgridy, uwind = interpolate_to_grid(x_masked, y_masked, np.array(u),
                                                              interp_type='cressman', search_radius=400000,
                                                              hres=100000)
            _, _, vwind = interpolate_to_grid(x_masked, y_masked, np.array(v), interp_type='cressman',
                                              search_radius=400000, hres=100000)

            # get temperature information
            data['temp'] = cToF(data['temp'])
            x_masked, y_masked, t = remove_nan_observations(xp, yp, data['temp'].values)
            tempx, tempy, temp = interpolate_to_grid(x_masked, y_masked, t, interp_type='cressman',
                                                     minimum_neighbors=3, search_radius=200000, hres=18000)
            temp = np.ma.masked_where(np.isnan(temp), temp)

            # get dewpoint information
            data['dpt'] = cToF(data['dpt'])
            x_masked,y_masked,td = remove_nan_observations(xp,yp,data['dpt'].values)
            dptx,dpty,dewp = interpolate_to_grid(x_masked,y_masked,td,interp_type='cressman',\
                minimum_neighbors=3,search_radius=200000,hres=18000)
            dewp = np.ma.masked_where(np.isnan(dewp),dewp)

            # interpolate wind speed
            x_masked,y_masked,wspd = remove_nan_observations(xp,yp,data['wsp'].values)
            wspx,wspy,speed = interpolate_to_grid(x_masked,y_masked,wspd,interp_type='cressman',\
                minimum_neighbors=3,search_radius=200000,hres=18000)
            speed = np.ma.masked_where(np.isnan(speed),speed)

            # derived values
            # station pressure
            data['pres'] = stationPressure(data['slp'],data['elev'])
            # theta-E
            data['thetae'] = equivalent_potential_temperature(data['pres'].values*units.hPa,data['temp'].values*units.degF,data['dpt'].values*units.degF)
            x_masked,y_masked,thetae = remove_nan_observations(xp,yp,data['thetae'].values)
            thex,they,thte = interpolate_to_grid(x_masked,y_masked,thetae,interp_type='cressman',\
                minimum_neighbors=3,search_radius=200000,hres=18000)
            thte = np.ma.masked_where(np.isnan(thte),thte)

            # mixing ratio
            relh = relative_humidity_from_dewpoint(data['temp'].values*units.degF,data['dpt'].values*units.degF)
            mixr = mixing_ratio_from_relative_humidity(relh,data['temp'].values*units.degF,data['pres'].values*units.hPa) * 1000.0
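            # equivalent shortcut (air at its dewpoint is saturated, so the actual mixing
            # ratio equals the saturation value at Td); left commented as a cross-check
            # and would require importing metpy.calc.saturation_mixing_ratio:
            # mixr_check = saturation_mixing_ratio(data['pres'].values*units.hPa,
            #                                      data['dpt'].values*units.degF) * 1000.0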
            x_masked,y_masked,mixrat = remove_nan_observations(xp,yp,mixr)
            mrx,mry,mrat = interpolate_to_grid(x_masked,y_masked,mixrat,interp_type='cressman',\
                minimum_neighbors=3,search_radius=200000,hres=18000)
            mrat = np.ma.masked_where(np.isnan(mrat),mrat)

        # set up the state borders
        state_boundaries = cfeature.NaturalEarthFeature(category='cultural',\
            name='admin_1_states_provinces_lines',scale='50m',facecolor='none')

        # SCALAR VARIABLES TO PLOT
        # variable names (will appear in plot title)
        variables = ['Temperature','Dewpoint','Wind Speed','Theta-E','Mixing Ratio']
        # units (for colorbar label)
        unitlabels = ['F','F','kt','K','g/kg']
        # list of actual variables to plot
        vardata = [temp,dewp,speed,thte,mrat]
        # tag in output filename
        varplots = ['temp','dewp','wspd','thte','mrat']
        # levels: (lower,upper,step)
        levs = [[-20,105,5],[30,85,5],[0,70,5],[250,380,5],[0,22,2]]
        # colormaps
        colormaps = ['hsv_r','Greens','plasma','hsv_r','Greens']

        for j in range(len(variables)):
            print("\t%s" % variables[j])
            fig = plt.figure(figsize=(20, 10))
            view = fig.add_subplot(1, 1, 1, projection=to_proj)
            
            # set up the map and plot the interpolated grids
            levels = list(range(levs[j][0],levs[j][1],levs[j][2]))
            cmap = plt.get_cmap(colormaps[j])
            norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
            labels = variables[j] + " (" + unitlabels[j] + ")"

            # add map features
            view.set_extent([west[i],east[i],south[i],north[i]])
            view.add_feature(state_boundaries,edgecolor='black')
            view.add_feature(cfeature.OCEAN,zorder=-1)
            view.add_feature(cfeature.COASTLINE,zorder=2)
            view.add_feature(cfeature.BORDERS, linewidth=2,edgecolor='black')

            # plot the sea-level pressure
            cs = view.contour(slpgridx, slpgridy, slp, colors='k', levels=list(range(990, 1034, 4)))
            view.clabel(cs, inline=1, fontsize=12, fmt='%i')

            # plot the scalar background
            # each variable was interpolated onto its own grid, so use the matching coordinates
            gridx, gridy = [(tempx, tempy), (dptx, dpty), (wspx, wspy), (thex, they), (mrx, mry)][j]
            mmb = view.pcolormesh(gridx, gridy, vardata[j], cmap=cmap, norm=norm)
            fig.colorbar(mmb, shrink=.4, orientation='horizontal', pad=0.02, boundaries=levels, \
                extend='both',label=labels)

            # plot the wind barbs
            view.barbs(windgridx, windgridy, uwind, vwind, alpha=.4, length=5,flip_barb=flip)

            # plot title and save
            view.set_title('%s (shaded), SLP, and Wind (valid %s)' % (variables[j],vt))
            plt.savefig('/var/www/html/images/%s_%s.png' % (savenames[i],varplots[j]),bbox_inches='tight')

            # close everything
            fig.clear()
            view.clear()
            plt.close(fig)
            f.close()

    print("Script finished.")
Beispiel #15
0
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn("asos")
    ctx = get_autoplot_context(fdict, get_description())

    station = ctx["zstation"]
    month = ctx["month"]

    if month == "all":
        months = range(1, 13)
    elif month == "fall":
        months = [9, 10, 11]
    elif month == "winter":
        months = [12, 1, 2]
    elif month == "spring":
        months = [3, 4, 5]
    elif month == "summer":
        months = [6, 7, 8]
    else:
        ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
        # make sure it is length two for the trick below in SQL
        months = [ts.month, 999]

    df = read_sql(
        """
        SELECT drct::int as t, dwpf, tmpf, relh,
        coalesce(mslp, alti * 33.8639, 1013.25) as slp
        from alldata where station = %s
        and drct is not null and dwpf is not null and dwpf <= tmpf
        and sknt > 3 and drct::int %% 10 = 0
        and extract(month from valid) in %s
        and report_type = 2
    """,
        pgconn,
        params=(station, tuple(months)),
    )
    if df.empty:
        raise NoDataFound("No Data Found.")
    # Convert sea level pressure to station pressure
    df["pressure"] = mcalc.add_height_to_pressure(
        df["slp"].values * units("millibars"),
        ctx["_nt"].sts[station]["elevation"] * units("m"),
    ).to(units("millibar"))
    # compute mixing ratio
    df["mixingratio"] = mcalc.mixing_ratio_from_relative_humidity(
        df["relh"].values * units("percent"),
        df["tmpf"].values * units("degF"),
        df["pressure"].values * units("millibars"),
    )
    # compute pressure
    df["vapor_pressure"] = mcalc.vapor_pressure(
        df["pressure"].values * units("millibars"),
        df["mixingratio"].values * units("kg/kg"),
    ).to(units("kPa"))
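    # mcalc.vapor_pressure implements e = p * w / (epsilon + w). Averaging is done in
    # vapor-pressure space because dewpoint is a nonlinear function of moisture: the
    # groupby-mean below averages e per wind direction, and only then inverts the mean
    # back to a dewpoint via mcalc.dewpoint.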

    means = df.groupby("t").mean().copy()
    # compute dewpoint now
    means["dwpf"] = (mcalc.dewpoint(means["vapor_pressure"].values *
                                    units("kPa")).to(units("degF")).m)

    (fig, ax) = plt.subplots(1, 1)
    ax.bar(
        means.index.values,
        means["dwpf"].values,
        ec="green",
        fc="green",
        width=10,
        align="center",
    )
    ax.grid(True, zorder=11)
    ab = ctx["_nt"].sts[station]["archive_begin"]
    if ab is None:
        raise NoDataFound("Unknown station metadata.")
    ax.set_title(
        ("%s [%s]\nAverage Dew Point by Wind Direction (month=%s) "
         "(%s-%s)\n"
         "(must have 3+ hourly obs > 3 knots at given direction)") % (
             ctx["_nt"].sts[station]["name"],
             station,
             month.upper(),
             max([1973, ab.year]),
             datetime.datetime.now().year,
         ),
        size=10,
    )

    ax.set_ylabel("Dew Point [F]")
    ax.set_ylim(means["dwpf"].min() - 5, means["dwpf"].max() + 5)
    ax.set_xlim(-5, 365)
    ax.set_xticks([0, 45, 90, 135, 180, 225, 270, 315, 360])
    ax.set_xticklabels(["N", "NE", "E", "SE", "S", "SW", "W", "NW", "N"])
    ax.set_xlabel("Wind Direction")

    return fig, means["dwpf"]
Beispiel #16
0
    def __init__(self, datea, fhr, atcf, config):
        # Forecast fields to compute

        wnd_lev_1 = [250, 500]
        wnd_lev_2 = [350, 500]
        n_wnd_lev = len(wnd_lev_1)

        # Read steering flow parameters, or use defaults
        steerp1 = float(config['fields'].get('steer_level1', '300'))
        steerp2 = float(config['fields'].get('steer_level2', '850'))
        tcradius = float(config['fields'].get('steer_radius', '333'))

        # lat_lon info
        lat1 = float(config['fields'].get('min_lat', '0.'))
        lat2 = float(config['fields'].get('max_lat', '65.'))
        lon1 = float(config['fields'].get('min_lon', '-180.'))
        lon2 = float(config['fields'].get('max_lon', '-10.'))

        if 'min_lat' not in config:
            config.update({'min_lat': lat1})
            config.update({'max_lat': lat2})
            config.update({'min_lon': lon1})
            config.update({'max_lon': lon2})

        self.fhr = fhr
        self.atcf_files = atcf.atcf_files
        self.config = config
        self.nens = int(len(self.atcf_files))
        df_files = {}
        self.datea_str = datea
        self.datea = dt.datetime.strptime(datea, '%Y%m%d%H')
        self.datea_s = self.datea.strftime("%m%d%H%M")
        self.fff = str(self.fhr + 1000)[1:]
        datea_1 = self.datea + dt.timedelta(hours=self.fhr)
        datea_1 = datea_1.strftime("%m%d%H%M")

        self.dpp = importlib.import_module(config['io_module'])

        logging.warning("Computing hour {0} ensemble fields".format(self.fff))

        #  Obtain the ensemble lat/lon information, replace missing values with mean
        self.ens_lat, self.ens_lon = atcf.ens_lat_lon_time(self.fhr)

        e_cnt = 0
        m_lat = 0.0
        m_lon = 0.0
        for n in range(self.nens):
            if self.ens_lat[n] != atcf.missing and self.ens_lon[
                    n] != atcf.missing:
                e_cnt = e_cnt + 1
                m_lat = m_lat + self.ens_lat[n]
                m_lon = m_lon + self.ens_lon[n]

        if e_cnt > 0:
            m_lon = m_lon / e_cnt
            m_lat = m_lat / e_cnt

            for n in range(self.nens):
                if self.ens_lat[n] == atcf.missing or self.ens_lon[
                        n] == atcf.missing:
                    self.ens_lat[n] = m_lat
                    self.ens_lon[n] = m_lon

        #  Read grib file information for this forecast hour
        g1 = self.dpp.ReadGribFiles(self.datea_str, self.fhr, self.config)

        dencode = {
            'ensemble_data': {
                'dtype': 'float32'
            },
            'latitude': {
                'dtype': 'float32'
            },
            'longitude': {
                'dtype': 'float32'
            },
            'ensemble': {
                'dtype': 'int32'
            }
        }

        #  Compute steering wind components
        uoutfile = '{0}/{1}_f{2}_usteer_ens.nc'.format(config['work_dir'],
                                                       str(self.datea_str),
                                                       self.fff)
        voutfile = '{0}/{1}_f{2}_vsteer_ens.nc'.format(config['work_dir'],
                                                       str(self.datea_str),
                                                       self.fff)
        if (not os.path.isfile(uoutfile)
                or not os.path.isfile(voutfile)) and config['fields'].get(
                    'calc_uvsteer', 'True') == 'True':

            logging.warning("  Computing steering wind information")

            inpDict = {'isobaricInhPa': (steerp1, steerp2)}
            inpDict = g1.set_var_bounds('zonal_wind', inpDict)

            #  Create output arrays
            outDict = {
                'latitude': (lat1, lat2),
                'longitude': (lon1, lon2),
                'description': 'zonal steering wind',
                'units': 'm/s',
                '_FillValue': -9999.
            }
            outDict = g1.set_var_bounds('zonal_wind', outDict)
            uensmat = g1.create_ens_array('zonal_wind', self.nens, outDict)

            outDict = {
                'latitude': (lat1, lat2),
                'longitude': (lon1, lon2),
                'description': 'meridional steering wind',
                'units': 'm/s',
                '_FillValue': -9999.
            }
            outDict = g1.set_var_bounds('meridional_wind', outDict)
            vensmat = g1.create_ens_array('meridional_wind', self.nens,
                                          outDict)

            outDict = {
                'latitude': (lat1, lat2),
                'longitude': (lon1, lon2),
                'description': 'steering wind vorticity',
                'units': '1/s',
                '_FillValue': -9999.
            }
            outDict = g1.set_var_bounds('zonal_wind', outDict)
            vortmat = g1.create_ens_array('zonal_wind', self.nens, outDict)

            wencode = {
                'latitude': {
                    'dtype': 'float32'
                },
                'longitude': {
                    'dtype': 'float32'
                }
            }

            for n in range(self.nens):

                #  Read global zonal and meridional wind, write to file
                uwnd = g1.read_grib_field('zonal_wind', n, inpDict).rename('u')
                vwnd = g1.read_grib_field('meridional_wind', n,
                                          inpDict).rename('v')

                #             print(uwnd[:,0,0])
                #             print(vwnd[:,0,0])
                #             sys.exit(2)

                uwnd.to_netcdf('wind_info.nc',
                               mode='w',
                               encoding=wencode,
                               format='NETCDF3_CLASSIC')
                vwnd.to_netcdf('wind_info.nc',
                               mode='a',
                               encoding=wencode,
                               format='NETCDF3_CLASSIC')

                latvec = uwnd.latitude.values
                lonvec = uwnd.longitude.values

                if e_cnt > 0:

                    latcen = latvec[np.abs(latvec - self.ens_lat[n]).argmin()]
                    loncen = lonvec[np.abs(lonvec - self.ens_lon[n]).argmin()]

                    #  Call NCL to remove TC winds, read result from file
                    os.system('ncl -Q {0}/tc_steer.ncl tclat={1} tclon={2} tcradius={3}'.format(config['script_dir'],\
                                          str(latcen), str(loncen), str(tcradius)))

                    wfile = nc.Dataset('wind_info.nc')
                    uwnd[:, :, :] = wfile.variables['u'][:, :, :]
                    vwnd[:, :, :] = wfile.variables['v'][:, :, :]

                    os.remove('wind_info.nc')

                #  Integrate the winds over the layer to obtain the steering wind
                pres, lat, lon = uwnd.indexes.values()
                nlev = len(pres)

                uint = uwnd[0, :, :]
                uint[:, :] = 0.0
                vint = vwnd[0, :, :]
                vint[:, :] = 0.0

                for k in range(nlev - 1):

                    uint[:, :] = uint[:, :] + 0.5 * (uwnd[k, :, :] + uwnd[
                        k + 1, :, :]) * abs(pres[k + 1] - pres[k])
                    vint[:, :] = vint[:, :] + 0.5 * (vwnd[k, :, :] + vwnd[
                        k + 1, :, :]) * abs(pres[k + 1] - pres[k])

#             if pres[0] > pres[-1]:
#               uint = -np.trapz(uwnd[:,:,:], pres, axis=0) / abs(pres[-1]-pres[0])
#               vint = -np.trapz(vwnd[:,:,:], pres, axis=0) / abs(pres[-1]-pres[0])
#             else:
#               uint = np.trapz(uwnd[:,:,:], pres, axis=0) / abs(pres[-1]-pres[0])
#               vint = np.trapz(vwnd[:,:,:], pres, axis=0) / abs(pres[-1]-pres[0])

                if lat[0] > lat[-1]:
                    slat1 = lat2
                    slat2 = lat1
                else:
                    slat1 = lat1
                    slat2 = lat2

                #  Write steering flow to ensemble arrays
                uensmat[n, :, :] = np.squeeze(
                    uint.sel(latitude=slice(slat1, slat2),
                             longitude=slice(lon1,
                                             lon2))) / abs(pres[-1] - pres[0])
                vensmat[n, :, :] = np.squeeze(
                    vint.sel(latitude=slice(slat1, slat2),
                             longitude=slice(lon1,
                                             lon2))) / abs(pres[-1] - pres[0])

                #  Compute the vorticity associated with the steering wind

#             circ = VectorWind(unew, vnew).vorticity() * 1.0e5

#             vortmat[n,:,:] = np.squeeze(circ.sel(latitude=slice(lat2, lat1), longitude=slice(lon1, lon2)))

            uensmat.to_netcdf(uoutfile, encoding=dencode)
            vensmat.to_netcdf(voutfile, encoding=dencode)


#          vortmat.to_netcdf(vortfile, encoding=dencode)

        else:

            logging.warning("  Obtaining steering wind information from file")

        #  Read geopotential height from file, if ensemble file is not present
        if config['fields'].get('calc_height', 'True') == 'True':

            if 'height_levels' in config['fields']:
                height_list = json.loads(config['fields'].get('height_levels'))
            else:
                height_list = [500]

            for level in height_list:

                levstr = '%0.3i' % int(level)
                outfile = '{0}/{1}_f{2}_h{3}hPa_ens.nc'.format(
                    config['work_dir'], str(self.datea_str), self.fff, levstr)

                if not os.path.isfile(outfile):

                    logging.warning(
                        '  Computing {0} hPa height'.format(levstr))

                    vDict = {
                        'latitude': (lat1, lat2),
                        'longitude': (lon1, lon2),
                        'isobaricInhPa': (level, level),
                        'description': '{0} hPa height'.format(levstr),
                        'units': 'm',
                        '_FillValue': -9999.
                    }
                    vDict = g1.set_var_bounds('geopotential_height', vDict)
                    ensmat = g1.create_ens_array('geopotential_height',
                                                 g1.nens, vDict)

                    for n in range(g1.nens):
                        ensmat[n, :, :] = np.squeeze(
                            g1.read_grib_field('geopotential_height', n,
                                               vDict))

                    ensmat.to_netcdf(outfile, encoding=dencode)

                elif os.path.isfile(outfile):

                    logging.warning(
                        "  Obtaining {0} hPa height data from {1}".format(
                            levstr, outfile))

        #  Compute 250 hPa PV if the file does not exist
        outfile = '{0}/{1}_f{2}_pv250_ens.nc'.format(config['work_dir'],
                                                     str(self.datea_str),
                                                     self.fff)
        if (not os.path.isfile(outfile)
                and config['fields'].get('calc_pv250hPa', 'True') == 'True'):

            logging.warning("  Computing 250 hPa PV")

            vDict = {
                'latitude': (lat1, lat2),
                'longitude': (lon1, lon2),
                'isobaricInhPa': (200, 300),
                'description': '250 hPa Potential Vorticity',
                'units': 'PVU',
                '_FillValue': -9999.
            }
            vDict = g1.set_var_bounds('zonal_wind', vDict)

            ensmat = g1.create_ens_array('zonal_wind', self.nens, vDict)

            for n in range(self.nens):

                #  Read the necessary fields from file and smooth them, so sensitivities are useful
                tmpk = g1.read_grib_field('temperature', n, vDict) * units('K')

                lats = tmpk.latitude.values * units('degrees')
                lons = tmpk.longitude.values * units('degrees')
                pres = tmpk.isobaricInhPa.values * units('hPa')

                tmpk = mpcalc.smooth_n_point(tmpk, 9, 4)

                thta = mpcalc.potential_temperature(pres[:, None, None], tmpk)

                uwnd = mpcalc.smooth_n_point(
                    g1.read_grib_field('zonal_wind', n, vDict) * units('m/s'),
                    9, 4)
                vwnd = mpcalc.smooth_n_point(
                    g1.read_grib_field('meridional_wind', n, vDict) *
                    units('m/s'), 9, 4)

                dx, dy = mpcalc.lat_lon_grid_deltas(lons,
                                                    lats,
                                                    x_dim=-1,
                                                    y_dim=-2,
                                                    geod=None)

                #  Compute PV and place in ensemble array
                pvout = mpcalc.potential_vorticity_baroclinic(
                    thta, pres[:, None, None], uwnd, vwnd, dx[None, :, :],
                    dy[None, :, :], lats[None, :, None])

                ensmat[n, :, :] = np.squeeze(pvout[np.where(
                    pres == 250 * units('hPa'))[0], :, :]) * 1.0e6
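                #  potential_vorticity_baroclinic returns SI units (K m^2 kg^-1 s^-1);
                #  the 1.0e6 factor converts to the conventional PVU,
                #  1 PVU = 1e-6 K m^2 kg^-1 s^-1.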

            ensmat.to_netcdf(outfile, encoding=dencode)

        elif os.path.isfile(outfile):

            logging.warning(
                "  Obtaining 250 hPa PV data from {0}".format(outfile))

        #  Compute the equivalent potential temperature (if desired and file is missing)
        if config['fields'].get('calc_theta-e', 'False') == 'True':

            if 'theta-e_levels' in config['fields']:
                thetae_list = json.loads(
                    config['fields'].get('theta-e_levels'))
            else:
                thetae_list = [700, 850]

            for level in thetae_list:

                levstr = '%0.3i' % int(level)
                outfile = '{0}/{1}_f{2}_e{3}hPa_ens.nc'.format(
                    config['work_dir'], str(self.datea_str), self.fff, levstr)

                if not os.path.isfile(outfile):

                    logging.warning(
                        '  Computing {0} hPa Theta-E'.format(levstr))

                    vDict = {
                        'latitude': (lat1, lat2),
                        'longitude': (lon1, lon2),
                        'isobaricInhPa': (level, level),
                        'description':
                        '{0} hPa Equivalent Potential Temperature'.format(
                            levstr),
                        'units':
                        'K',
                        '_FillValue':
                        -9999.
                    }
                    vDict = g1.set_var_bounds('temperature', vDict)

                    ensmat = g1.create_ens_array('temperature', g1.nens, vDict)

                    for n in range(g1.nens):

                        tmpk = g1.read_grib_field('temperature', n,
                                                  vDict) * units.K
                        pres = tmpk.isobaricInhPa.values * units.hPa

                        if g1.has_specific_humidity:
                            qvap = np.squeeze(
                                g1.read_grib_field('specific_humidity', n,
                                                   vDict))
                            tdew = mpcalc.dewpoint_from_specific_humidity(
                                pres[None, None], tmpk, qvap)
                        else:
                            relh = g1.read_grib_field('relative_humidity', n,
                                                      vDict)
                            relh = np.minimum(np.maximum(relh, 0.01),
                                              100.0) * units.percent
                            tdew = mpcalc.dewpoint_from_relative_humidity(
                                tmpk, relh)

                        ensmat[n, :, :] = np.squeeze(
                            mpcalc.equivalent_potential_temperature(
                                pres[None, None], tmpk, tdew))

                    ensmat.to_netcdf(outfile, encoding=dencode)

                elif os.path.isfile(outfile):

                    logging.warning(
                        "  Obtaining {0} hPa Theta-e data from {1}".format(
                            levstr, outfile))

        #  Compute the 500-850 hPa water vapor mixing ratio (if desired and file is missing)
        outfile = '{0}/{1}_f{2}_q500-850hPa_ens.nc'.format(
            config['work_dir'], str(self.datea_str), self.fff)
        if (not os.path.isfile(outfile) and config['fields'].get(
                'calc_q500-850hPa', 'False') == 'True'):

            logging.warning("  Computing 500-850 hPa Water Vapor")

            vDict = {
                'latitude': (lat1, lat2),
                'longitude': (lon1, lon2),
                'description': '500-850 hPa Integrated Water Vapor',
                'units': 'hPa',
                '_FillValue': -9999.
            }
            vDict = g1.set_var_bounds('temperature', vDict)

            ensmat = g1.create_ens_array('temperature', len(self.atcf_files),
                                         vDict)

            vDict = {
                'latitude': (lat1, lat2),
                'longitude': (lon1, lon2),
                'isobaricInhPa': (500, 850),
                'description': '500-850 hPa Integrated Water Vapor',
                'units': 'hPa',
                '_FillValue': -9999.
            }
            vDict = g1.set_var_bounds('temperature', vDict)

            for n in range(self.nens):

                tmpk = np.squeeze(g1.read_grib_field('temperature', n,
                                                     vDict)) * units('K')
                pres = (tmpk.isobaricInhPa.values * units.hPa).to(units.Pa)

                if g1.has_specific_humidity:
                    qvap = mpcalc.mixing_ratio_from_specific_humidity(
                        g1.read_grib_field('specific_humidity', n, vDict))
                else:
                    relh = np.minimum(
                        np.maximum(
                            g1.read_grib_field('relative_humidity', n, vDict),
                            0.01), 100.0) * units('percent')
                    qvap = mpcalc.mixing_ratio_from_relative_humidity(
                        pres[:, None, None], tmpk, relh)

                #  Integrate water vapor over the pressure levels
                ensmat[n, :, :] = np.abs(np.trapz(
                    qvap, pres, axis=0)) / mpcon.earth_gravity
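                #  This is the standard precipitable-water integral (1/g) * int(r dp),
                #  giving kg m^-2 (numerically mm of liquid water); np.abs guards
                #  against the sign flipping with the ordering of the pressure levels.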

            ensmat.to_netcdf(outfile, encoding=dencode)

        elif os.path.isfile(outfile):

            logging.warning(
                "  Obtaining 500-850 hPa water vapor data from {0}".format(
                    outfile))

        #  Compute wind-related forecast fields (if desired and file is missing)
        if config['fields'].get('calc_winds', 'False') == 'True':

            if 'wind_levels' in config['fields']:
                wind_list = json.loads(config['fields'].get('wind_levels'))
            else:
                wind_list = [850]

            for level in wind_list:

                levstr = '%0.3i' % int(level)
                ufile = '{0}/{1}_f{2}_u{3}hPa_ens.nc'.format(
                    config['work_dir'], str(self.datea_str), self.fff, levstr)
                vfile = '{0}/{1}_f{2}_v{3}hPa_ens.nc'.format(
                    config['work_dir'], str(self.datea_str), self.fff, levstr)

                if (not os.path.isfile(ufile)) or (not os.path.isfile(vfile)):

                    logging.warning(
                        '  Computing {0} hPa wind information'.format(levstr))

                    uDict = {
                        'latitude': (lat1, lat2),
                        'longitude': (lon1, lon2),
                        'isobaricInhPa': (level, level),
                        'description': '{0} hPa zonal wind'.format(levstr),
                        'units': 'm/s',
                        '_FillValue': -9999.
                    }
                    uDict = g1.set_var_bounds('zonal_wind', uDict)

                    uensmat = g1.create_ens_array('zonal_wind', g1.nens, uDict)

                    vDict = {
                        'latitude': (lat1, lat2),
                        'longitude': (lon1, lon2),
                        'isobaricInhPa': (level, level),
                        'description':
                        '{0} hPa meridional wind'.format(levstr),
                        'units': 'm/s',
                        '_FillValue': -9999.
                    }
                    vDict = g1.set_var_bounds('meridional_wind', vDict)

                    vensmat = g1.create_ens_array('meridional_wind', g1.nens,
                                                  vDict)

                    for n in range(g1.nens):

                        uwnd = g1.read_grib_field('zonal_wind', n,
                                                  uDict).squeeze()
                        vwnd = g1.read_grib_field('meridional_wind', n,
                                                  vDict).squeeze()

                        uensmat[n, :, :] = uwnd[:, :]
                        vensmat[n, :, :] = vwnd[:, :]

                    uensmat.to_netcdf(ufile, encoding=dencode)
                    vensmat.to_netcdf(vfile, encoding=dencode)

                else:

                    logging.warning(
                        "  Obtaining {0} hPa wind information from file".
                        format(levstr))
Beispiel #17
0
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn("asos")
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx["zstation"]
    year = ctx["year"]
    varname = ctx["var"]

    df = read_sql(
        """
        SELECT extract(year from valid) as year,
        coalesce(mslp, alti * 33.8639, 1013.25) as slp,
        extract(doy from valid) as doy, tmpf, dwpf, relh from alldata
        where station = %s and dwpf > -50 and dwpf < 90 and
        tmpf > -50 and tmpf < 120 and valid > '1950-01-01'
        and report_type = 2
    """,
        pgconn,
        params=(station, ),
        index_col=None,
    )
    if df.empty:
        raise NoDataFound("No Data was found.")
    # saturation vapor pressure
    # Convert sea level pressure to station pressure
    df["pressure"] = mcalc.add_height_to_pressure(
        df["slp"].values * units("millibars"),
        ctx["_nt"].sts[station]["elevation"] * units("m"),
    ).to(units("millibar"))
    # Compute the mixing ratio
    df["mixing_ratio"] = mcalc.mixing_ratio_from_relative_humidity(
        df["relh"].values * units("percent"),
        df["tmpf"].values * units("degF"),
        df["pressure"].values * units("millibars"),
    )
    # Compute the saturation mixing ratio
    df["saturation_mixingratio"] = mcalc.saturation_mixing_ratio(
        df["pressure"].values * units("millibars"),
        df["tmpf"].values * units("degF"),
    )
    df["vapor_pressure"] = mcalc.vapor_pressure(
        df["pressure"].values * units("millibars"),
        df["mixing_ratio"].values * units("kg/kg"),
    ).to(units("kPa"))
    df["saturation_vapor_pressure"] = mcalc.vapor_pressure(
        df["pressure"].values * units("millibars"),
        df["saturation_mixingratio"].values * units("kg/kg"),
    ).to(units("kPa"))
    df["vpd"] = df["saturation_vapor_pressure"] - df["vapor_pressure"]

    dailymeans = df[["year", "doy", varname]].groupby(["year", "doy"]).mean()
    dailymeans = dailymeans.reset_index()

    df2 = dailymeans[["doy", varname]].groupby("doy").describe()

    dyear = df[df["year"] == year]
    df3 = dyear[["doy", varname]].groupby("doy").describe()
    df3[(varname, "diff")] = df3[(varname, "mean")] - df2[(varname, "mean")]

    (fig, ax) = plt.subplots(2, 1, figsize=(8, 6))
    multiplier = 1000.0 if varname == "mixing_ratio" else 10.0

    ax[0].fill_between(
        df2[(varname, "min")].index.values,
        df2[(varname, "min")].values * multiplier,
        df2[(varname, "max")].values * multiplier,
        color="gray",
    )

    ax[0].plot(
        df2[(varname, "mean")].index.values,
        df2[(varname, "mean")].values * multiplier,
        label="Climatology",
    )
    ax[0].plot(
        df3[(varname, "mean")].index.values,
        df3[(varname, "mean")].values * multiplier,
        color="r",
        label="%s" % (year, ),
    )

    ax[0].set_title(("%s [%s]\nDaily Mean Surface %s") %
                    (station, ctx["_nt"].sts[station]["name"], PDICT[varname]))
    lbl = ("Mixing Ratio ($g/kg$)"
           if varname == "mixing_ratio" else PDICT[varname])
    ax[0].set_ylabel(lbl)
    ax[0].set_xlim(0, 366)
    ax[0].set_ylim(bottom=0)
    ax[0].set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335))
    ax[0].set_xticklabels(calendar.month_abbr[1:])
    ax[0].grid(True)
    ax[0].legend(loc=2, fontsize=10)

    cabove = "b" if varname == "mixing_ratio" else "r"
    cbelow = "r" if cabove == "b" else "b"
    rects = ax[1].bar(
        df3[(varname, "diff")].index.values,
        df3[(varname, "diff")].values * multiplier,
        facecolor=cabove,
        edgecolor=cabove,
    )
    for rect in rects:
        if rect.get_height() < 0.0:
            rect.set_facecolor(cbelow)
            rect.set_edgecolor(cbelow)

    plunits = "$g/kg$" if varname == "mixing_ratio" else "hPa"
    ax[1].set_ylabel("%.0f Departure (%s)" % (year, plunits))
    ax[1].set_xlim(0, 366)
    ax[1].set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335))
    ax[1].set_xticklabels(calendar.month_abbr[1:])
    ax[1].grid(True)
    return fig, df3
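
The vpd column above is the saturation vapor pressure minus the actual vapor pressure, both derived from mixing ratios. A minimal standalone sketch of that chain, with assumed sample values and the MetPy >= 1.0 pressure-first argument order (the snippet itself uses the older RH-first order):

# A hedged sketch of the VPD chain above; sample values are assumptions.
import metpy.calc as mcalc
from metpy.units import units

pressure = 990. * units.hPa
temperature = 30. * units.degC
relh = 45. * units.percent

mr = mcalc.mixing_ratio_from_relative_humidity(pressure, temperature, relh)
smr = mcalc.saturation_mixing_ratio(pressure, temperature)
e = mcalc.vapor_pressure(pressure, mr).to('kPa')
es = mcalc.vapor_pressure(pressure, smr).to('kPa')
vpd = es - e  # vapor pressure deficit
print(vpd)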
Example #18
0
date_list = [
    x.strftime('%Y-%m-%d %H:%M')
    for x in rrule.rrule(rrule.HOURLY, dtstart=date_1, until=date_2)
]

metar_file = main + '/wyoming/' + date[0:6] + '/AbuDhabi_surf_' + date[
    0:8] + '.csv'
outFile = main + '/wyoming/' + date[0:6] + '/AbuDhabi_surf_mr' + date[
    0:8] + '.csv'

metar_data = pd.read_csv(metar_file)

metar_data = metar_data[[
    'STN', 'TIME', 'ALTM', 'TMP', 'DEW', 'RH', 'DIR', 'SPD', 'VIS'
]]
#metar_data=metar_data.drop('Unnamed: 9',axis=1)
metar_data = metar_data.drop(metar_data.index[0])
metar_data['TIME'] = date_list

tmp = np.array((metar_data.iloc[:, 3]).apply(pd.to_numeric, errors='coerce') +
               273.15) * units('K')
rh = np.array(
    (metar_data.iloc[:, 5]).apply(pd.to_numeric, errors='coerce') / 100.0)
press = np.array((metar_data.iloc[:, 2]).apply(
    pd.to_numeric, errors='coerce')) * units('millibar')

mrio = mcalc.mixing_ratio_from_relative_humidity(rh, tmp, press)

metar_data['mrio'] = mrio

metar_data.to_csv(outFile)
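
The call above uses the (rh, temperature, pressure) argument order of MetPy 0.x; MetPy 1.0 reordered the function to take pressure first. A hedged sketch of the same surface mixing-ratio computation against the modern signature, with assumed sample values:

import numpy as np
import metpy.calc as mcalc
from metpy.units import units

press = np.array([1010., 1008.]) * units.hPa
tmp = np.array([303.15, 305.15]) * units.kelvin
rh = np.array([0.55, 0.40]) * units.dimensionless   # fraction, not percent

# MetPy >= 1.0 order: pressure, temperature, relative humidity
mrio = mcalc.mixing_ratio_from_relative_humidity(press, tmp, rh)
print(mrio.to('g/kg'))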
Example #19
0
ds = xr.open_dataset(URL)    # use xarray to access the above URL as defined via 'dt'
ds = ds.sel(time=dt.strftime('%Y-%m-%dT%H:%M:%S'))  # reduce the dataset to the RAP analysis (starting) time from 'dt'

#### Want to list the field names + some info from this xarray Dataset? Uncomment the below 6 lines!
#fieldNames = list(ds.variables.keys())
#for fieldName in fieldNames:
#    try:
#        print('%s -- "%s" [%s]' % (fieldName, ds[fieldName].long_name, ds[fieldName].units))
#    except AttributeError:
#        print(fieldName)

#### Compute 1000-500-hPa geopotential thickness (pressure coordinates are in units of Pa)
thick = ds.Geopotential_height_isobaric.sel(isobaric=50000) - ds.Geopotential_height_isobaric.sel(isobaric=100000)

#### Compute 1000-500-hPa pressure-weighted mean virtual temperature
MR = mpcalc.mixing_ratio_from_relative_humidity(ds.isobaric, ds.Temperature_isobaric, ds.Relative_humidity_isobaric)
Tvirt = mpcalc.virtual_temperature(ds.Temperature_isobaric, MR, molecular_weight_ratio=0.6219569100577033)
layer = slice(50000,100000)  # 1000-500-hPa layer
dp = np.zeros(ds.isobaric.sel(isobaric=layer).size, dtype=float)  # NumPy array to store dp, the depth in Pa of each layer
dp[:-1] = np.abs(np.ediff1d(ds.isobaric.sel(isobaric=layer).values))
dp[-1] = dp[-2]  # np.ediff1d produces an array 1 smaller than the input, so make an assumption here for final value
dp = xr.DataArray(data=dp.copy(), dims=['isobaric'], coords={'isobaric':ds.isobaric.sel(isobaric=layer)},
                  attrs={'units': ds.isobaric.units})
TvirtLayer = np.sum(Tvirt.sel(isobaric=layer)*ds.isobaric.sel(isobaric=layer)*dp, axis=0) / \
             np.sum(ds.isobaric.sel(isobaric=layer)*dp, axis=0)

#### Convert x/y coordinates to latitude & longitude (note: steps are NOT via xarray as I'm debugging that approach)
dsProjDict = ds.LambertConformal_Projection.attrs  # obtain the source data map projection
R = dsProjDict['earth_radius']
dsProj = ccrs.LambertConformal(central_longitude=dsProjDict['longitude_of_central_meridian']-360.,
                               central_latitude=dsProjDict['latitude_of_projection_origin'])
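
The pressure-weighted layer mean above is the discrete form of Tv_bar = sum(Tv * p * dp) / sum(p * dp) over the 1000-500-hPa layer. A minimal NumPy sketch with synthetic values, where np.gradient stands in for the ediff1d-plus-repeated-last-value step:

import numpy as np

p = np.array([100000., 92500., 85000., 70000., 50000.])   # Pa, 1000-500 hPa levels
tv = np.array([288., 284., 280., 271., 252.])             # K, virtual temperature (assumed)
dp = np.abs(np.gradient(p))                               # approximate layer depth (Pa)
tv_bar = np.sum(tv * p * dp) / np.sum(p * dp)             # pressure-weighted mean
print(round(float(tv_bar), 2))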
Example #20
0
def gradient_fluxes(df):  # This method is very sensitive to input data quality
    """Returns Sensible Heat Flux and Latent Heat Flux based on Steffen & DeMaria (1996) method"""
    g = 9.81  # m/s**2
    cp = 1005  # J/kg/K
    k = 0.4  # von Karman
    Lv = 2.50e6  # J/kg

    fillvalue = common.fillvalue_float

    ht_low, ht_high, ta_low, ta_high, wspd_low, wspd_high, rh_low, rh_high, phi_m, phi_h = ([] for _ in range(10))

    # Average temp from both sensors for height1 and height2
    ta1 = df.loc[:, ("ta_tc1", "ta_cs1")]
    ta2 = df.loc[:, ("ta_tc2", "ta_cs2")]
    df['ta1'] = ta1.mean(axis=1)
    df['ta2'] = ta2.mean(axis=1)

    # Assign low and high depending on height of sensors
    idx = 0
    while idx < len(df):
        if df['wind_sensor_height_1'][idx] == fillvalue or df['wind_sensor_height_2'][idx] == fillvalue:
            ht_low.append(np.nan)
            ht_high.append(np.nan)
            ta_low.append(df['ta1'][idx])
            ta_high.append(df['ta2'][idx])
            wspd_low.append(df['wspd1'][idx])
            wspd_high.append(df['wspd2'][idx])
            rh_low.append(df['rh1'][idx])
            rh_high.append(df['rh2'][idx])
        elif df['wind_sensor_height_1'][idx] > df['wind_sensor_height_2'][idx]:
            ht_low.append(df['wind_sensor_height_2'][idx])
            ht_high.append(df['wind_sensor_height_1'][idx])
            ta_low.append(df['ta2'][idx])
            ta_high.append(df['ta1'][idx])
            wspd_low.append(df['wspd2'][idx])
            wspd_high.append(df['wspd1'][idx])
            rh_low.append(df['rh2'][idx])
            rh_high.append(df['rh1'][idx])
        else:
            ht_low.append(df['wind_sensor_height_1'][idx])
            ht_high.append(df['wind_sensor_height_2'][idx])
            ta_low.append(df['ta1'][idx])
            ta_high.append(df['ta2'][idx])
            wspd_low.append(df['wspd1'][idx])
            wspd_high.append(df['wspd2'][idx])
            rh_low.append(df['rh1'][idx])
            rh_high.append(df['rh2'][idx])

        idx += 1

    # Convert lists to arrays
    ht_low = np.asarray(ht_low)
    ht_high = np.asarray(ht_high)
    ta_low = np.asarray(ta_low)
    ta_high = np.asarray(ta_high)
    wspd_low = np.asarray(wspd_low)
    wspd_high = np.asarray(wspd_high)
    rh_low = np.asarray(rh_low)
    rh_high = np.asarray(rh_high)
    ps = np.asarray(df['ps'].values)

    # Potential Temperature
    pot_tmp_low = potential_temperature(ps * units.pascal, ta_low * units.kelvin).magnitude
    pot_tmp_high = potential_temperature(ps * units.pascal, ta_high * units.kelvin).magnitude
    pot_tmp_avg = (pot_tmp_low + pot_tmp_high)/2
    ta_avg = (ta_low + ta_high)/2

    # Ri
    du = wspd_high-wspd_low
    du = np.asarray([fillvalue if i == 0 else i for i in du])
    pot_tmp_avg = np.asarray([fillvalue if i == 0 else i for i in pot_tmp_avg])
    ri = g*(pot_tmp_high - pot_tmp_low)*(ht_high - ht_low)/(pot_tmp_avg*du)

    # Phi
    for val in ri:
        if val < -0.03:
            phi = (1-18*val)**-0.25
            phi_m.append(phi)
            phi_h.append(phi/1.3)
        elif -0.03 <= val < 0:
            phi = (1-18*val)**-0.25
            phi_m.append(phi)
            phi_h.append(phi)
        else:
            phi = (1-5.2*val)**-1
            phi_m.append(phi)
            phi_h.append(phi)

    phi_e = phi_h

    # air density
    rho = density(ps * units.pascal, ta_avg * units.kelvin, 0).magnitude  # Use average temperature

    # SH
    ht_low = np.asarray([fillvalue if i == 0 else i for i in ht_low])
    num = np.asarray([-a1 * cp * k**2 * (b1 - c1) * (d1 - e1) for a1, b1, c1, d1, e1 in
           zip(rho, pot_tmp_high, pot_tmp_low, wspd_high, wspd_low)])
    dnm = [a2 * b2 * np.log(c2 / d2)**2 for a2, b2, c2, d2 in
           zip(phi_h, phi_m, ht_high, ht_low)]
    dnm = np.asarray([fillvalue if i == 0 else i for i in dnm])
    sh = num/dnm
    sh = [fillvalue if abs(i) >= 100 else i for i in sh]

    # Specific Humidity
    mixing_ratio_low = mixing_ratio_from_relative_humidity(rh_low, ta_low * units.kelvin, ps * units.pascal)
    mixing_ratio_high = mixing_ratio_from_relative_humidity(rh_high, ta_high * units.kelvin, ps * units.pascal)
    q_low = specific_humidity_from_mixing_ratio(mixing_ratio_low).magnitude
    q_high = specific_humidity_from_mixing_ratio(mixing_ratio_high).magnitude
    q_low = q_low/100  # Divide by 100 to make it in range [0,1]
    q_high = q_high/100

    # LH
    num = np.asarray([-a1 * Lv * k**2 * (b1 - c1) * (d1 - e1) for a1, b1, c1, d1, e1 in
           zip(rho, q_high, q_low, wspd_high, wspd_low)])
    dnm = [a2 * b2 * np.log(c2 / d2)**2 for a2, b2, c2, d2 in
           zip(phi_e, phi_m, ht_high, ht_low)]
    dnm = np.asarray([fillvalue if i == 0 else i for i in dnm])
    lh = num/dnm
    lh = [fillvalue if abs(i) >= 100 else i for i in lh]

    return sh, lh
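
Per sample, the zipped numerator and denominator above implement SH = -rho * cp * k^2 * (theta_hi - theta_lo) * (u_hi - u_lo) / (phi_h * phi_m * ln(z_hi / z_lo)^2). A scalar sketch with assumed sample values:

import numpy as np

def sensible_heat_flux(rho, theta_hi, theta_lo, u_hi, u_lo, z_hi, z_lo,
                       phi_h, phi_m, cp=1005.0, k=0.4):
    """Scalar form of the flux-gradient expression vectorized above."""
    num = -rho * cp * k**2 * (theta_hi - theta_lo) * (u_hi - u_lo)
    dnm = phi_h * phi_m * np.log(z_hi / z_lo)**2
    return num / dnm

# Assumed sample values: weakly stable near-surface layer
print(sensible_heat_flux(1.2, 268.0, 267.2, 6.0, 4.5, 3.0, 1.0, 1.0, 1.0))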
Example #21
0
def read_TInsitu(pname, print_long, e_test, tstart=None, tend=None, d_testing=False):
    ''' Reads data files provided on TORUS19 EOL site (Important that datafiles and filenames follow TORUS19 readme)
        ---
        INPUT: filename string (following readme conventions for each platform)
               tstart,tend: datetime objects
        OUTPUT: dataframe containing all data collected during desired times
    '''
    if pname in pform_names('UNL'):
        mmfile = glob.glob(config.g_mesonet_directory+config.day+'/mesonets/UNL/UNL.'+pname+'.*')

        # Test Data availability
        if d_testing == True:
            try:
                #if there are no files this will cause the try statement to fail
                mtest = mmfile[0]
                return True
            except: return False
        #  if the function was only called to test data availability it will exit at this point (saving computing time)
        # + + + + + + + + + + + + ++ + +

        data_hold = [] #empty list to append to
        for i in range(len(mmfile)):
            ds = xr.open_dataset(mmfile[i])

            #convert from epoch time to utc datetime object
            timearray = np.array([dt.datetime.utcfromtimestamp(t/1e9) for t in ds.time.values])
            U,V = mpcalc.wind_components(ds.wind_speed, ds.wind_dir)
            lats = np.array([40]) * units.degrees

            #create a new xarray dataset to make plotting easier, since the original is awkward to work with
            dims = ['datetime']
            coords = { 'datetime': timearray }
            data_vars = {
                'lat': (dims, ds.lat.values, {'units':str(lats.units)}),
                'lon': (dims, ds.lon.values, {'units':str(lats.units)}),
                'Z_ASL': (dims, ds.alt.values, {'units':str(ds.alt.units)}),
                'Z_AGL': (dims, np.zeros_like(ds.alt.values), {'units':str(ds.alt.units)}),
                'Temperature': (dims, ds.fast_temp.values, {'units':str(ds.fast_temp.units)}),
                'Dewpoint': (dims, ds.dewpoint.values, {'units':str(ds.dewpoint.units)}),
                'RH': (dims, ds.calc_corr_RH.values, {'units':str(ds.calc_corr_RH.units)}),
                'Pressure': (dims, ds.pressure.values, {'units':str(ds.pressure.units)}),
                'U': (dims, U.m, {'units':str(U.units)}),
                'V': (dims, V.m, {'units':str(V.units)}),
                'Theta': (dims, ds.theta.values, {'units':str(ds.theta.units)}),
                'Thetae': (dims, ds.theta_e.values, {'units':str(ds.theta_e.units)}),
                'Thetav': (dims, ds.theta_v.values, {'units':str(ds.theta_v.units)})
            }

            subds = xr.Dataset(data_vars, coords)

            #convert to pandas
            pd_unl = subds.to_dataframe()
            pd_unl.reset_index(inplace=True)

            # Subset the dataset to the desired time window
            if tstart is None: hstart = pd_unl['datetime'].iloc[0]
            else: hstart = tstart
            if tend is None: hend = pd_unl['datetime'].iloc[-1]
            else: hend = tend
            # Save only desired iop data
            data_u = pd_unl.loc[(pd_unl['datetime'] >= hstart) & (pd_unl['datetime'] <= hend)]
            data_hold.append(data_u)

        #convert the list holding the dataframes to one large dataframe
        data_unl = pd.concat(data_hold)

        #drop all the columns that we will not use past this point (to save memory/computing time)
        data_unl = data_unl.drop(columns=['Temperature', 'Z_ASL', 'Z_AGL', 'Theta', 'Dewpoint', 'Pressure', 'RH'])
        #  print(data_unl.memory_usage())
        #  data_unl.set_index('datetime', inplace=True, drop=False)
        return data_unl, 'UNL'

    # * * *
    elif pname in pform_names('NSSL'):
        mmfile = glob.glob(config.g_mesonet_directory+config.day+'/mesonets/NSSL/'+pname+'_'+config.day[2:]+'_QC_met.dat')

        # Test Data availability
        if d_testing == True:
            try:
                #if there are no files this will cause the try statement to fail
                mtest = mmfile[0]
                return True
            except: return False
        # + + + + + + + + + + + + ++ + +

        mmfile=mmfile[0]
        # Read NSSL file using column names from readme
        column_names = ['id','time','lat','lon','alt','tfast','tslow','rh','p','dir','spd','qc1','qc2','qc3','qc4']
        data = pd.read_csv(mmfile, header=0, delim_whitespace=True, names=column_names)
        data = data.drop_duplicates()

        # Find timedelta of hours since start of iop (IOP date taken from filename!)
        tiop = dt.datetime(2019, int(mmfile[-15:-13]), int(mmfile[-13:-11]), 0, 0, 0)

        if tstart is None:
            hstart = tiop
            hstart_dec = hstart.hour + (hstart.minute/60) + (hstart.second/3600) #convert to decimal hours HH.HHH
        else:
            hstart = float((tstart - tiop).seconds)/3600
            hstart_dec = hstart

        if tend is None:
            hend = data['time'].iloc[-1]
        else:
            hend = (tend - tiop)
            if hend >= dt.timedelta(days=1): hend = float((tend-tiop).seconds)/3600 + 24.
            else: hend = float((tend - tiop).seconds)/3600

        # Save only desired iop data
        data_nssl = data.loc[(data['time'] >= hstart_dec) & (data['time'] <= hend)]
        # Convert time into timedeltas
        date = dt.datetime.strptime('2019-'+mmfile[-15:-13]+'-'+mmfile[-13:-11],'%Y-%m-%d')
        time_deltas = []
        for i in np.arange(len(data_nssl)):
            j = data_nssl['time'].iloc[i]
            time_deltas = np.append(time_deltas, date + dt.timedelta(hours=int(j), minutes=int((j*60) % 60), seconds=int((j*3600) % 60)))
        data_nssl['datetime'] = time_deltas

        ## Calculate desired variables
        p, t = data_nssl['p'].values * units.hectopascal, data_nssl['tfast'].values * units.degC
        r_h = data_nssl['rh'].values/100
        data_nssl['Theta'] = (mpcalc.potential_temperature(p, t)).m

        mixing = mpcalc.mixing_ratio_from_relative_humidity(r_h, t, p)
        data_nssl['Thetav'] = (mpcalc.virtual_potential_temperature(p, t, mixing)).m

        td = mpcalc.dewpoint_rh(temperature=t, rh=r_h)
        data_nssl['Thetae'] = (mpcalc.equivalent_potential_temperature(p, t, td)).m

        Spd, dire = data_nssl['spd'].values * units('m/s') , data_nssl['dir'].values * units('degrees')
        u, v = mpcalc.wind_components(Spd, dire)
        data_nssl['U'], data_nssl['V'] = u.to('knot'), v.to('knot')

        #  q_list = ['qc1','qc2','qc3','qc4']
        q_list = config.NSSL_qcflags
        data_nssl['all_qc_flags'] = data_nssl[q_list].sum(axis=1)
        #  data_nssl.set_index('datetime', inplace=True, drop=False)

        #drop all the columns that we will not use past this point (to save memory/computing time)
        data_nssl = data_nssl.drop(columns=['rh', 'p', 'time', 'alt', 'Theta', 'tfast', 'tslow'])
        #  print(data_nssl.memory_usage())
        return data_nssl, 'NSSL'

    # * * *
    elif pname == 'UAS':
        if print_long == True: print("no code for reading UAS yet")
        if d_testing == True: return False
        return 'UAS'
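
The NSSL branch derives Theta, Thetav, and Thetae from pressure, temperature, and relative humidity. A standalone sketch of that chain with assumed scalar inputs, using dewpoint_from_relative_humidity, the current name for the deprecated dewpoint_rh called above:

import metpy.calc as mpcalc
from metpy.units import units

p = 965. * units.hPa
t = 24. * units.degC
rh = 0.62 * units.dimensionless   # fraction, assumed sample value

theta = mpcalc.potential_temperature(p, t)
mixing = mpcalc.mixing_ratio_from_relative_humidity(p, t, rh)
theta_v = mpcalc.virtual_potential_temperature(p, t, mixing)
td = mpcalc.dewpoint_from_relative_humidity(t, rh)
theta_e = mpcalc.equivalent_potential_temperature(p, t, td)
print(theta, theta_v, theta_e)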
Example #22
0
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn('asos')

    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['zstation']
    h1 = int(ctx['h1'])
    h2 = int(ctx['h2'])
    varname = ctx['v']

    tzname = ctx['_nt'].sts[station]['tzname']

    df = read_sql("""
    WITH data as (
        SELECT valid at time zone %s + '10 minutes'::interval as localvalid,
        date_trunc(
             'hour', valid at time zone %s  + '10 minutes'::interval) as v,
        tmpf, dwpf, sknt, drct, alti, relh, random() as r,
        coalesce(mslp, alti * 33.8639, 1013.25) as slp
        from alldata where station = %s and report_type = 2
        and extract(hour from valid at time zone %s + '10 minutes'::interval)
        in (%s, %s)),
     agg as (
          select *, extract(hour from v) as hour,
          rank() OVER (PARTITION by v ORDER by localvalid ASC, r ASC) from data
     )

     SELECT *, date(
         case when hour = %s
         then date(v - '1 day'::interval)
         else date(v) end) from agg WHERE rank = 1
    """,
                  pgconn,
                  params=(tzname, tzname, station, tzname, h1, h2,
                          h2 if h2 < h1 else -1),
                  index_col=None)
    if df.empty:
        raise NoDataFound("No data was found.")
    if varname == 'q':
        df['pressure'] = mcalc.add_height_to_pressure(
            df['slp'].values * units('millibars'),
            ctx['_nt'].sts[station]['elevation'] * units('m')).to(
                units('millibar'))
        # compute mixing ratio
        df['q'] = mcalc.mixing_ratio_from_relative_humidity(
            df['relh'].values * units('percent'), df['tmpf'].values *
            units('degF'), df['pressure'].values * units('millibars')) * 1000.

    # pivot
    df = df.pivot(index='date', columns='hour', values=varname).reset_index()
    df = df.dropna()
    df['doy'] = pd.to_numeric(pd.to_datetime(df['date']).dt.strftime("%j"))
    df['year'] = pd.to_datetime(df['date']).dt.year
    df['week'] = (df['doy'] / 7).astype(int)
    df['delta'] = df[h2] - df[h1]

    (fig, ax) = plt.subplots(1, 1)
    if ctx['opt'] == 'no':
        ax.set_xlabel("Plotted lines are smoothed over %.0f days" %
                      (ctx['smooth'], ))
    ax.set_ylabel(
        "%s %s Difference" %
        (PDICT[varname], "Accumulated Sum" if ctx['opt'] == 'yes' else ''))

    if ctx['opt'] == 'no':
        # Histogram
        H, xedges, yedges = np.histogram2d(df['doy'].values,
                                           df['delta'].values,
                                           bins=(50, 50))
        ax.pcolormesh(xedges,
                      yedges,
                      H.transpose(),
                      cmap=plt.get_cmap(ctx['cmap']),
                      alpha=0.5)

    # Plot an average line
    gdf = df.groupby('doy').mean().rolling(ctx['smooth'],
                                           min_periods=1,
                                           center=True).mean()
    y = gdf['delta'] if ctx['opt'] == 'no' else gdf['delta'].cumsum()
    ax.plot(gdf.index.values,
            y,
            label='Average',
            zorder=6,
            lw=2,
            color='k',
            linestyle='-.')

    # Plot selected year
    for i in range(1, 5):
        year = ctx.get("y%s" % (i, ))
        if year is None:
            continue
        df2 = df[df['year'] == year]
        if not df2.empty:
            gdf = df2.groupby('doy').mean().rolling(ctx['smooth'],
                                                    min_periods=1,
                                                    center=True).mean()
            y = gdf['delta'] if ctx['opt'] == 'no' else gdf['delta'].cumsum()
            ax.plot(gdf.index.values, y, label=str(year), lw=2, zorder=10)

    ax.set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 365))
    ax.set_xticklabels(calendar.month_abbr[1:])
    ax.set_xlim(1, 366)
    ax.grid(True)
    ax.legend(loc='best', ncol=5)
    sts = datetime.datetime(2000, 6, 1, h1)
    ets = datetime.datetime(2000, 6, 1, h2)
    title = ("%s [%s] %s Difference (%.0f-%.0f)\n"
             "%s minus %s (%s) (timezone: %s)") % (
                 ctx['_nt'].sts[station]['name'], station, PDICT[varname],
                 df['year'].min(), df['year'].max(), ets.strftime("%-I %p"),
                 sts.strftime("%-I %p"),
                 "same day" if h2 > h1 else "previous day", tzname)
    fitbox(fig, title, 0.05, 0.95, 0.91, 0.99, ha='center')

    return fig, df
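
The pivot step above turns the hourly observations into one row per date with one column per observation hour, so the h2 minus h1 difference becomes a single column subtraction. A tiny pandas sketch with synthetic data:

# Minimal sketch of the pivot/delta step (synthetic observations).
import pandas as pd

obs = pd.DataFrame({
    "date": ["2023-06-01", "2023-06-01", "2023-06-02", "2023-06-02"],
    "hour": [6, 18, 6, 18],
    "q": [8.1, 9.4, 7.6, 9.9],
})
wide = obs.pivot(index="date", columns="hour", values="q").reset_index()
wide["delta"] = wide[18] - wide[6]   # 18z minus 6z difference per date
print(wide)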
Example #23
0
def main():
    load_start = dt.datetime.now()
    #Try parsing arguments using argparse
    parser = argparse.ArgumentParser(
        description='wrf non-parallel convective diagnostics processer')
    parser.add_argument("-m", help="Model name", required=True)
    parser.add_argument("-r",
                        help="Region name (default is aus)",
                        default="aus")
    parser.add_argument("-t1", help="Time start YYYYMMDDHH", required=True)
    parser.add_argument("-t2", help="Time end YYYYMMDDHH", required=True)
    parser.add_argument(
        "-e",
        help=
        "CMIP5 experiment name (not required if using era5, erai or barra)",
        default="")
    parser.add_argument(
        "--barpa_forcing_mdl",
        help="BARPA forcing model (erai or ACCESS1-0). Default erai.",
        default="erai")
    parser.add_argument(
        "--ens",
        help="CMIP5 ensemble name (not required if using era5, erai or barra)",
        default="r1i1p1")
    parser.add_argument("--group",
                        help="CMIP6 modelling group name",
                        default="")
    parser.add_argument("--project",
                        help="CMIP6 modelling intercomparison project",
                        default="CMIP")
    parser.add_argument("--ver6hr",
                        help="Version on al33 for 6hr data",
                        default="")
    parser.add_argument("--ver3hr",
                        help="Version on al33 for 3hr data",
                        default="")
    parser.add_argument("--issave",
                        help="Save output (True or False, default is False)",
                        default="False")
    parser.add_argument(
        "--ub4",
        help=
        "Where to get era5 data. Default True for ub4 project, otherwise rt52",
        default="True")
    parser.add_argument(
        "--outname",
        help=
        "Name of saved output. In the form *outname*_*t1*_*t2*.nc. Default behaviour is the model name",
        default=None)
    parser.add_argument(
        "--is_dcape",
        help="Should DCAPE be calculated? (1 or 0. Default is 1)",
        default=1)
    parser.add_argument(
        "--al33",
        help=
        "Should data be gathered from al33? Default is False, and data is gathered from r87. If True, then group is required",
        default="False")
    parser.add_argument(
        "--delta_t",
        help=
        "Time step spacing for ERA5 data, in hours. Default is one the minimum spacing (1 hour)",
        default="1")
    parser.add_argument(
        "--era5_interp",
        help=
        "Horizontally interpolate model data before calculating convective parameters",
        default="False")
    args = parser.parse_args()

    #Parse arguments from cmd line and set up inputs (date region model)
    model = args.m
    region = args.r
    t1 = args.t1
    t2 = args.t2
    issave = args.issave
    ub4 = args.ub4
    al33 = args.al33
    if args.outname == None:
        out_name = model
    else:
        out_name = args.outname
    is_dcape = args.is_dcape
    barpa_forcing_mdl = args.barpa_forcing_mdl
    experiment = args.e
    ensemble = args.ens
    group = args.group
    project = args.project
    ver6hr = args.ver6hr
    ver3hr = args.ver3hr
    delta_t = int(args.delta_t)
    era5_interp = args.era5_interp
    if region == "sa_small":
        start_lat = -38
        end_lat = -26
        start_lon = 132
        end_lon = 142
    elif region == "aus":
        start_lat = -44.525
        end_lat = -9.975
        start_lon = 111.975
        end_lon = 156.275
    elif region == "global":
        start_lat = -70
        end_lat = 70
        start_lon = -180
        end_lon = 179.75
    else:
        raise ValueError("INVALID REGION\n")
    domain = [start_lat, end_lat, start_lon, end_lon]
    try:
        time = [
            dt.datetime.strptime(t1, "%Y%m%d%H"),
            dt.datetime.strptime(t2, "%Y%m%d%H")
        ]
    except:
        raise ValueError("INVALID START OR END TIME. SHOULD BE YYYYMMDDHH\n")
    if era5_interp == "True":
        era5_interp = True
    elif era5_interp == "False":
        era5_interp = False
    else:
        raise ValueError("\n INVALID era5_interp...SHOULD BE True OR False")
    if ub4 == "True":
        ub4 = True
    elif ub4 == "False":
        ub4 = False
    else:
        raise ValueError("\n INVALID ub4...SHOULD BE True OR False")
    if issave == "True":
        issave = True
    elif issave == "False":
        issave = False
    else:
        raise ValueError("\n INVALID ISSAVE...SHOULD BE True OR False")
    if al33 == "True":
        al33 = True
    elif al33 == "False":
        al33 = False
    else:
        raise ValueError("\n INVALID al33...SHOULD BE True OR False")

    #Load data
    print("LOADING DATA...")
    if model == "erai":
        ta,temp1,hur,hgt,terrain,p,ps,wap,ua,va,uas,vas,tas,ta2d,\
         cp,tp,wg10,mod_cape,lon,lat,date_list = \
         read_erai(domain,time)
        cp = cp.astype("float32", order="C")
        tp = tp.astype("float32", order="C")
        mod_cape = mod_cape.astype("float32", order="C")
    elif model == "era5":
        if ub4:
            ta,temp1,hur,hgt,terrain,p,ps,ua,va,uas,vas,tas,ta2d,\
             cp,wg10,mod_cape,lon,lat,date_list = \
             read_era5(domain,time,delta_t=delta_t)
        else:
            ta,temp1,hur,hgt,terrain,p,ps,ua,va,uas,vas,tas,ta2d,\
             cp,tp,wg10,mod_cape,lon,lat,date_list = \
             read_era5_rt52(domain,time,delta_t=delta_t)
        cp = cp.astype("float32", order="C")
        tp = tp.astype("float32", order="C")
        mod_cape = mod_cape.astype("float32", order="C")
        wap = np.zeros(hgt.shape)
    elif model == "barra":
        ta,temp1,hur,hgt,terrain,p,ps,wap,ua,va,uas,vas,tas,ta2d,wg10,lon,lat,date_list = \
         read_barra(domain,time)
    elif model == "barra_fc":
        ta,temp1,hur,hgt,terrain,p,ps,wap,ua,va,uas,vas,tas,ta2d,wg10,lon,lat,date_list = \
         read_barra_fc(domain,time)
    elif model == "barpa":
        ta,hur,hgt,terrain,p,ps,ua,va,uas,vas,tas,ta2d,wg10,lon,lat,date_list = \
         read_barpa(domain, time, experiment, barpa_forcing_mdl, ensemble)
        wap = np.zeros(hgt.shape)
        temp1 = None
    elif model == "barra_ad":
        wg10,temp2,ta,temp1,hur,hgt,terrain,p,ps,wap,ua,va,uas,vas,tas,ta2d,lon,lat,date_list = \
         read_barra_ad(domain, time, False)
    elif model in ["ACCESS1-0","ACCESS1-3","GFDL-CM3","GFDL-ESM2M","CNRM-CM5","MIROC5",\
         "MRI-CGCM3","IPSL-CM5A-LR","IPSL-CM5A-MR","GFDL-ESM2G","bcc-csm1-1","MIROC-ESM",\
         "BNU-ESM"]:
        #Build the list of years spanned by t1 and t2
        year = np.arange(int(t1[0:4]), int(t2[0:4]) + 1)
        ta, hur, hgt, terrain, p_3d, ps, ua, va, uas, vas, tas, ta2d, tp, lon, lat, \
            date_list = read_cmip(model, experiment, \
            ensemble, year, domain, cmip_ver=5, al33=al33, group=group, ver6hr=ver6hr, ver3hr=ver3hr)
        wap = np.zeros(hgt.shape)
        wg10 = np.zeros(ps.shape)
        mod_cape = np.zeros(ps.shape)
        p = np.zeros(p_3d[0, :, 0, 0].shape)
        #date_list = pd.to_datetime(date_list).to_pydatetime()
        temp1 = None
        tp = tp.astype("float32", order="C")
    elif model in ["ACCESS-ESM1-5", "ACCESS-CM2"]:
        year = np.arange(int(t1[0:4]), int(t2[0:4]) + 1)
        ta, hur, hgt, terrain, p_3d, ps, ua, va, uas, vas, tas, ta2d, lon, lat, \
            date_list = read_cmip(model, experiment,\
            ensemble, year, domain, cmip_ver=6, group=group, project=project)
        wap = np.zeros(hgt.shape)
        wg10 = np.zeros(ps.shape)
        p = np.zeros(p_3d[0, :, 0, 0].shape)
        #date_list = pd.to_datetime(date_list).to_pydatetime()
        temp1 = None
    else:
        raise ValueError("Model not recognised")
    del temp1
    ta = ta.astype("float32", order="C")
    hur = hur.astype("float32", order="C")
    hgt = hgt.astype("float32", order="C")
    terrain = terrain.astype("float32", order="C")
    p = p.astype("float32", order="C")
    ps = ps.astype("float32", order="C")
    wap = wap.astype("float32", order="C")
    ua = ua.astype("float32", order="C")
    va = va.astype("float32", order="C")
    uas = uas.astype("float32", order="C")
    vas = vas.astype("float32", order="C")
    tas = tas.astype("float32", order="C")
    ta2d = ta2d.astype("float32", order="C")
    wg10 = wg10.astype("float32", order="C")
    lon = lon.astype("float32", order="C")
    lat = lat.astype("float32", order="C")

    gc.collect()

    param = np.array([
        "mu_cape", "mu_cin", "muq", "s06", "s0500", "lr700_500", "mhgt",
        "ta500", "tp"
    ])

    if model in ["erai", "era5"]:
        param = np.concatenate([param, ["mod_cape"]])

    #Option to interpolate to the ERA5 grid
    if era5_interp:
        #Interpolate model data to the ERA5 grid
        from era5_read import get_lat_lon_rt52 as get_era5_lat_lon
        era5_lon, era5_lat = get_era5_lat_lon()
        era5_lon_ind = np.where((era5_lon >= domain[2])
                                & (era5_lon <= domain[3]))[0]
        era5_lat_ind = np.where((era5_lat >= domain[0])
                                & (era5_lat <= domain[1]))[0]
        era5_lon = era5_lon[era5_lon_ind]
        era5_lat = era5_lat[era5_lat_ind]
        terrain = interp_era5(terrain, lon, lat, era5_lon, era5_lat, d3=False)
        #Set output array
        output_data = np.zeros(
            (ps.shape[0], era5_lat.shape[0], era5_lon.shape[0], len(param)))
    else:
        output_data = np.zeros(
            (ps.shape[0], ps.shape[1], ps.shape[2], len(param)))

    #Assign p levels to a 3d array, with same dimensions as input variables (ta, hgt, etc.)
    #If the 3d p-lvl array already exists, then declare the variable "mdl_lvl" as true.
    try:
        p_3d
        mdl_lvl = True
        full_p3d = p_3d
    except:
        mdl_lvl = False
        if era5_interp:
            p_3d = np.moveaxis(np.tile(p,[era5_lat.shape[0],era5_lon.shape[0],1]),[0,1,2],[1,2,0]).\
                astype(np.float32)
        else:
            p_3d = np.moveaxis(np.tile(p,[ta.shape[2],ta.shape[3],1]),[0,1,2],[1,2,0]).\
                astype(np.float32)

    print("LOAD TIME..." + str(dt.datetime.now() - load_start))
    tot_start = dt.datetime.now()

    for t in np.arange(0, ta.shape[0]):
        cape_start = dt.datetime.now()

        if era5_interp:
            ta_t = interp_era5(ta[t], lon, lat, era5_lon, era5_lat, d3=True)
            hur_t = interp_era5(hur[t], lon, lat, era5_lon, era5_lat, d3=True)
            hgt_t = interp_era5(hgt[t], lon, lat, era5_lon, era5_lat, d3=True)
            ps_t = interp_era5(ps[t], lon, lat, era5_lon, era5_lat, d3=False)
            wap_t = interp_era5(wap[t], lon, lat, era5_lon, era5_lat, d3=True)
            ua_t = interp_era5(ua[t], lon, lat, era5_lon, era5_lat, d3=True)
            va_t = interp_era5(va[t], lon, lat, era5_lon, era5_lat, d3=True)
            uas_t = interp_era5(uas[t], lon, lat, era5_lon, era5_lat, d3=False)
            vas_t = interp_era5(vas[t], lon, lat, era5_lon, era5_lat, d3=False)
            tas_t = interp_era5(tas[t], lon, lat, era5_lon, era5_lat, d3=False)
            ta2d_t = interp_era5(ta2d[t],
                                 lon,
                                 lat,
                                 era5_lon,
                                 era5_lat,
                                 d3=False)
            tp_t = interp_era5(tp[t], lon, lat, era5_lon, era5_lat, d3=False)
            mod_cape_t = interp_era5(mod_cape[t],
                                     lon,
                                     lat,
                                     era5_lon,
                                     era5_lat,
                                     d3=False)
        else:
            ta_t = ta[t]
            hur_t = hur[t]
            hgt_t = hgt[t]
            ps_t = ps[t]
            wap_t = wap[t]
            ua_t = ua[t]
            va_t = va[t]
            uas_t = uas[t]
            vas_t = vas[t]
            tas_t = tas[t]
            ta2d_t = ta2d[t]
            tp_t = tp[t]
            mod_cape_t = mod_cape[t]
        print(date_list[t])
        output = np.zeros((1, ps_t.shape[0], ps_t.shape[1], len(param)))

        if mdl_lvl:
            if era5_interp:
                p_3d = interp_era5(full_p3d[t],
                                   lon,
                                   lat,
                                   era5_lon,
                                   era5_lat,
                                   d3=True)
            else:
                p_3d = full_p3d[t]

        dp = get_dp(hur=hur_t, ta=ta_t, dp_mask=False)

        #Insert surface arrays, creating new arrays with "sfc" prefix
        sfc_ta = np.insert(ta_t, 0, tas_t, axis=0)
        sfc_hgt = np.insert(hgt_t, 0, terrain, axis=0)
        sfc_dp = np.insert(dp, 0, ta2d_t, axis=0)
        sfc_p_3d = np.insert(p_3d, 0, ps_t, axis=0)
        sfc_ua = np.insert(ua_t, 0, uas_t, axis=0)
        sfc_va = np.insert(va_t, 0, vas_t, axis=0)
        sfc_wap = np.insert(wap_t, 0, np.zeros(vas_t.shape), axis=0)

        #Sort by ascending p
        a,temp1,temp2 = np.meshgrid(np.arange(sfc_p_3d.shape[0]) , np.arange(sfc_p_3d.shape[1]),\
          np.arange(sfc_p_3d.shape[2]))
        sort_inds = np.flip(np.lexsort([np.swapaxes(a, 1, 0), sfc_p_3d],
                                       axis=0),
                            axis=0)
        sfc_hgt = np.take_along_axis(sfc_hgt, sort_inds, axis=0)
        sfc_dp = np.take_along_axis(sfc_dp, sort_inds, axis=0)
        sfc_p_3d = np.take_along_axis(sfc_p_3d, sort_inds, axis=0)
        sfc_ua = np.take_along_axis(sfc_ua, sort_inds, axis=0)
        sfc_va = np.take_along_axis(sfc_va, sort_inds, axis=0)
        sfc_ta = np.take_along_axis(sfc_ta, sort_inds, axis=0)

        #Calculate q and wet bulb for pressure level arrays with surface values
        sfc_ta_unit = units.units.degC * sfc_ta
        sfc_dp_unit = units.units.degC * sfc_dp
        sfc_p_unit = units.units.hectopascals * sfc_p_3d
        hur_unit = mpcalc.relative_humidity_from_dewpoint(ta_t*units.units.degC, dp*units.units.degC)*\
         100*units.units.percent
        q_unit = mpcalc.mixing_ratio_from_relative_humidity(hur_unit,\
         ta_t*units.units.degC,np.array(p_3d)*units.units.hectopascals)
        sfc_hur_unit = mpcalc.relative_humidity_from_dewpoint(sfc_ta_unit, sfc_dp_unit)*\
         100*units.units.percent
        sfc_q_unit = mpcalc.mixing_ratio_from_relative_humidity(sfc_hur_unit,\
         sfc_ta_unit,sfc_p_unit)
        sfc_theta_unit = mpcalc.potential_temperature(sfc_p_unit, sfc_ta_unit)
        sfc_thetae_unit = mpcalc.equivalent_potential_temperature(
            sfc_p_unit, sfc_ta_unit, sfc_dp_unit)
        sfc_thetae = np.array(mpcalc.equivalent_potential_temperature(ps_t*units.units.hectopascals,tas_t*units.units.degC,\
              ta2d_t*units.units.degC))
        sfc_q = np.array(sfc_q_unit)
        sfc_hur = np.array(sfc_hur_unit)
        #sfc_wb = np.array(wrf.wetbulb( sfc_p_3d*100, sfc_ta+273.15, sfc_q, units="degC"))

        #Use getcape.f90
        #cape_gb_mu1, cape_gb_mu4 = getcape_driver(sfc_p_3d, sfc_ta, sfc_dp, ps_t)

        #Now get most-unstable CAPE (max CAPE in vertical, ensuring parcels used are AGL)
        cape3d = wrf.cape_3d(sfc_p_3d,sfc_ta+273.15,\
          sfc_q,sfc_hgt,\
          terrain,ps_t,\
          True,meta=False, missing=0)
        cape = cape3d.data[0]
        cin = cape3d.data[1]
        lfc = cape3d.data[2]
        lcl = cape3d.data[3]
        el = cape3d.data[4]
        #Mask values which are below the surface and above 350 hPa AGL
        cape[(sfc_p_3d > ps_t) | (sfc_p_3d < (ps_t - 350))] = np.nan
        cin[(sfc_p_3d > ps_t) | (sfc_p_3d < (ps_t - 350))] = np.nan
        lfc[(sfc_p_3d > ps_t) | (sfc_p_3d < (ps_t - 350))] = np.nan
        lcl[(sfc_p_3d > ps_t) | (sfc_p_3d < (ps_t - 350))] = np.nan
        el[(sfc_p_3d > ps_t) | (sfc_p_3d < (ps_t - 350))] = np.nan
        #Get maximum (in the vertical), and get cin, lfc, lcl for the same parcel
        mu_cape_inds = np.tile(np.nanargmax(cape, axis=0),
                               (cape.shape[0], 1, 1))
        mu_cape = np.take_along_axis(cape, mu_cape_inds, 0)[0]
        mu_cin = np.take_along_axis(cin, mu_cape_inds, 0)[0]
        mu_lfc = np.take_along_axis(lfc, mu_cape_inds, 0)[0]
        mu_lcl = np.take_along_axis(lcl, mu_cape_inds, 0)[0]
        mu_el = np.take_along_axis(el, mu_cape_inds, 0)[0]
        muq = np.take_along_axis(sfc_q, mu_cape_inds, 0)[0] * 1000

        #Calculate other parameters
        #Thermo
        thermo_start = dt.datetime.now()
        lr700_500 = get_lr_p(ta_t, p_3d, hgt_t, 700, 500)
        melting_hgt = get_t_hgt(sfc_ta, np.copy(sfc_hgt), 0, terrain)
        melting_hgt = np.where((melting_hgt < 0) | (np.isnan(melting_hgt)), 0,
                               melting_hgt)
        ta500 = get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 500)
        ta925 = get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 925)
        ta850 = get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 850)
        ta700 = get_var_p_lvl(np.copy(sfc_ta), sfc_p_3d, 700)
        rho = mpcalc.density(
            np.array(sfc_p_3d) * (units.units.hectopascal),
            sfc_ta * units.units.degC, sfc_q_unit)
        rho925 = np.array(get_var_p_lvl(np.array(rho), sfc_p_3d, 925))
        rho850 = np.array(get_var_p_lvl(np.array(rho), sfc_p_3d, 850))
        rho700 = np.array(get_var_p_lvl(np.array(rho), sfc_p_3d, 700))
        #Winds
        winds_start = dt.datetime.now()
        s06 = get_shear_hgt(sfc_ua, sfc_va, np.copy(sfc_hgt), 0, 6000, terrain)
        s0500 = get_shear_p(ua_t,
                            va_t,
                            p_3d,
                            "sfc",
                            np.array([500]),
                            p_3d,
                            uas=uas_t,
                            vas=vas_t)[0]

        #WAP
        if model in ["erai", "era5"]:
            sfc_w = mpcalc.vertical_velocity( wap_t * (units.units.pascal / units.units.second),\
             np.array(p_3d) * (units.units.hectopascal), \
             ta_t * units.units.degC,  q_unit)
            w925 = np.array(get_var_p_lvl(np.array(sfc_w), p_3d, 925))
            w850 = np.array(get_var_p_lvl(np.array(sfc_w), p_3d, 850))
            w700 = np.array(get_var_p_lvl(np.array(sfc_w), p_3d, 700))

        #Convergence
        if era5_interp:
            x, y = np.meshgrid(era5_lon, era5_lat)
        else:
            x, y = np.meshgrid(lon, lat)
        dx, dy = mpcalc.lat_lon_grid_deltas(x, y)
        u925 = np.array(get_var_p_lvl(np.copy(sfc_ua), sfc_p_3d, 925))
        u850 = np.array(get_var_p_lvl(np.copy(sfc_ua), sfc_p_3d, 850))
        u700 = np.array(get_var_p_lvl(np.copy(sfc_ua), sfc_p_3d, 700))
        v925 = np.array(get_var_p_lvl(np.copy(sfc_va), sfc_p_3d, 925))
        v850 = np.array(get_var_p_lvl(np.copy(sfc_va), sfc_p_3d, 850))
        v700 = np.array(get_var_p_lvl(np.copy(sfc_va), sfc_p_3d, 700))
        conv925 = -1e5 * np.array(
            mpcalc.divergence(u925 * (units.units.meter / units.units.second),
                              v925 * (units.units.meter / units.units.second),
                              dx, dy))
        conv850 = -1e5 * np.array(
            mpcalc.divergence(u850 * (units.units.meter / units.units.second),
                              v850 * (units.units.meter / units.units.second),
                              dx, dy))
        conv700 = -1e5 * np.array(
            mpcalc.divergence(u700 * (units.units.meter / units.units.second),
                              v700 * (units.units.meter / units.units.second),
                              dx, dy))

        #CS6
        mucs6 = mu_cape * np.power(s06, 1.67)

        #Fill output
        output = fill_output(output, t, param, ps, "mu_cape", mu_cape)
        output = fill_output(output, t, param, ps, "mu_cin", mu_cin)
        output = fill_output(output, t, param, ps, "muq", muq)
        output = fill_output(output, t, param, ps, "s06", s06)
        output = fill_output(output, t, param, ps, "s0500", s0500)
        output = fill_output(output, t, param, ps, "lr700_500", lr700_500)
        output = fill_output(output, t, param, ps, "ta500", ta500)
        output = fill_output(output, t, param, ps, "mhgt", melting_hgt)
        output = fill_output(output, t, param, ps, "tp", tp_t)
        if (model == "erai") | (model == "era5"):
            output = fill_output(output, t, param, ps, "mod_cape", mod_cape_t)

        output_data[t] = output

    print("SAVING DATA...")
    param_out = []
    for param_name in param:
        temp_data = output_data[:, :, :, np.where(param == param_name)[0][0]]
        param_out.append(temp_data)

    #If the mhgt variable is zero everywhere, then it is likely that data has not been read.
    #In that case, treat all values as missing and set them to NaN.
    for t in np.arange(param_out[0].shape[0]):
        if param_out[np.where(param == "mhgt")[0][0]][t].max() == 0:
            for p in np.arange(len(param_out)):
                param_out[p][t] = np.nan

    if issave:
        if era5_interp:
            save_netcdf(region, model, out_name, date_list, era5_lat, era5_lon, param, param_out, \
             out_dtype = "f4", compress=True)
        else:
            save_netcdf(region, model, out_name, date_list, lat, lon, param, param_out, \
             out_dtype = "f4", compress=True)

    print(dt.datetime.now() - tot_start)
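
The meshgrid/lexsort/take_along_axis block inside the time loop re-orders every vertical column so pressure decreases monotonically after the surface level is inserted. A simplified sketch of the same idea on synthetic arrays, where a stable descending argsort plays the role of the level-index tiebreaker:

# Minimal sketch (synthetic data): per-column re-sort by descending pressure.
import numpy as np

p = np.array([[1000., 990.], [850., 850.], [1005., 980.]])  # (level, column), hPa
t = np.array([[15., 14.], [5., 6.], [16., 13.]])            # degC, same shape
order = np.argsort(-p, axis=0, kind="stable")               # descending pressure per column
p_sorted = np.take_along_axis(p, order, axis=0)
t_sorted = np.take_along_axis(t, order, axis=0)             # same reorder applied to temperature
print(p_sorted[:, 0])  # [1005. 1000.  850.]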
Example #24
0
    def get_weather(self):

        #------------------------------------------------------
        #------------------------------------------------------
        # Load TMY2
        if self.file_ext == TMY2EXT:
            f = open(self.weatherpath + self.city + self.file_ext, 'r')

            # Header read for lat and lon
            head = f.readline()
            self.lat = int(head[39:41]) + int(head[42:44]) / 60.0
            self.lon = int(head[47:50]) + int(head[51:53]) / 60.0

            line = f.readline()
            ind = 0
            while line:

                # Process the line
                self.tothor[ind] = float(
                    line[17:21])  #Total horizontal solar Wh/m2
                self.dirnorm[ind] = float(
                    line[23:27])  #Direct normal solar Wh/m2
                self.difhor[ind] = float(
                    line[29:33])  #Diffuse Horizontal Solar Wh/m2

                self.tdry[ind] = float(line[67:71]) * 0.1
                #tdrybulb (deg C)
                self.rhs[ind] = float(line[79:82]) * 0.01
                #relative humidity (fraction)
                self.tdew[ind] = float(line[73:77]) * 0.1
                #tdew (deg C) to conform with TB code

                self.press[ind] = float(line[84:88])
                #atmospheric pressure (mbar) mb = 100 Pascals
                #self.wind_speed[ind]   = float(line[95:98]) * 0.1;		#windspeed m/s
                #self.wind_dir[ind]     = float(line[90:93]);   			#wind direction azimuth

                #self.cloud[ind]        = float(line[59:61])/10.0;		    #Cloud cover fraction
                #wd.ocloud       = getint(line,63,2)/10.0;		        #Opaque cloud cover fraction
                #wd.ceilht       = getint(line,106,5);		            #Cloud ceiling height m

                # Calculate specific humidity from dry bulb, dew point, and atm pressure using MetPy
                self.huss[ind] = mpcalc.specific_humidity_from_mixing_ratio(
                    mpcalc.mixing_ratio_from_relative_humidity(
                        mpcalc.relative_humidity_from_dewpoint(
                            self.tdry[ind] * units.degC,
                            self.tdew[ind] * units.degC),
                        self.tdry[ind] * units.degC,
                        self.press[ind] * units.mbar))

                #Next line
                line = f.readline()
                ind = ind + 1

            f.close()

        #------------------------------------------------------
        #------------------------------------------------------
        # Load TMY3
        elif self.file_ext == TMY3EXT:
            f = open(
                self.weatherpath + TMY3NUMBER[CITY.index(self.city)] +
                self.file_ext, 'r')

            # Header read for lat and lon
            head = f.readline().split(',')
            self.lat = float(head[4])
            self.lon = float(head[5])

            #Burn a line for the second part of the header.
            line = f.readline()

            line = f.readline()
            ind = 0
            while line:

                line = line.split(',')

                if len(line) < 20:
                    print('line is short!')
                # Process the line
                self.tothor[ind] = float(
                    line[4])  #Total horizontal solar Wh/m2
                self.dirnorm[ind] = float(line[7])  #Direct normal solar Wh/m2
                self.difhor[ind] = float(
                    line[10])  #Diffuse Horizontal Solar Wh/m2

                self.tdry[ind] = float(line[31])
                #tdrybulb (deg C)
                self.rhs[ind] = float(line[37]) * 0.01
                #relative humidity (fraction)
                self.tdew[ind] = float(line[34])
                #tdew (deg C) to conform with TB code

                self.press[ind] = float(line[40])
                #atmospheric pressure (mbar) mb = 100 Pascals
                #self.wind_speed[ind]   = float(line[46]);		#windspeed m/s
                #self.wind_dir[ind]     = float(line[43]);   			#wind direction azimuth

                # Calculate specific humidity from dry bulb, dew point, and atm pressure using MetPy
                self.huss[ind] = mpcalc.specific_humidity_from_mixing_ratio(
                    mpcalc.mixing_ratio_from_relative_humidity(
                        mpcalc.relative_humidity_from_dewpoint(
                            self.tdry[ind] * units.degC,
                            self.tdew[ind] * units.degC),
                        self.tdry[ind] * units.degC,
                        self.press[ind] * units.mbar))

                #Next line
                line = f.readline()
                ind = ind + 1

            f.close()
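
Both TMY branches derive specific humidity through the same dew-point chain. An isolated sketch with assumed sample values, written against the MetPy >= 1.0 pressure-first argument order (the nested calls above use the 0.x RH-first order):

import metpy.calc as mpcalc
from metpy.units import units

tdry = 28. * units.degC
tdew = 21. * units.degC
press = 1008. * units.mbar

rh = mpcalc.relative_humidity_from_dewpoint(tdry, tdew)
mr = mpcalc.mixing_ratio_from_relative_humidity(press, tdry, rh)
huss = mpcalc.specific_humidity_from_mixing_ratio(mr)
print(huss)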
Example #25
0
# --------------------------------
#
# Use MetPy to calculate common variables for plotting a cross section,
# specifically potential temperature and mixing ratio
#

potemp = mpcalc.potential_temperature(
    xsect['p_grid'] * units('hPa'),
    xsect['grid_data']['temperature'] * units('degC'))

relhum = mpcalc.relative_humidity_from_dewpoint(
    xsect['grid_data']['temperature'] * units('degC'),
    xsect['grid_data']['dewpoint'] * units('degC'))

mixrat = mpcalc.mixing_ratio_from_relative_humidity(
    relhum, xsect['grid_data']['temperature'] * units('degC'),
    xsect['p_grid'] * units('hPa'))

######################################################################
# Plot Cross Section
# ------------------
#
# Use standard Matplotlib to plot the now 2D cross section grid using the
# data from xsect and those calculated above. Additionally, the actual
# radiosonde wind observations are plotted as barbs on this plot.
#

# Start Figure, set big size for cross section
fig = plt.figure(figsize=(17, 11))

# Specify plotting axis (single panel)
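
The snippet breaks off here. A hedged sketch of how the panel might be drawn follows, where 'x_grid' is a hypothetical horizontal-coordinate key and the contour styling is an assumption rather than the tutorial's actual code:

# Hypothetical continuation: shade mixing ratio, contour potential temperature.
ax = fig.add_subplot(1, 1, 1)
cf = ax.contourf(xsect['x_grid'], xsect['p_grid'], mixrat.magnitude * 1000.,
                 cmap='YlGnBu')                       # mixing ratio shading (g/kg)
cs = ax.contour(xsect['x_grid'], xsect['p_grid'], potemp.magnitude, colors='k')
ax.clabel(cs, fmt='%i')                               # potential temperature (K)
ax.set_ylim(1000, 100)                                # pressure decreases upward
ax.set_ylabel('Pressure (hPa)')
fig.colorbar(cf, ax=ax, label='Mixing ratio (g/kg)')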
Example #26
0
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn('asos')
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['zstation']
    network = ctx['network']
    nt = NetworkTable(network)
    year = ctx['year']
    varname = ctx['var']

    df = read_sql("""
        SELECT extract(year from valid) as year,
        coalesce(mslp, alti * 33.8639, 1013.25) as slp,
        extract(doy from valid) as doy, tmpf, dwpf from alldata
        where station = %s and dwpf > -50 and dwpf < 90 and
        tmpf > -50 and tmpf < 120 and valid > '1950-01-01'
        and report_type = 2
    """,
                  pgconn,
                  params=(station, ),
                  index_col=None)
    # Convert sea level pressure to station pressure
    df['pressure'] = mcalc.add_height_to_pressure(
        df['slp'].values * units('millibars'),
        nt.sts[station]['elevation'] * units('m')).to(units('millibar'))
    # Compute the relative humidity
    df['relh'] = mcalc.relative_humidity_from_dewpoint(
        df['tmpf'].values * units('degF'), df['dwpf'].values * units('degF'))
    # Compute the mixing ratio
    df['mixing_ratio'] = mcalc.mixing_ratio_from_relative_humidity(
        df['relh'].values, df['tmpf'].values * units('degF'),
        df['pressure'].values * units('millibars'))
    # Compute the saturation mixing ratio
    df['saturation_mixingratio'] = mcalc.saturation_mixing_ratio(
        df['pressure'].values * units('millibars'),
        df['tmpf'].values * units('degF'))
    df['vapor_pressure'] = mcalc.vapor_pressure(
        df['pressure'].values * units('millibars'),
        df['mixing_ratio'].values * units('kg/kg')).to(units('kPa'))
    df['saturation_vapor_pressure'] = mcalc.vapor_pressure(
        df['pressure'].values * units('millibars'),
        df['saturation_mixingratio'].values * units('kg/kg')).to(units('kPa'))
    df['vpd'] = df['saturation_vapor_pressure'] - df['vapor_pressure']

    dailymeans = df[['year', 'doy', varname]].groupby(['year', 'doy']).mean()
    dailymeans = dailymeans.reset_index()

    df2 = dailymeans[['doy', varname]].groupby('doy').describe()

    dyear = df[df['year'] == year]
    df3 = dyear[['doy', varname]].groupby('doy').describe()
    df3[(varname, 'diff')] = df3[(varname, 'mean')] - df2[(varname, 'mean')]

    (fig, ax) = plt.subplots(2, 1, figsize=(8, 6))
    multiplier = 1000. if varname == 'mixing_ratio' else 10.

    ax[0].fill_between(df2[(varname, 'min')].index.values,
                       df2[(varname, 'min')].values * multiplier,
                       df2[(varname, 'max')].values * multiplier,
                       color='gray')

    ax[0].plot(df2[(varname, 'mean')].index.values,
               df2[(varname, 'mean')].values * multiplier,
               label="Climatology")
    ax[0].plot(df3[(varname, 'mean')].index.values,
               df3[(varname, 'mean')].values * multiplier,
               color='r',
               label="%s" % (year, ))

    ax[0].set_title(("%s [%s]\nDaily Mean Surface %s") %
                    (station, nt.sts[station]['name'], PDICT[varname]))
    lbl = ("Mixing Ratio ($g/kg$)"
           if varname == 'mixing_ratio' else PDICT[varname])
    ax[0].set_ylabel(lbl)
    ax[0].set_xlim(0, 366)
    ax[0].set_ylim(bottom=0)
    ax[0].set_xticks(
        (1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 365))
    ax[0].set_xticklabels(calendar.month_abbr[1:])
    ax[0].grid(True)
    ax[0].legend(loc=2, fontsize=10)

    cabove = 'b' if varname == 'mixing_ratio' else 'r'
    cbelow = 'r' if cabove == 'b' else 'b'
    rects = ax[1].bar(df3[(varname, 'diff')].index.values,
                      df3[(varname, 'diff')].values * multiplier,
                      facecolor=cabove,
                      edgecolor=cabove)
    for rect in rects:
        if rect.get_height() < 0.:
            rect.set_facecolor(cbelow)
            rect.set_edgecolor(cbelow)

    plunits = '$g/kg$' if varname == 'mixing_ratio' else 'hPa'
    ax[1].set_ylabel("%.0f Departure (%s)" % (year, plunits))
    ax[1].set_xlim(0, 366)
    ax[1].set_xticks(
        (1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 365))
    ax[1].set_xticklabels(calendar.month_abbr[1:])
    ax[1].grid(True)
    return fig, df3
Example #27
0
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn("asos")

    ctx = get_autoplot_context(fdict, get_description())
    station = ctx["zstation"]
    h1 = int(ctx["h1"])
    h2 = int(ctx["h2"])
    varname = ctx["v"]

    tzname = ctx["_nt"].sts[station]["tzname"]

    df = read_sql(
        """
    WITH data as (
        SELECT valid at time zone %s + '10 minutes'::interval as localvalid,
        date_trunc(
             'hour', valid at time zone %s  + '10 minutes'::interval) as v,
        tmpf, dwpf, sknt, drct, alti, relh, random() as r,
        coalesce(mslp, alti * 33.8639, 1013.25) as slp
        from alldata where station = %s and report_type = 2
        and extract(hour from valid at time zone %s + '10 minutes'::interval)
        in (%s, %s)),
     agg as (
          select *, extract(hour from v) as hour,
          rank() OVER (PARTITION by v ORDER by localvalid ASC, r ASC) from data
     )

     SELECT *, date(
         case when hour = %s
         then date(v - '1 day'::interval)
         else date(v) end) from agg WHERE rank = 1
    """,
        pgconn,
        params=(
            tzname,
            tzname,
            station,
            tzname,
            h1,
            h2,
            h2 if h2 < h1 else -1,
        ),
        index_col=None,
    )
    if df.empty:
        raise NoDataFound("No data was found.")
    if varname == "q":
        df["pressure"] = mcalc.add_height_to_pressure(
            df["slp"].values * units("millibars"),
            ctx["_nt"].sts[station]["elevation"] * units("m"),
        ).to(units("millibar"))
        # compute mixing ratio
        df["q"] = (mcalc.mixing_ratio_from_relative_humidity(
            df["relh"].values * units("percent"),
            df["tmpf"].values * units("degF"),
            df["pressure"].values * units("millibars"),
        ) * 1000.0)

    # pivot
    df = df.pivot(index="date", columns="hour", values=varname).reset_index()
    df = df.dropna()
    df["doy"] = pd.to_numeric(pd.to_datetime(df["date"]).dt.strftime("%j"))
    df["year"] = pd.to_datetime(df["date"]).dt.year
    df["week"] = (df["doy"] / 7).astype(int)
    df["delta"] = df[h2] - df[h1]

    (fig, ax) = plt.subplots(1, 1)
    if ctx["opt"] == "no":
        ax.set_xlabel("Plotted lines are smoothed over %.0f days" %
                      (ctx["smooth"], ))
    ax.set_ylabel(
        "%s %s Difference" %
        (PDICT[varname], "Accumulated Sum" if ctx["opt"] == "yes" else ""))

    if ctx["opt"] == "no":
        # Histogram
        H, xedges, yedges = np.histogram2d(df["doy"].values,
                                           df["delta"].values,
                                           bins=(50, 50))
        ax.pcolormesh(
            xedges,
            yedges,
            H.transpose(),
            cmap=get_cmap(ctx["cmap"]),
            alpha=0.5,
        )

    # Plot an average line
    gdf = (df.groupby("doy").mean().rolling(ctx["smooth"],
                                            min_periods=1,
                                            center=True).mean())
    y = gdf["delta"] if ctx["opt"] == "no" else gdf["delta"].cumsum()
    ax.plot(
        gdf.index.values,
        y,
        label="Average",
        zorder=6,
        lw=2,
        color="k",
        linestyle="-.",
    )

    # Plot selected year
    for i in range(1, 5):
        year = ctx.get("y%s" % (i, ))
        if year is None:
            continue
        df2 = df[df["year"] == year]
        if not df2.empty:
            gdf = (df2.groupby("doy").mean().rolling(ctx["smooth"],
                                                     min_periods=1,
                                                     center=True).mean())
            y = gdf["delta"] if ctx["opt"] == "no" else gdf["delta"].cumsum()
            ax.plot(gdf.index.values, y, label=str(year), lw=2, zorder=10)

    ax.set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335))
    ax.set_xticklabels(calendar.month_abbr[1:])
    ax.set_xlim(1, 366)
    ax.grid(True)
    ax.legend(loc="best", ncol=5)
    sts = datetime.datetime(2000, 6, 1, h1)
    ets = datetime.datetime(2000, 6, 1, h2)
    title = ("%s [%s] %s Difference (%.0f-%.0f)\n"
             "%s minus %s (%s) (timezone: %s)") % (
                 ctx["_nt"].sts[station]["name"],
                 station,
                 PDICT[varname],
                 df["year"].min(),
                 df["year"].max(),
                 ets.strftime("%-I %p"),
                 sts.strftime("%-I %p"),
                 "same day" if h2 > h1 else "previous day",
                 tzname,
             )
    fitbox(fig, title, 0.05, 0.95, 0.91, 0.99, ha="center")

    return fig, df
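
The `varname == "q"` branch above boils down to one MetPy call: reduce sea level pressure to station pressure, then convert relative humidity to a mixing ratio and scale it to g/kg. A minimal, self-contained sketch of that conversion with illustrative values, assuming MetPy >= 1.0, where the argument order is (pressure, temperature, relative_humidity); releases before 1.0, which several listings on this page target, use the reversed order:

import metpy.calc as mpcalc
from metpy.units import units

pressure = 993.0 * units.hPa
temperature = 25.0 * units.degC
rh = 60.0 * units.percent

w = mpcalc.mixing_ratio_from_relative_humidity(pressure, temperature, rh)
print(w.to("g/kg"))  # roughly 12 g/kg for these inputs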
Example #28
def atmCalc(height, temp, humid):
    print("ATMCALC", height, temp, humid, file=sys.stderr)
    mtny = windh(MTNX, height, ratio=1, yoffset=0)

    windx = XVALUES
    windy = windh(windx, height)

    temp_ = temp * units.degC
    initp = mc.height_to_pressure_std(windy[0] * units.meters)
    dewpt = mc.dewpoint_from_relative_humidity(temp_, humid / 100.)
    lcl_ = mc.lcl(initp, temp_, dewpt, max_iters=50, eps=1e-5)
    LCL = mc.pressure_to_height_std(lcl_[0])

    if (lcl_[0] > mc.height_to_pressure_std(max(windy) * units.meters)
            and LCL > windy[0] * units.meters * 1.000009):
        # add LCL to x
        xlcl = windh(LCL.to('meters').magnitude, height, inv=True)
        windx = np.sort(np.append(windx, xlcl))
        windy = windh(windx, height)

    pressures = mc.height_to_pressure_std(windy * units.meters)

    wvmr0 = mc.mixing_ratio_from_relative_humidity(initp, temp_, humid / 100.)

    # now calculate the air parcel temperatures and RH at each position
    if lcl_[0] <= min(pressures):
        # the LCL sits above the highest terrain: dry ascent throughout,
        # with RH recovered from the constant surface mixing ratio
        T = mc.dry_lapse(pressures, temp_)
        RH = [mc.relative_humidity_from_mixing_ratio(p, t, wvmr0)
              for t, p in zip(T, pressures)]
    else:
        mini = np.argmin(pressures)
        p1 = pressures[:mini + 1]
        p2 = pressures[mini:]  # with an overlap
        p11 = p1[p1 >= lcl_[0] * .9999999]  # lower (with tol) with lcl
        p12 = p1[p1 < lcl_[0] * 1.000009]  # upper (with tol) with lcl
        T11 = mc.dry_lapse(p11, temp_)
        T12 = mc.moist_lapse(p12, lcl_[1])
        T1 = concatenate((T11[:-1], T12))
        T2 = mc.dry_lapse(p2, T1[-1])
        T = concatenate((T1, T2[1:]))
        wvmrtop = mc.saturation_mixing_ratio(pressures[mini], T[mini])
        # windward of the crest and below the LCL: RH from the surface mixing
        # ratio; between the LCL and the crest: saturated; leeward of the
        # crest: RH from the mixing ratio at the crest
        RH = []
        for i, (p, t) in enumerate(zip(pressures, T)):
            if p > lcl_[0] and i <= mini:
                v = mc.relative_humidity_from_mixing_ratio(p, t, wvmr0)
            elif i < mini:
                v = 1
            else:
                v = mc.relative_humidity_from_mixing_ratio(p, t, wvmrtop)
            RH.append(v)

    RH = concatenate(RH)
    return windx, mtny, windy, lcl_, LCL, T.to("degC"), RH
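
The core of atmCalc is the split at the LCL: dry-adiabatic ascent below it, moist-adiabatic ascent above it, then dry descent in the lee. A compact sketch of just the ascent split, assuming MetPy >= 1.0 (mpcalc.parcel_profile performs the same split in a single call; the manual version mirrors atmCalc's structure):

import numpy as np
import metpy.calc as mpcalc
from metpy.units import units

p = np.linspace(1000.0, 600.0, 9) * units.hPa
t0, td0 = 25.0 * units.degC, 15.0 * units.degC

# find the LCL, then follow the dry adiabat below it and the moist one above
lcl_p, lcl_t = mpcalc.lcl(p[0], t0, td0)
t_below = mpcalc.dry_lapse(p[p >= lcl_p], t0)
t_above = mpcalc.moist_lapse(p[p < lcl_p], lcl_t, reference_pressure=lcl_p)
profile = np.concatenate([t_below.to("degC"), t_above.to("degC")])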
Example #29
                        "/v1/forecast/spec/max_wndgust10m/" + year + "/" +
                        month + "/max_wndgust10m-fc-spec-PT1H-" + model +
                        "-v1-" + date + ".sub.nc").sel({
                            "time":
                            dt.datetime.strptime(valid, "%Y-%m-%d %H:%M") +
                            dt.timedelta(hours=wg_lead)
                        }).max_wndgust10m

# Calculate a few thermodynamic variables
dp = get_dp(hur=da_rh, ta=da_ta, dp_mask=False)
ta_unit = units.units.degC * da_ta
dp_unit = units.units.degC * dp
p_unit = units.units.hectopascals * p_3d
hur_unit = mpcalc.relative_humidity_from_dewpoint(ta_unit, dp_unit) \
    * 100 * units.units.percent
q_unit = mpcalc.mixing_ratio_from_relative_humidity(hur_unit, ta_unit, p_unit)

# plot
m = Basemap(llcrnrlon=lon.min(),
            llcrnrlat=lat.min(),
            urcrnrlon=lon.max(),
            urcrnrlat=lat.max(),
            projection="cyl",
            resolution="h")
#omega = wrf.omega(q_unit, ta_unit.to("K"), da_w.values, p_3d*100)
#omega = omega.assign_coords(dim_0=p, dim_2=lon, dim_1=lat)
#omega = xr.where((da_z >= 1000) & (da_z <= 3000), omega, np.nan)
#c = xr.plot.contour(omega.mean("dim_0"), levels=[10,20,50], colors=["#d0d1e6","#3690c0","#034e7b"])
c = xr.plot.contour(da_w.min("pressure").coarsen(
    {
        "latitude": 10,
Example #30
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn('asos')
    ctx = get_autoplot_context(fdict, get_description())

    station = ctx['zstation']
    network = ctx['network']
    month = ctx['month']

    nt = NetworkTable(network)

    if month == 'all':
        months = range(1, 13)
    elif month == 'fall':
        months = [9, 10, 11]
    elif month == 'winter':
        months = [12, 1, 2]
    elif month == 'spring':
        months = [3, 4, 5]
    elif month == 'summer':
        months = [6, 7, 8]
    else:
        ts = datetime.datetime.strptime("2000-" + month + "-01", '%Y-%b-%d')
        # make sure it is length two for the trick below in SQL
        months = [ts.month, 999]

    df = read_sql("""
        SELECT drct::int as t, dwpf, tmpf, relh,
        coalesce(mslp, alti * 33.8639, 1013.25) as slp
        from alldata where station = %s
        and drct is not null and dwpf is not null and dwpf <= tmpf
        and sknt > 3 and drct::int %% 10 = 0
        and extract(month from valid) in %s
        and report_type = 2
    """,
                  pgconn,
                  params=(station, tuple(months)))
    # Convert sea level pressure to station pressure
    df['pressure'] = mcalc.add_height_to_pressure(
        df['slp'].values * units('millibars'),
        nt.sts[station]['elevation'] * units('m')).to(units('millibar'))
    # compute mixing ratio
    df['mixingratio'] = mcalc.mixing_ratio_from_relative_humidity(
        df['relh'].values * units('percent'),
        df['tmpf'].values * units('degF'),
        df['pressure'].values * units('millibars'))
    # compute vapor pressure
    df['vapor_pressure'] = mcalc.vapor_pressure(
        df['pressure'].values * units('millibars'),
        df['mixingratio'].values * units('kg/kg')).to(units('kPa'))

    means = df.groupby('t').mean().copy()
    # back out the dew point from the group-mean vapor pressure
    means['dwpf'] = mcalc.dewpoint(means['vapor_pressure'].values *
                                   units('kPa')).to(units('degF')).m

    (fig, ax) = plt.subplots(1, 1)
    ax.bar(means.index.values,
           means['dwpf'].values,
           ec='green',
           fc='green',
           width=10,
           align='center')
    ax.grid(True, zorder=11)
    ax.set_title(("%s [%s]\nAverage Dew Point by Wind Direction (month=%s) "
                  "(%s-%s)\n"
                  "(must have 3+ hourly obs > 3 knots at given direction)") %
                 (nt.sts[station]['name'], station, month.upper(),
                  max([1973, nt.sts[station]['archive_begin'].year
                       ]), datetime.datetime.now().year),
                 size=10)

    ax.set_ylabel("Dew Point [F]")
    ax.set_ylim(means['dwpf'].min() - 5, means['dwpf'].max() + 5)
    ax.set_xlim(-5, 365)
    ax.set_xticks([0, 45, 90, 135, 180, 225, 270, 315, 360])
    ax.set_xticklabels(['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW', 'N'])
    ax.set_xlabel("Wind Direction")

    return fig, means['dwpf']
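
Rather than averaging dew point directly, Example #30 (and #31 below) averages vapor pressure within each group and only then inverts back to a dew point, which is the physically consistent way to average moisture. The round trip in isolation, with illustrative values and assuming MetPy >= 1.0:

import metpy.calc as mpcalc
from metpy.units import units

p = 990.0 * units.hPa
w = 0.012 * units("kg/kg")  # a group-mean mixing ratio

e = mpcalc.vapor_pressure(p, w)  # partial pressure of water vapor
td = mpcalc.dewpoint(e)          # invert to the group-mean dew point
print(td.to("degF"))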
Example #31
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn('asos')
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['zstation']
    network = ctx['network']
    month = ctx['month']

    nt = NetworkTable(network)

    if month == 'all':
        months = range(1, 13)
    elif month == 'fall':
        months = [9, 10, 11]
    elif month == 'winter':
        months = [12, 1, 2]
    elif month == 'spring':
        months = [3, 4, 5]
    elif month == 'summer':
        months = [6, 7, 8]
    else:
        ts = datetime.datetime.strptime("2000-"+month+"-01", '%Y-%b-%d')
        # make sure it is length two for the trick below in SQL
        months = [ts.month, 999]

    df = read_sql("""
        SELECT tmpf::int as tmpf, dwpf,
        coalesce(mslp, alti * 33.8639, 1013.25) as slp
        from alldata where station = %s
        and drct is not null and dwpf is not null and dwpf <= tmpf
        and sknt > 3 and drct::int %% 10 = 0
        and extract(month from valid) in %s
        and report_type = 2
    """, pgconn, params=(station, tuple(months)))
    # Convert sea level pressure to station pressure
    df['pressure'] = mcalc.add_height_to_pressure(
        df['slp'].values * units('millibars'),
        nt.sts[station]['elevation'] * units('m')
    ).to(units('millibar'))
    # compute RH
    df['relh'] = mcalc.relative_humidity_from_dewpoint(
        df['tmpf'].values * units('degF'),
        df['dwpf'].values * units('degF')
    )
    # compute mixing ratio
    df['mixingratio'] = mcalc.mixing_ratio_from_relative_humidity(
        df['relh'].values,
        df['tmpf'].values * units('degF'),
        df['pressure'].values * units('millibars')
    )
    # compute vapor pressure
    df['vapor_pressure'] = mcalc.vapor_pressure(
        df['pressure'].values * units('millibars'),
        df['mixingratio'].values * units('kg/kg')
    ).to(units('kPa'))

    means = df.groupby('tmpf').mean().copy()
    # back out the dew point from the group-mean vapor pressure
    means['dwpf'] = mcalc.dewpoint(
        means['vapor_pressure'].values * units('kPa')
    ).to(units('degF')).m
    means.reset_index(inplace=True)
    # compute RH again
    means['relh'] = mcalc.relative_humidity_from_dewpoint(
        means['tmpf'].values * units('degF'),
        means['dwpf'].values * units('degF')
    ) * 100.

    (fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
    ax.bar(
        means['tmpf'].values - 0.5, means['dwpf'].values - 0.5,
        ec='green', fc='green', width=1
    )
    ax.grid(True, zorder=11)
    ax.set_title(("%s [%s]\nAverage Dew Point by Air Temperature (month=%s) "
                  "(%s-%s)\n"
                  "(must have 3+ hourly observations at the given temperature)"
                  ) % (nt.sts[station]['name'], station, month.upper(),
                       nt.sts[station]['archive_begin'].year,
                       datetime.datetime.now().year), size=10)

    ax.plot([0, 140], [0, 140], color='b')
    ax.set_ylabel("Dew Point [F]")
    y2 = ax.twinx()
    y2.plot(means['tmpf'].values, means['relh'].values, color='k')
    y2.set_ylabel("Relative Humidity [%] (black line)")
    y2.set_yticks([0, 5, 10, 25, 50, 75, 90, 95, 100])
    y2.set_ylim(0, 100)
    ax.set_ylim(0, means['tmpf'].max() + 2)
    ax.set_xlim(0, means['tmpf'].max() + 2)
    ax.set_xlabel(r"Air Temperature $^\circ$F")

    return fig, means[['tmpf', 'dwpf', 'relh']]
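
Examples #30 and #31, like the first listing above, reduce sea level pressure to station pressure before any moisture math, since the mixing-ratio formula needs the pressure actually felt at the sensor. The reduction in isolation, assuming MetPy >= 1.0 and a hypothetical 300 m station elevation:

import metpy.calc as mpcalc
from metpy.units import units

slp = 1013.2 * units.hPa
elevation = 300.0 * units.m  # hypothetical station elevation

station_p = mpcalc.add_height_to_pressure(slp, elevation)
print(station_p)  # roughly 978 hPa under the standard atmosphere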